mcdi.c revision cd0ecc9a6d279c8c5c5336f576330c45f5c80939
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2008-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/delay.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

#define MCDI_RPC_TIMEOUT       (10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 10ms for the status word to be set. */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		100
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
	EFX_BUG_ON_PARANOID(!efx->mcdi);
	return &efx->mcdi->iface;
}

int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		return -ENOMEM;

	mcdi = efx_mcdi(efx);
	init_waitqueue_head(&mcdi->wq);
	spin_lock_init(&mcdi->iface_lock);
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	mcdi->mode = MCDI_MODE_POLL;

	(void) efx_mcdi_poll_reboot(efx);

	/* Recover from a failed assertion before probing */
	return efx_mcdi_handle_assertion(efx);
}

void efx_mcdi_fini(struct efx_nic *efx)
{
	BUG_ON(efx->mcdi &&
	       atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT);
	kfree(efx->mcdi);
}

static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
			    const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		hdr_len = 4;
	} else {
		/* MCDI v2 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
}

static int efx_mcdi_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:
		return 0;
#define TRANSLATE_ERROR(name)					\
	case MC_CMD_ERR_ ## name:				\
		return -name;
	TRANSLATE_ERROR(EPERM);
	TRANSLATE_ERROR(ENOENT);
	TRANSLATE_ERROR(EINTR);
	TRANSLATE_ERROR(EAGAIN);
	TRANSLATE_ERROR(EACCES);
	TRANSLATE_ERROR(EBUSY);
	TRANSLATE_ERROR(EINVAL);
	TRANSLATE_ERROR(EDEADLK);
	TRANSLATE_ERROR(ENOSYS);
	TRANSLATE_ERROR(ETIME);
	TRANSLATE_ERROR(EALREADY);
	TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
	case MC_CMD_ERR_ALLOC_FAIL:
		return -ENOBUFS;
	case MC_CMD_ERR_MAC_EXIST:
		return -EADDRINUSE;
	default:
		return -EPROTO;
	}
}
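
/* Each TRANSLATE_ERROR(name) entry above pastes the token into both the
 * case label and the return value; TRANSLATE_ERROR(EINVAL), for example,
 * expands to:
 *
 *	case MC_CMD_ERR_EINVAL:
 *		return -EINVAL;
 *
 * mapping each MCDI protocol error code onto the matching negative errno.
 */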

static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}

static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}
	/* Poll for completion. Poll quickly (once per microsecond) for the
	 * first jiffy, because MCDI responses are generally fast. After
	 * that, back off and poll approximately once per jiffy.
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		rmb();
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}

/* Test and clear MC-rebooted flag for this port/function; reset
 * software state as necessary.
 */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return 0;

	return efx->type->mcdi_poll_reboot(efx);
}

static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING. */
	wait_event(mcdi->wq,
		   atomic_cmpxchg(&mcdi->state,
				  MCDI_STATE_QUIESCENT,
				  MCDI_STATE_RUNNING)
		   == MCDI_STATE_QUIESCENT);
}

static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(
		    mcdi->wq,
		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
		    MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* If efx_mcdi_mode_poll() switched us back to polled completions,
	 * poll for the completion directly. If efx_mcdi_ev_cpl() completed
	 * the request first, then we'll just end up completing the request
	 * again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(),
	 * which wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}

static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
{
	/* If the interface is RUNNING, then move to COMPLETED and wake any
	 * waiters. If the interface isn't in RUNNING then we've received a
	 * duplicate completion after we've already transitioned back to
	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
	 * have failed the seqno check].
	 */
	if (atomic_cmpxchg(&mcdi->state,
			   MCDI_STATE_RUNNING,
			   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}

static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	wake_up(&mcdi->wq);
}
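
/* The interface state machine used by the three helpers above:
 *
 *   QUIESCENT -- efx_mcdi_acquire() --> RUNNING
 *	(the winning caller owns the interface and may issue one request)
 *   RUNNING -- efx_mcdi_complete() --> COMPLETED
 *	(a completion arrived; any waiter in efx_mcdi_await_completion()
 *	 is woken)
 *   any state -- efx_mcdi_release() --> QUIESCENT
 *	(the response has been consumed; the next requester may acquire)
 */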

static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake)
		efx_mcdi_complete(mcdi);
}

int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const efx_dword_t *inbuf, size_t inlen,
		 efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	int rc;

	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
	if (rc)
		return rc;
	return efx_mcdi_rpc_finish(efx, cmd, inlen,
				   outbuf, outlen, outlen_actual);
}
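
/* A minimal usage sketch for efx_mcdi_rpc() (illustrative only; nothing
 * in the driver calls this): declare request and response buffers with
 * MCDI_DECLARE_BUF(), fill in request fields with MCDI_SET_DWORD(), issue
 * the command, then extract response fields with MCDI_DWORD().
 * MC_CMD_DRV_ATTACH is used purely as a familiar example command; see
 * efx_mcdi_drv_attach() below for the real implementation.
 */
static inline int efx_mcdi_rpc_usage_sketch(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
		return -EIO;

	return (int)MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
}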

int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (efx->type->mcdi_max_ver < 0 ||
	     (efx->type->mcdi_max_ver < 2 &&
	      cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	efx_mcdi_acquire(mcdi);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	efx_mcdi_copyin(efx, cmd, inbuf, inlen);
	return 0;
}

int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t hdr_len, data_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		spin_unlock_bh(&mcdi->iface_lock);

		BUG_ON(rc > 0);

		if (rc == 0) {
			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
						      min(outlen, data_len));
			if (outlen_actual != NULL)
				*outlen_actual = data_len;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);

		if (rc == -EIO || rc == -EINTR) {
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}
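
/* A minimal sketch (illustrative only; nothing in the driver calls this)
 * of the split efx_mcdi_rpc_start()/efx_mcdi_rpc_finish() pair, which
 * lets the caller do other work while the MC processes the command.  The
 * command and input length must match between the two calls, exactly as
 * in efx_mcdi_rpc() above; MC_CMD_GET_VERSION is used here only because
 * it takes no input parameters.
 */
static inline int efx_mcdi_split_rpc_sketch(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc_start(efx, MC_CMD_GET_VERSION, NULL, 0);
	if (rc)
		return rc;

	/* The caller may do unrelated work here; the MCDI interface
	 * stays owned (RUNNING) until efx_mcdi_rpc_finish() releases it.
	 */

	return efx_mcdi_rpc_finish(efx, MC_CMD_GET_VERSION, 0,
				   outbuf, sizeof(outbuf), &outlen);
}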

void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * MCDI requests are always completed in shared memory. We do this by
	 * switching the mode to POLL and then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}

void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()].
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the MC
	 * reboot before or after the copyout? The best we can do in either
	 * case is to return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
	}

	spin_unlock(&mcdi->iface_lock);
}

/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily.  We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;

	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}

/**************************************************************************
 *
 * Specific request functions
 *
 **************************************************************************
 */

void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlength;
	const __le16 *ver_words;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;

	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	snprintf(buf, len, "%u.%u.%u.%u",
		 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
		 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}

int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			bool *was_attached)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		memcpy(mac_address,
		       port_num ?
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
		       ETH_ALEN);
	if (fw_subtype_list) {
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}

int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
	u32 dest = 0;
	int rc;

	if (uart)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
	if (evq)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
		  __func__, rc);
	return rc;
}

int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
	case MC_CMD_NVRAM_TEST_PASS:
	case MC_CMD_NVRAM_TEST_NOTSUPP:
		return 0;
	default:
		return -EIO;
	}
}

int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 nvram_types;
	unsigned int type;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		goto fail1;

	type = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = efx_mcdi_nvram_test(efx, type);
			if (rc)
				goto fail2;
		}
		type++;
		nvram_types >>= 1;
	}

	return 0;

fail2:
	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
		  __func__, type);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice: once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR, and once more because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}

static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.  We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
			    NULL, 0, NULL);
}

int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_read_assertion(efx);
	if (rc)
		return rc;

	efx_mcdi_exit_assertion(efx);

	return 0;
}

void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
	int rc;

	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
}

static int efx_mcdi_reset_port(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
	return rc;
}

static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down: the MC reboots before it can
	 * complete the reply, so -EIO means the reboot succeeded and 0
	 * means it did not. */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}

int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc;

	/* Recover from a failed assertion pre-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	if (method == RESET_TYPE_WORLD)
		return efx_mcdi_reset_mc(efx);
	else
		return efx_mcdi_reset_port(efx);
}

static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
	memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}

int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}

int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

#ifdef CONFIG_SFC_MTD

#define EFX_MCDI_NVRAM_LEN_MAX 128
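
/* NVRAM updates follow a three-phase MCDI protocol: NVRAM_UPDATE_START,
 * then any number of NVRAM_WRITE/NVRAM_ERASE commands in chunks of at
 * most EFX_MCDI_NVRAM_LEN_MAX bytes, then NVRAM_UPDATE_FINISH.  Reads
 * are chunked the same way but do not require an update to be open.
 * The MTD callbacks below begin an update lazily on the first erase or
 * write and close it in efx_mcdi_mtd_sync().
 */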

static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf,
			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
				loff_t offset, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
		      size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->common.mtd.erasesize;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout. */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}

int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
		       size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc = 0;

	if (part->updating) {
		part->updating = false;
		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
	}

	return rc;
}

void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}

#endif /* CONFIG_SFC_MTD */