mcdi.c revision 251111d9a1bd9a26e25446d876156bf265858cb5
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2008-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

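/* Timeout for an MCDI request to complete, in jiffies (10 seconds) */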
#define MCDI_RPC_TIMEOUT       (10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 20ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		200
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

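/* Mask for the sequence number carried in the MCDI header: only the low
 * EFX_WIDTH(MCDI_HEADER_SEQ) bits of the driver's 32-bit counter are sent.
 */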
#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
	EFX_BUG_ON_PARANOID(!efx->mcdi);
	return &efx->mcdi->iface;
}

int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		return -ENOMEM;

	mcdi = efx_mcdi(efx);
	init_waitqueue_head(&mcdi->wq);
	spin_lock_init(&mcdi->iface_lock);
	mcdi->state = MCDI_STATE_QUIESCENT;
	mcdi->mode = MCDI_MODE_POLL;

	(void) efx_mcdi_poll_reboot(efx);
	mcdi->new_epoch = true;

	/* Recover from a failed assertion before probing */
	return efx_mcdi_handle_assertion(efx);
}

void efx_mcdi_fini(struct efx_nic *efx)
{
	BUG_ON(efx->mcdi && efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
	kfree(efx->mcdi);
}

static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
				  const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		hdr_len = 4;
	} else {
		/* MCDI v2 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);

	mcdi->new_epoch = false;
}
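
/* Note on epochs: MCDI_HEADER_NOT_EPOCH is the inverse of mcdi->new_epoch,
 * so the first request posted after efx_mcdi_init() or after a detected MC
 * reboot goes out with the flag clear, marking the start of a new epoch;
 * every later request has it set.
 */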

static int efx_mcdi_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:
		return 0;
#define TRANSLATE_ERROR(name)					\
	case MC_CMD_ERR_ ## name:				\
		return -name;
	TRANSLATE_ERROR(EPERM);
	TRANSLATE_ERROR(ENOENT);
	TRANSLATE_ERROR(EINTR);
	TRANSLATE_ERROR(EAGAIN);
	TRANSLATE_ERROR(EACCES);
	TRANSLATE_ERROR(EBUSY);
	TRANSLATE_ERROR(EINVAL);
	TRANSLATE_ERROR(EDEADLK);
	TRANSLATE_ERROR(ENOSYS);
	TRANSLATE_ERROR(ETIME);
	TRANSLATE_ERROR(EALREADY);
	TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
	case MC_CMD_ERR_ALLOC_FAIL:
		return -ENOBUFS;
	case MC_CMD_ERR_MAC_EXIST:
		return -EADDRINUSE;
	default:
		return -EPROTO;
	}
}

static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
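		/* The MC returns its error code in the first dword of the
		 * response payload; translate it to a negative Linux errno.
		 */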
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}

static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once per microsecond) for the
	 * first jiffy, because MCDI responses are generally fast. After
	 * that, back off and poll approximately once per jiffy.
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		rmb();
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}

/* Test and clear MC-rebooted flag for this port/function; reset
 * software state as necessary.
 */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return 0;

	return efx->type->mcdi_poll_reboot(efx);
}

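/* Interface state machine, driven by the helpers below:
 * QUIESCENT -> RUNNING (efx_mcdi_acquire) -> COMPLETED (efx_mcdi_complete)
 * -> QUIESCENT (efx_mcdi_release). The RUNNING -> COMPLETED transition uses
 * cmpxchg() so that duplicate completions are detected and ignored.
 */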
static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING. */
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING) ==
		   MCDI_STATE_QUIESCENT);
}

static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
			       MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}

static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
{
	/* If the interface is RUNNING, then move to COMPLETED and wake any
	 * waiters. If the interface isn't in RUNNING then we've received a
	 * duplicate completion after we've already transitioned back to
	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
	 * have failed the seqno check].
	 */
	if (cmpxchg(&mcdi->state, MCDI_STATE_RUNNING, MCDI_STATE_COMPLETED) ==
	    MCDI_STATE_RUNNING) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}

static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	mcdi->state = MCDI_STATE_QUIESCENT;
	wake_up(&mcdi->wq);
}

static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake)
		efx_mcdi_complete(mcdi);
}

static int
efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
{
	if (efx->type->mcdi_max_ver < 0 ||
	     (efx->type->mcdi_max_ver < 2 &&
	      cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	return 0;
}

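/* Typical synchronous usage (see e.g. efx_mcdi_nvram_info() below):
 *
 *	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
 *	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
 *	size_t outlen;
 *	int rc;
 *
 *	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
 *	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
 *			  outbuf, sizeof(outbuf), &outlen);
 *
 * On success the response is copied into outbuf (truncated to outlen
 * bytes) and, if outlen_actual is non-NULL, *outlen_actual is set to the
 * full response length.
 */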
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const efx_dword_t *inbuf, size_t inlen,
		 efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	int rc;

	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
	if (rc)
		return rc;
	return efx_mcdi_rpc_finish(efx, cmd, inlen,
				   outbuf, outlen, outlen_actual);
}

int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	efx_mcdi_acquire(mcdi);
	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
	return 0;
}

int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t hdr_len, data_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		spin_unlock_bh(&mcdi->iface_lock);

		BUG_ON(rc > 0);

		if (rc == 0) {
			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
						      min(outlen, data_len));
			if (outlen_actual != NULL)
				*outlen_actual = data_len;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);

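		/* The MC rebooted or asserted: wait for the status word to
		 * be updated, consume it, and begin a new epoch.
		 */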
		if (rc == -EIO || rc == -EINTR) {
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
			mcdi->new_epoch = true;
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}

void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL and then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}

void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()].
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the MC
	 * reboot before or after the copyout? The best we can always do is
	 * just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
		mcdi->new_epoch = true;
	}

	spin_unlock(&mcdi->iface_lock);
}

/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily.  We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;

	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}

/**************************************************************************
 *
 * Specific request functions
 *
 **************************************************************************
 */

void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlength;
	const __le16 *ver_words;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;

	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	snprintf(buf, len, "%u.%u.%u.%u",
		 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
		 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}

int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			bool *was_attached)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		memcpy(mac_address,
		       port_num ?
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
		       ETH_ALEN);
	if (fw_subtype_list) {
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}

int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
	u32 dest = 0;
	int rc;

	if (uart)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
	if (evq)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
		  __func__, rc);
	return rc;
}

int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
	case MC_CMD_NVRAM_TEST_PASS:
	case MC_CMD_NVRAM_TEST_NOTSUPP:
		return 0;
	default:
		return -EIO;
	}
}

int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 nvram_types;
	unsigned int type;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		goto fail1;

	type = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = efx_mcdi_nvram_test(efx, type);
			if (rc)
				goto fail2;
		}
		type++;
		nvram_types >>= 1;
	}

	return 0;

fail2:
	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
		  __func__, type);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice: once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR, and once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}

static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.  We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
			    NULL, 0, NULL);
}

int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_read_assertion(efx);
	if (rc)
		return rc;

	efx_mcdi_exit_assertion(efx);

	return 0;
}

void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
	int rc;

	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
}

static int efx_mcdi_reset_port(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
	return rc;
}

static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down: a successful reboot kills the
	 * in-flight response, so -EIO means the MC rebooted as requested,
	 * while 0 means the reboot never happened.
	 */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}

int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc;

	/* Recover from a failed assertion pre-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	if (method == RESET_TYPE_WORLD)
		return efx_mcdi_reset_mc(efx);
	else
		return efx_mcdi_reset_port(efx);
}

static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
	memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}

int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}

int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

#ifdef CONFIG_SFC_MTD

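/* Maximum payload transferred per NVRAM READ/WRITE request; the MTD
 * operations below split larger transfers into chunks of this size.
 */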
#define EFX_MCDI_NVRAM_LEN_MAX 128

static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf,
			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

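	/* The request length must be rounded up to a dword boundary */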
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
				loff_t offset, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
		      size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
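	/* Round the start down to an erase-block boundary */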
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->common.mtd.erasesize;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout. */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}

int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
		       size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc = 0;

	if (part->updating) {
		part->updating = false;
		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
	}

	return rc;
}

void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}

#endif /* CONFIG_SFC_MTD */
