/* mcdi.c revision df2cd8af097850bb3440817fdb6b08922ff4b327 */
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2008-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
9
10#include <linux/delay.h>
11#include "net_driver.h"
12#include "nic.h"
13#include "io.h"
14#include "farch_regs.h"
15#include "mcdi_pcol.h"
16#include "phy.h"
17
18/**************************************************************************
19 *
20 * Management-Controller-to-Driver Interface
21 *
22 **************************************************************************
23 */
24
25#define MCDI_RPC_TIMEOUT       (10 * HZ)
26
27/* A reboot/assertion causes the MCDI status word to be set after the
28 * command word is set or a REBOOT event is sent. If we notice a reboot
29 * via these mechanisms then wait 10ms for the status word to be set. */
30#define MCDI_STATUS_DELAY_US		100
31#define MCDI_STATUS_DELAY_COUNT		100
32#define MCDI_STATUS_SLEEP_MS						\
33	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
34
35#define SEQ_MASK							\
36	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
37
38static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
39{
40	EFX_BUG_ON_PARANOID(!efx->mcdi);
41	return &efx->mcdi->iface;
42}
43
/* Allocate and initialise MCDI state for @efx.  The interface starts
 * in polled mode (event completions are enabled later via
 * efx_mcdi_mode_event()).  Returns 0 or a negative errno.
 */
int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		return -ENOMEM;

	mcdi = efx_mcdi(efx);
	init_waitqueue_head(&mcdi->wq);
	spin_lock_init(&mcdi->iface_lock);
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	mcdi->mode = MCDI_MODE_POLL;

	/* Consume any stale reboot status so it is not reported against
	 * the first real MCDI request */
	(void) efx_mcdi_poll_reboot(efx);

	/* Recover from a failed assertion before probing */
	return efx_mcdi_handle_assertion(efx);
}
63
/* Free the MCDI state allocated by efx_mcdi_init().  The interface
 * must be idle; an in-flight request at this point is a driver bug.
 */
void efx_mcdi_fini(struct efx_nic *efx)
{
	BUG_ON(efx->mcdi &&
	       atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT);
	kfree(efx->mcdi);
}
70
/* Build the MCDI request header(s) for @cmd and hand the request off
 * to the NIC-type-specific transport.  For MCDI v1 a single header
 * dword carries the command and length; for MCDI v2 the first dword is
 * the V2_EXTN escape and a second dword carries the real command and
 * (potentially larger) length.  Caller must hold the interface in a
 * non-QUIESCENT state (efx_mcdi_acquire()).
 */
static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
			    const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);

	/* seqno is truncated to the width of the header field */
	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	/* Ask the MC for a completion event only in event mode */
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		hdr_len = 4;
	} else {
		/* MCDI v2: length checked against the larger v2 limit;
		 * the v1 DATALEN field is left 0 and the real length goes
		 * in the extended header */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
}
114
/* Copy @outlen bytes of response payload (skipping the response header
 * of mcdi->resp_hdr_len bytes) from the NIC into @outbuf.  Caller must
 * hold the interface in a non-QUIESCENT state.
 */
static void
efx_mcdi_copyout(struct efx_nic *efx, efx_dword_t *outbuf, size_t outlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);

	efx->type->mcdi_read_response(efx, outbuf, mcdi->resp_hdr_len, outlen);
}
124
125static int efx_mcdi_errno(unsigned int mcdi_err)
126{
127	switch (mcdi_err) {
128	case 0:
129		return 0;
130#define TRANSLATE_ERROR(name)					\
131	case MC_CMD_ERR_ ## name:				\
132		return -name;
133	TRANSLATE_ERROR(EPERM);
134	TRANSLATE_ERROR(ENOENT);
135	TRANSLATE_ERROR(EINTR);
136	TRANSLATE_ERROR(EAGAIN);
137	TRANSLATE_ERROR(EACCES);
138	TRANSLATE_ERROR(EBUSY);
139	TRANSLATE_ERROR(EINVAL);
140	TRANSLATE_ERROR(EDEADLK);
141	TRANSLATE_ERROR(ENOSYS);
142	TRANSLATE_ERROR(ETIME);
143	TRANSLATE_ERROR(EALREADY);
144	TRANSLATE_ERROR(ENOSPC);
145#undef TRANSLATE_ERROR
146	case MC_CMD_ERR_ALLOC_FAIL:
147		return -ENOBUFS;
148	case MC_CMD_ERR_MAC_EXIST:
149		return -EADDRINUSE;
150	default:
151		return -EPROTO;
152	}
153}
154
/* Parse the response header in NIC memory and fill in
 * mcdi->resp_hdr_len, resp_data_len and resprc.  Handles both v1
 * (single dword) and v2 (V2_EXTN escape + extended dword) headers,
 * detects a rebooted MC (error flag with zero-length response) and a
 * sequence-number mismatch, and translates any MCDI error code placed
 * in the first payload dword.
 */
static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		/* v2 response: actual length lives in the second header dword */
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		/* Error with no payload: the MC rebooted underneath us */
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		/* The first payload dword carries the MCDI error code */
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}
192
/* Busy-wait for completion of the outstanding MCDI request.  Returns 0
 * once the response header has been parsed (the per-command status is
 * in mcdi->resprc) or -ETIMEDOUT after MCDI_RPC_TIMEOUT.
 */
static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		/* A reboot terminates the request: fake an empty, failed
		 * response */
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		/* Sample jiffies before the response check so that a
		 * response arriving just in time still wins */
		time = jiffies;

		rmb();
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	efx_mcdi_read_response_header(efx);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}
239
/* Test and clear MC-rebooted flag for this port/function; reset
 * software state as necessary.  Returns 0 if no reboot occurred,
 * otherwise the (negative) code from the NIC-type hook.
 */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
	int rc;

	/* May be called before MCDI state is allocated (early probe) */
	if (!efx->mcdi)
		return 0;

	rc = efx->type->mcdi_poll_reboot(efx);
	if (!rc)
		return 0;

	/* MAC statistics have been cleared on the NIC; clear our copy
	 * so that efx_update_diff_stat() can continue to work.
	 */
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	return rc;
}
261
/* Claim exclusive use of the MCDI interface for one request.
 * Wait until the interface becomes QUIESCENT and we win the race
 * to mark it RUNNING; the cmpxchg makes the test-and-transition
 * atomic against other requestors.  Released by efx_mcdi_release().
 */
static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
	wait_event(mcdi->wq,
		   atomic_cmpxchg(&mcdi->state,
				  MCDI_STATE_QUIESCENT,
				  MCDI_STATE_RUNNING)
		   == MCDI_STATE_QUIESCENT);
}
272
/* Sleep until the outstanding event-mode request completes (or the
 * mode is switched back to polling underneath us).  Returns 0 on
 * completion or -ETIMEDOUT.
 */
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(
		    mcdi->wq,
		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
		    MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}
296
/* Mark the outstanding request COMPLETED and wake waiters.  Returns
 * true if we performed the RUNNING->COMPLETED transition, false if the
 * interface was not RUNNING (i.e. this was a duplicate completion).
 *
 * If the interface is RUNNING, then move to COMPLETED and wake any
 * waiters. If the interface isn't in RUNNING then we've received a
 * duplicate completion after we've already transitioned back to
 * QUIESCENT. [A subsequent invocation would increment seqno, so would
 * have failed the seqno check].
 */
static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
{
	if (atomic_cmpxchg(&mcdi->state,
			   MCDI_STATE_RUNNING,
			   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}
314
/* Return the interface to QUIESCENT and wake anyone blocked in
 * efx_mcdi_acquire() waiting to issue the next request.
 */
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	wake_up(&mcdi->wq);
}
320
/* Handle a CMDDONE completion event.  @seqno must match the current
 * request's sequence number; a mismatch either consumes a "credit"
 * (the completion of a request we already cancelled) or is logged as a
 * protocol error.  On a match, record the response state and complete
 * the waiter.
 */
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	/* iface_lock serialises against efx_mcdi_rpc_start()/_finish()
	 * updating seqno/credits */
	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake)
		efx_mcdi_complete(mcdi);
}
355
356int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
357		 const efx_dword_t *inbuf, size_t inlen,
358		 efx_dword_t *outbuf, size_t outlen,
359		 size_t *outlen_actual)
360{
361	int rc;
362
363	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
364	if (rc)
365		return rc;
366	return efx_mcdi_rpc_finish(efx, cmd, inlen,
367				   outbuf, outlen, outlen_actual);
368}
369
/* Validate and submit an MCDI request without waiting for completion.
 * Rejects commands/lengths the NIC's MCDI version cannot carry, then
 * acquires the interface, bumps the sequence number and copies the
 * request in.  Must be paired with efx_mcdi_rpc_finish().
 */
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* v1-only NICs cannot encode commands above the escape range */
	if (efx->type->mcdi_max_ver < 0 ||
	     (efx->type->mcdi_max_ver < 2 &&
	      cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	efx_mcdi_acquire(mcdi);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	efx_mcdi_copyin(efx, cmd, inbuf, inlen);
	return 0;
}
395
396int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
397			efx_dword_t *outbuf, size_t outlen,
398			size_t *outlen_actual)
399{
400	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
401	int rc;
402
403	if (mcdi->mode == MCDI_MODE_POLL)
404		rc = efx_mcdi_poll(efx);
405	else
406		rc = efx_mcdi_await_completion(efx);
407
408	if (rc != 0) {
409		/* Close the race with efx_mcdi_ev_cpl() executing just too late
410		 * and completing a request we've just cancelled, by ensuring
411		 * that the seqno check therein fails.
412		 */
413		spin_lock_bh(&mcdi->iface_lock);
414		++mcdi->seqno;
415		++mcdi->credits;
416		spin_unlock_bh(&mcdi->iface_lock);
417
418		netif_err(efx, hw, efx->net_dev,
419			  "MC command 0x%x inlen %d mode %d timed out\n",
420			  cmd, (int)inlen, mcdi->mode);
421	} else {
422		size_t resplen;
423
424		/* At the very least we need a memory barrier here to ensure
425		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
426		 * a spurious efx_mcdi_ev_cpl() running concurrently by
427		 * acquiring the iface_lock. */
428		spin_lock_bh(&mcdi->iface_lock);
429		rc = mcdi->resprc;
430		resplen = mcdi->resp_data_len;
431		spin_unlock_bh(&mcdi->iface_lock);
432
433		BUG_ON(rc > 0);
434
435		if (rc == 0) {
436			efx_mcdi_copyout(efx, outbuf,
437					 min(outlen, mcdi->resp_data_len));
438			if (outlen_actual != NULL)
439				*outlen_actual = resplen;
440		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
441			; /* Don't reset if MC_CMD_REBOOT returns EIO */
442		else if (rc == -EIO || rc == -EINTR) {
443			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
444				  -rc);
445			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
446		} else
447			netif_dbg(efx, hw, efx->net_dev,
448				  "MC command 0x%x inlen %d failed rc=%d\n",
449				  cmd, (int)inlen, -rc);
450
451		if (rc == -EIO || rc == -EINTR) {
452			msleep(MCDI_STATUS_SLEEP_MS);
453			efx_mcdi_poll_reboot(efx);
454		}
455	}
456
457	efx_mcdi_release(mcdi);
458	return rc;
459}
460
/* Switch the MCDI interface to polled completions.  Safe to call with
 * a request outstanding: the request is completed immediately and the
 * waiter falls back to polling shared memory.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	/* May be called before MCDI state is allocated */
	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}
484
/* Switch the MCDI interface to event-based completions.  Unlike the
 * switch to polling this cannot be done with a request outstanding, so
 * the interface is acquired first to drain any requestor.
 */
void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	/* May be called before MCDI state is allocated */
	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}
508
/* Handle a REBOOT or BADASSERT event: terminate any outstanding
 * request with @rc, or - if no request was outstanding - schedule a
 * reset and consume the MC status word ourselves.
 */
static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the mc
	 * reboot before or after the copyout? The best we can do always is
	 * just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			/* Fake an empty failed response; the credit makes a
			 * late CMDDONE event harmless */
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
	}

	spin_unlock(&mcdi->iface_lock);
}
553
/* Called from the event-queue processing path to dispatch MCDI events
 * to their handlers.  Unknown event codes are logged and ignored.
 */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	/* "BADSSERT" (sic) is the constant's name in the protocol header */
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily.  We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;

	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}
611
612/**************************************************************************
613 *
614 * Specific request functions
615 *
616 **************************************************************************
617 */
618
/* Format the MC firmware version into @buf (up to @len bytes) as
 * "a.b.c.d".  On any failure the error is logged and @buf is set to
 * the empty string.
 */
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlength;
	const __le16 *ver_words;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;

	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	/* The version field is four little-endian 16-bit words */
	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	snprintf(buf, len, "%u.%u.%u.%u",
		 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
		 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}
648
/* Tell the MC whether the driver is attaching (@driver_operating) or
 * detaching from this function.  If @was_attached is non-NULL it
 * receives the previous attach state.  Returns 0 or a negative errno.
 */
int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			bool *was_attached)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
679
/* Read the board configuration from the MC.  Any of @mac_address,
 * @fw_subtype_list and @capabilities may be NULL if the caller does
 * not need that item; per-port values are selected by efx_port_num().
 * The firmware subtype list is padded with zeroes up to the maximum
 * length.  Returns 0 or a negative errno.
 */
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		memcpy(mac_address,
		       port_num ?
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
		       ETH_ALEN);
	if (fw_subtype_list) {
		/* Copy as many entries as the (variable-length) response
		 * actually carries, then zero-fill the rest */
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}
733
734int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
735{
736	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
737	u32 dest = 0;
738	int rc;
739
740	if (uart)
741		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
742	if (evq)
743		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
744
745	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
746	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
747
748	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
749
750	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
751			  NULL, 0, NULL);
752	if (rc)
753		goto fail;
754
755	return 0;
756
757fail:
758	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
759	return rc;
760}
761
762int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
763{
764	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
765	size_t outlen;
766	int rc;
767
768	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
769
770	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
771			  outbuf, sizeof(outbuf), &outlen);
772	if (rc)
773		goto fail;
774	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
775		rc = -EIO;
776		goto fail;
777	}
778
779	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
780	return 0;
781
782fail:
783	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
784		  __func__, rc);
785	return rc;
786}
787
788int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
789			size_t *size_out, size_t *erase_size_out,
790			bool *protected_out)
791{
792	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
793	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
794	size_t outlen;
795	int rc;
796
797	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
798
799	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
800			  outbuf, sizeof(outbuf), &outlen);
801	if (rc)
802		goto fail;
803	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
804		rc = -EIO;
805		goto fail;
806	}
807
808	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
809	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
810	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
811				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
812	return 0;
813
814fail:
815	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
816	return rc;
817}
818
819int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
820{
821	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
822	int rc;
823
824	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
825
826	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
827
828	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
829			  NULL, 0, NULL);
830	if (rc)
831		goto fail;
832
833	return 0;
834
835fail:
836	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
837	return rc;
838}
839
/* Read @length bytes from NVRAM partition @type at @offset into
 * @buffer.  Returns 0 or a negative errno (logged on failure).
 *
 * NOTE(review): the response length (outlen) is not validated against
 * @length before the memcpy, so a short response would copy stale
 * buffer contents - presumably the MC always returns the requested
 * length on success; confirm against the MCDI protocol spec.
 */
int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			loff_t offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf,
			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
865
/* Write @length bytes from @buffer to NVRAM partition @type at
 * @offset.  Returns 0 or a negative errno (logged on failure).
 */
int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
			   loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	/* Send only the actual payload, rounded up to dword alignment,
	 * not the full maximum-sized buffer */
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
892
893int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
894			 loff_t offset, size_t length)
895{
896	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
897	int rc;
898
899	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
900	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
901	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
902
903	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
904
905	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
906			  NULL, 0, NULL);
907	if (rc)
908		goto fail;
909
910	return 0;
911
912fail:
913	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
914	return rc;
915}
916
917int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
918{
919	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
920	int rc;
921
922	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
923
924	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
925
926	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
927			  NULL, 0, NULL);
928	if (rc)
929		goto fail;
930
931	return 0;
932
933fail:
934	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
935	return rc;
936}
937
938static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
939{
940	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
941	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
942	int rc;
943
944	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
945
946	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
947			  outbuf, sizeof(outbuf), NULL);
948	if (rc)
949		return rc;
950
951	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
952	case MC_CMD_NVRAM_TEST_PASS:
953	case MC_CMD_NVRAM_TEST_NOTSUPP:
954		return 0;
955	default:
956		return -EIO;
957	}
958}
959
960int efx_mcdi_nvram_test_all(struct efx_nic *efx)
961{
962	u32 nvram_types;
963	unsigned int type;
964	int rc;
965
966	rc = efx_mcdi_nvram_types(efx, &nvram_types);
967	if (rc)
968		goto fail1;
969
970	type = 0;
971	while (nvram_types != 0) {
972		if (nvram_types & 1) {
973			rc = efx_mcdi_nvram_test(efx, type);
974			if (rc)
975				goto fail2;
976		}
977		type++;
978		nvram_types >>= 1;
979	}
980
981	return 0;
982
983fail2:
984	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
985		  __func__, type);
986fail1:
987	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
988	return rc;
989}
990
/* Read and log any firmware assertion state recorded by the MC.
 * Returns 0 (whether or not an assertion had fired) or a negative
 * errno if the state could not be read.
 */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers (R1 upwards, per the saved register set) */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}
1047
/* Reboot the MC out of its assertion handler (a no-op if it is not
 * asserted).  The RPC's status is deliberately ignored.
 */
static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.  We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
			    NULL, 0, NULL);
}
1064
/* Log any recorded MC assertion and, if the state could be read,
 * reboot the MC out of the assertion handler.  Returns 0 or a
 * negative errno from reading the assertion state.
 */
int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc = efx_mcdi_read_assertion(efx);

	if (rc == 0)
		efx_mcdi_exit_assertion(efx);
	return rc;
}
1077
/* Set the identification LED state.  efx_led_mode values map directly
 * onto the MC's LED states (checked at build time).  Failures are
 * logged but not returned.
 */
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
	int rc;

	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
}
1097
1098static int efx_mcdi_reset_port(struct efx_nic *efx)
1099{
1100	int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
1101	if (rc)
1102		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1103			  __func__, rc);
1104	return rc;
1105}
1106
/* Reboot the MC itself via MC_CMD_REBOOT (no flags, unlike the
 * post-assertion variant in efx_mcdi_exit_assertion()).
 *
 * Return-code handling is deliberately inverted: a successful reboot
 * kills the in-flight MCDI request, so -EIO is the *expected* outcome
 * and is mapped to 0.  Conversely, an rc of 0 means the MC replied
 * normally, i.e. it did not reboot, which we report as -EIO.
 */
static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
1124
/* Map a reset reason to the reset type to schedule.  On MCDI-managed
 * NICs every reason is escalated to RESET_TYPE_RECOVER_OR_ALL
 * regardless of @reason — presumably because finer-grained resets are
 * not useful here; TODO(review): confirm rationale against callers.
 */
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}
1129
1130int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
1131{
1132	int rc;
1133
1134	/* Recover from a failed assertion pre-reset */
1135	rc = efx_mcdi_handle_assertion(efx);
1136	if (rc)
1137		return rc;
1138
1139	if (method == RESET_TYPE_WORLD)
1140		return efx_mcdi_reset_mc(efx);
1141	else
1142		return efx_mcdi_reset_port(efx);
1143}
1144
1145static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1146				   const u8 *mac, int *id_out)
1147{
1148	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
1149	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
1150	size_t outlen;
1151	int rc;
1152
1153	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
1154	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
1155		       MC_CMD_FILTER_MODE_SIMPLE);
1156	memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
1157
1158	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
1159			  outbuf, sizeof(outbuf), &outlen);
1160	if (rc)
1161		goto fail;
1162
1163	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
1164		rc = -EIO;
1165		goto fail;
1166	}
1167
1168	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
1169
1170	return 0;
1171
1172fail:
1173	*id_out = -1;
1174	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1175	return rc;
1176
1177}
1178
1179
1180int
1181efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,  const u8 *mac, int *id_out)
1182{
1183	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
1184}
1185
1186
1187int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1188{
1189	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
1190	size_t outlen;
1191	int rc;
1192
1193	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
1194			  outbuf, sizeof(outbuf), &outlen);
1195	if (rc)
1196		goto fail;
1197
1198	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
1199		rc = -EIO;
1200		goto fail;
1201	}
1202
1203	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
1204
1205	return 0;
1206
1207fail:
1208	*id_out = -1;
1209	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1210	return rc;
1211}
1212
1213
1214int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
1215{
1216	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
1217	int rc;
1218
1219	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
1220
1221	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
1222			  NULL, 0, NULL);
1223	if (rc)
1224		goto fail;
1225
1226	return 0;
1227
1228fail:
1229	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1230	return rc;
1231}
1232
/* Ask the MC to flush every RX queue that has a flush pending.
 * Builds one MC_CMD_FLUSH_RX_QUEUES request containing the queue
 * index of each pending queue, clearing flush_pending and decrementing
 * efx->rxq_flush_pending as each is collected.
 *
 * NOTE(review): the pending state is consumed *before* the RPC is
 * issued, so if the RPC fails those queues are no longer marked
 * pending — presumably the caller retries or resets in that case;
 * confirm against the flush state machine in the callers.
 *
 * Returns the RPC result (0 on success); a failure also triggers
 * WARN_ON.
 */
int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	/* The request array must be able to hold one entry per channel */
	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	/* Request length covers only the entries actually filled in */
	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}
1264
1265int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
1266{
1267	int rc;
1268
1269	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
1270	if (rc)
1271		goto fail;
1272
1273	return 0;
1274
1275fail:
1276	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1277	return rc;
1278}
1279
1280