mcdi.c revision 369327fa65f20118571643d673b90d3700166e2d
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2011 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/delay.h>
11#include "net_driver.h"
12#include "nic.h"
13#include "io.h"
14#include "farch_regs.h"
15#include "mcdi_pcol.h"
16#include "phy.h"
17
18/**************************************************************************
19 *
20 * Management-Controller-to-Driver Interface
21 *
22 **************************************************************************
23 */
24
25#define MCDI_RPC_TIMEOUT       (10 * HZ)
26
27/* A reboot/assertion causes the MCDI status word to be set after the
28 * command word is set or a REBOOT event is sent. If we notice a reboot
29 * via these mechanisms then wait 10ms for the status word to be set. */
30#define MCDI_STATUS_DELAY_US		100
31#define MCDI_STATUS_DELAY_COUNT		100
32#define MCDI_STATUS_SLEEP_MS						\
33	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
34
35#define SEQ_MASK							\
36	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
37
38static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
39{
40	EFX_BUG_ON_PARANOID(!efx->mcdi);
41	return &efx->mcdi->iface;
42}
43
44int efx_mcdi_init(struct efx_nic *efx)
45{
46	struct efx_mcdi_iface *mcdi;
47
48	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
49	if (!efx->mcdi)
50		return -ENOMEM;
51
52	mcdi = efx_mcdi(efx);
53	init_waitqueue_head(&mcdi->wq);
54	spin_lock_init(&mcdi->iface_lock);
55	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
56	mcdi->mode = MCDI_MODE_POLL;
57
58	(void) efx_mcdi_poll_reboot(efx);
59
60	/* Recover from a failed assertion before probing */
61	return efx_mcdi_handle_assertion(efx);
62}
63
64void efx_mcdi_fini(struct efx_nic *efx)
65{
66	BUG_ON(efx->mcdi &&
67	       atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT);
68	kfree(efx->mcdi);
69}
70
/* Build the MCDI request header(s) and hand the request to the NIC-type
 * layer for transfer to the MC.  The caller must own the interface
 * (state != QUIESCENT) and have already incremented mcdi->seqno.
 */
static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
			    const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	/* In event mode, ask the MC to signal completion with an event */
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1: single header dword carries command and length */
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		hdr_len = 4;
	} else {
		/* MCDI v2: the v1 header escapes to MC_CMD_V2_EXTN and a
		 * second dword carries the real command code and length */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
}
114
115static int efx_mcdi_errno(unsigned int mcdi_err)
116{
117	switch (mcdi_err) {
118	case 0:
119		return 0;
120#define TRANSLATE_ERROR(name)					\
121	case MC_CMD_ERR_ ## name:				\
122		return -name;
123	TRANSLATE_ERROR(EPERM);
124	TRANSLATE_ERROR(ENOENT);
125	TRANSLATE_ERROR(EINTR);
126	TRANSLATE_ERROR(EAGAIN);
127	TRANSLATE_ERROR(EACCES);
128	TRANSLATE_ERROR(EBUSY);
129	TRANSLATE_ERROR(EINVAL);
130	TRANSLATE_ERROR(EDEADLK);
131	TRANSLATE_ERROR(ENOSYS);
132	TRANSLATE_ERROR(ETIME);
133	TRANSLATE_ERROR(EALREADY);
134	TRANSLATE_ERROR(ENOSPC);
135#undef TRANSLATE_ERROR
136	case MC_CMD_ERR_ALLOC_FAIL:
137		return -ENOBUFS;
138	case MC_CMD_ERR_MAC_EXIST:
139		return -EADDRINUSE;
140	default:
141		return -EPROTO;
142	}
143}
144
/* Parse the response header from shared memory and record the outcome
 * (resprc, resp_hdr_len, resp_data_len) in the MCDI state.
 * Caller must hold mcdi->iface_lock.
 */
static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		/* v1 response: length is in the first header dword */
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		/* v2 response: real length lives in the second header dword */
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		/* Error flagged but no error code supplied: treat as reboot */
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		/* First payload dword of an error response is the MCDI errno */
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}
182
/* Busy-wait for the current MCDI request to complete.
 * Returns 0 when a response (or reboot) has been recorded, like
 * wait_event_timeout(), or -ETIMEDOUT after MCDI_RPC_TIMEOUT.
 */
static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		/* The MC rebooted: record it as the request's outcome */
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		/* Order the completion-flag read against the response data
		 * reads that follow */
		rmb();
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}
233
234/* Test and clear MC-rebooted flag for this port/function; reset
235 * software state as necessary.
236 */
237int efx_mcdi_poll_reboot(struct efx_nic *efx)
238{
239	int rc;
240
241	if (!efx->mcdi)
242		return 0;
243
244	rc = efx->type->mcdi_poll_reboot(efx);
245	if (!rc)
246		return 0;
247
248	/* MAC statistics have been cleared on the NIC; clear our copy
249	 * so that efx_update_diff_stat() can continue to work.
250	 */
251	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
252
253	return rc;
254}
255
/* Take exclusive ownership of the MCDI interface, sleeping if another
 * requestor currently owns it.  The cmpxchg makes the QUIESCENT->RUNNING
 * transition atomic, so exactly one waiter wins each time the interface
 * is released.
 */
static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING. */
	wait_event(mcdi->wq,
		   atomic_cmpxchg(&mcdi->state,
				  MCDI_STATE_QUIESCENT,
				  MCDI_STATE_RUNNING)
		   == MCDI_STATE_QUIESCENT);
}
266
/* Sleep until the outstanding request completes (event mode), falling
 * back to polling if the interface was switched to polled mode while we
 * slept.  Returns 0 on completion or -ETIMEDOUT.
 */
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(
		    mcdi->wq,
		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
		    MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}
290
291static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
292{
293	/* If the interface is RUNNING, then move to COMPLETED and wake any
294	 * waiters. If the interface isn't in RUNNING then we've received a
295	 * duplicate completion after we've already transitioned back to
296	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
297	 * have failed the seqno check].
298	 */
299	if (atomic_cmpxchg(&mcdi->state,
300			   MCDI_STATE_RUNNING,
301			   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
302		wake_up(&mcdi->wq);
303		return true;
304	}
305
306	return false;
307}
308
/* Release ownership of the MCDI interface and wake the next requestor
 * blocked in efx_mcdi_acquire().
 */
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	wake_up(&mcdi->wq);
}
314
/* Handle a CMDDONE event: record the outcome of the matching request and
 * complete it.  A seqno mismatch indicates either a cancelled request
 * (accounted for via mcdi->credits) or a genuinely spurious completion.
 */
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	/* Complete outside the lock; efx_mcdi_complete() handles the
	 * state transition and wakeup */
	if (wake)
		efx_mcdi_complete(mcdi);
}
349
350int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
351		 const efx_dword_t *inbuf, size_t inlen,
352		 efx_dword_t *outbuf, size_t outlen,
353		 size_t *outlen_actual)
354{
355	int rc;
356
357	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
358	if (rc)
359		return rc;
360	return efx_mcdi_rpc_finish(efx, cmd, inlen,
361				   outbuf, outlen, outlen_actual);
362}
363
364int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
365		       const efx_dword_t *inbuf, size_t inlen)
366{
367	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
368
369	if (efx->type->mcdi_max_ver < 0 ||
370	     (efx->type->mcdi_max_ver < 2 &&
371	      cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
372		return -EINVAL;
373
374	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
375	    (efx->type->mcdi_max_ver < 2 &&
376	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
377		return -EMSGSIZE;
378
379	efx_mcdi_acquire(mcdi);
380
381	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
382	spin_lock_bh(&mcdi->iface_lock);
383	++mcdi->seqno;
384	spin_unlock_bh(&mcdi->iface_lock);
385
386	efx_mcdi_copyin(efx, cmd, inbuf, inlen);
387	return 0;
388}
389
/* Wait for a request posted by efx_mcdi_rpc_start() to complete, copy
 * out up to outlen bytes of response, and release the interface.
 * *outlen_actual (if non-NULL) receives the full response length, which
 * may exceed outlen.  Returns 0 or a negative errno; a fatal MC error
 * (-EIO/-EINTR) also schedules an MC reset unless the command was
 * MC_CMD_REBOOT, for which -EIO is the expected outcome.
 */
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t hdr_len, data_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		spin_unlock_bh(&mcdi->iface_lock);

		BUG_ON(rc > 0);

		if (rc == 0) {
			/* Copy at most outlen bytes; report the true length
			 * so the caller can detect truncation */
			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
						      min(outlen, data_len));
			if (outlen_actual != NULL)
				*outlen_actual = data_len;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);

		if (rc == -EIO || rc == -EINTR) {
			/* Give the MC time to post its status word after the
			 * reboot/assertion, then consume it */
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}
455
/* Switch the MCDI interface to polled completions.  Safe to call while
 * a request is outstanding; the in-flight request is force-completed so
 * its waiter re-checks the mode and polls instead.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}
479
/* Switch the MCDI interface to event-driven completions.  Unlike the
 * switch to polling, this must not happen mid-request, so the interface
 * is acquired (serialising against other requestors) for the change.
 */
void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}
503
/* Handle an MC reboot or assertion event (rc is -EIO or -EINTR).  Any
 * outstanding request is failed with rc; if nothing was outstanding, a
 * full MC-failure reset is scheduled instead.
 */
static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the mc
	 * reboot before or after the copyout? The best we can do always is
	 * just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			/* Fail the request; the credit swallows the seqno
			 * mismatch if a CMDDONE event arrives anyway */
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
	}

	spin_unlock(&mcdi->iface_lock);
}
548
/* Called from  falcon_process_eventq for MCDI events.
 * Dispatches each MC-originated event to the appropriate handler.
 */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		/* Completion of the outstanding request (event mode) */
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gather lazily.  We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		/* Function-level reset of a VF */
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;

	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}
606
607/**************************************************************************
608 *
609 * Specific request functions
610 *
611 **************************************************************************
612 */
613
614void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
615{
616	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
617	size_t outlength;
618	const __le16 *ver_words;
619	int rc;
620
621	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
622
623	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
624			  outbuf, sizeof(outbuf), &outlength);
625	if (rc)
626		goto fail;
627
628	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
629		rc = -EIO;
630		goto fail;
631	}
632
633	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
634	snprintf(buf, len, "%u.%u.%u.%u",
635		 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
636		 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
637	return;
638
639fail:
640	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
641	buf[0] = 0;
642}
643
644int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
645			bool *was_attached)
646{
647	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
648	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
649	size_t outlen;
650	int rc;
651
652	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
653		       driver_operating ? 1 : 0);
654	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
655	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
656
657	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
658			  outbuf, sizeof(outbuf), &outlen);
659	if (rc)
660		goto fail;
661	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
662		rc = -EIO;
663		goto fail;
664	}
665
666	if (was_attached != NULL)
667		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
668	return 0;
669
670fail:
671	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
672	return rc;
673}
674
675int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
676			   u16 *fw_subtype_list, u32 *capabilities)
677{
678	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
679	size_t outlen, i;
680	int port_num = efx_port_num(efx);
681	int rc;
682
683	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
684
685	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
686			  outbuf, sizeof(outbuf), &outlen);
687	if (rc)
688		goto fail;
689
690	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
691		rc = -EIO;
692		goto fail;
693	}
694
695	if (mac_address)
696		memcpy(mac_address,
697		       port_num ?
698		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
699		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
700		       ETH_ALEN);
701	if (fw_subtype_list) {
702		for (i = 0;
703		     i < MCDI_VAR_ARRAY_LEN(outlen,
704					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
705		     i++)
706			fw_subtype_list[i] = MCDI_ARRAY_WORD(
707				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
708		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
709			fw_subtype_list[i] = 0;
710	}
711	if (capabilities) {
712		if (port_num)
713			*capabilities = MCDI_DWORD(outbuf,
714					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
715		else
716			*capabilities = MCDI_DWORD(outbuf,
717					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
718	}
719
720	return 0;
721
722fail:
723	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
724		  __func__, rc, (int)outlen);
725
726	return rc;
727}
728
729int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
730{
731	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
732	u32 dest = 0;
733	int rc;
734
735	if (uart)
736		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
737	if (evq)
738		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
739
740	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
741	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
742
743	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
744
745	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
746			  NULL, 0, NULL);
747	if (rc)
748		goto fail;
749
750	return 0;
751
752fail:
753	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
754	return rc;
755}
756
757int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
758{
759	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
760	size_t outlen;
761	int rc;
762
763	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
764
765	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
766			  outbuf, sizeof(outbuf), &outlen);
767	if (rc)
768		goto fail;
769	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
770		rc = -EIO;
771		goto fail;
772	}
773
774	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
775	return 0;
776
777fail:
778	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
779		  __func__, rc);
780	return rc;
781}
782
783int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
784			size_t *size_out, size_t *erase_size_out,
785			bool *protected_out)
786{
787	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
788	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
789	size_t outlen;
790	int rc;
791
792	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
793
794	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
795			  outbuf, sizeof(outbuf), &outlen);
796	if (rc)
797		goto fail;
798	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
799		rc = -EIO;
800		goto fail;
801	}
802
803	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
804	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
805	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
806				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
807	return 0;
808
809fail:
810	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
811	return rc;
812}
813
814int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
815{
816	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
817	int rc;
818
819	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
820
821	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
822
823	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
824			  NULL, 0, NULL);
825	if (rc)
826		goto fail;
827
828	return 0;
829
830fail:
831	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
832	return rc;
833}
834
835int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
836			loff_t offset, u8 *buffer, size_t length)
837{
838	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
839	MCDI_DECLARE_BUF(outbuf,
840			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
841	size_t outlen;
842	int rc;
843
844	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
845	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
846	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
847
848	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
849			  outbuf, sizeof(outbuf), &outlen);
850	if (rc)
851		goto fail;
852
853	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
854	return 0;
855
856fail:
857	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
858	return rc;
859}
860
861int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
862			   loff_t offset, const u8 *buffer, size_t length)
863{
864	MCDI_DECLARE_BUF(inbuf,
865			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
866	int rc;
867
868	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
869	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
870	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
871	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
872
873	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
874
875	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
876			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
877			  NULL, 0, NULL);
878	if (rc)
879		goto fail;
880
881	return 0;
882
883fail:
884	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
885	return rc;
886}
887
888int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
889			 loff_t offset, size_t length)
890{
891	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
892	int rc;
893
894	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
895	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
896	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
897
898	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
899
900	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
901			  NULL, 0, NULL);
902	if (rc)
903		goto fail;
904
905	return 0;
906
907fail:
908	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
909	return rc;
910}
911
912int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
913{
914	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
915	int rc;
916
917	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
918
919	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
920
921	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
922			  NULL, 0, NULL);
923	if (rc)
924		goto fail;
925
926	return 0;
927
928fail:
929	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
930	return rc;
931}
932
933static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
934{
935	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
936	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
937	int rc;
938
939	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
940
941	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
942			  outbuf, sizeof(outbuf), NULL);
943	if (rc)
944		return rc;
945
946	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
947	case MC_CMD_NVRAM_TEST_PASS:
948	case MC_CMD_NVRAM_TEST_NOTSUPP:
949		return 0;
950	default:
951		return -EIO;
952	}
953}
954
955int efx_mcdi_nvram_test_all(struct efx_nic *efx)
956{
957	u32 nvram_types;
958	unsigned int type;
959	int rc;
960
961	rc = efx_mcdi_nvram_types(efx, &nvram_types);
962	if (rc)
963		goto fail1;
964
965	type = 0;
966	while (nvram_types != 0) {
967		if (nvram_types & 1) {
968			rc = efx_mcdi_nvram_test(efx, type);
969			if (rc)
970				goto fail2;
971		}
972		type++;
973		nvram_types >>= 1;
974	}
975
976	return 0;
977
978fail2:
979	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
980		  __func__, type);
981fail1:
982	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
983	return rc;
984}
985
/* Fetch and log any assertion state recorded by the MC, clearing it in
 * the process.  Returns 0 (including when no assertion was recorded) or
 * a negative errno.
 */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}
1042
/* Reboot the MC out of its assertion handler (no-op if it has already
 * rebooted).  Any return code is deliberately ignored.
 */
static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.  We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
			    NULL, 0, NULL);
}
1059
/* Report any stored MC assertion state and then reboot the MC out of
 * its assertion handler.  Returns 0 or a negative errno.
 */
int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc = efx_mcdi_read_assertion(efx);

	if (rc != 0)
		return rc;

	efx_mcdi_exit_assertion(efx);
	return 0;
}
1072
1073void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1074{
1075	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
1076	int rc;
1077
1078	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
1079	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
1080	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
1081
1082	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
1083
1084	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
1085
1086	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
1087			  NULL, 0, NULL);
1088	if (rc)
1089		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1090			  __func__, rc);
1091}
1092
1093static int efx_mcdi_reset_port(struct efx_nic *efx)
1094{
1095	int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
1096	if (rc)
1097		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
1098			  __func__, rc);
1099	return rc;
1100}
1101
/* Ask the MC to reboot itself.  Note the inverted success convention:
 * -EIO from the RPC is treated as success, while a clean RPC completion
 * means the MC did not actually reboot (presumably because a genuine
 * reboot kills the in-flight MCDI command — TODO confirm against the
 * MCDI transport behaviour).
 */
static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	/* REBOOT has no response payload; flags == 0 requests an
	 * unconditional reboot. */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
1119
/* Map a reset reason to the reset type actually performed.  Every
 * reason is mapped to RESET_TYPE_RECOVER_OR_ALL; @reason is
 * deliberately ignored.
 */
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}
1124
1125int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
1126{
1127	int rc;
1128
1129	/* Recover from a failed assertion pre-reset */
1130	rc = efx_mcdi_handle_assertion(efx);
1131	if (rc)
1132		return rc;
1133
1134	if (method == RESET_TYPE_WORLD)
1135		return efx_mcdi_reset_mc(efx);
1136	else
1137		return efx_mcdi_reset_port(efx);
1138}
1139
1140static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1141				   const u8 *mac, int *id_out)
1142{
1143	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
1144	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
1145	size_t outlen;
1146	int rc;
1147
1148	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
1149	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
1150		       MC_CMD_FILTER_MODE_SIMPLE);
1151	memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
1152
1153	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
1154			  outbuf, sizeof(outbuf), &outlen);
1155	if (rc)
1156		goto fail;
1157
1158	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
1159		rc = -EIO;
1160		goto fail;
1161	}
1162
1163	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
1164
1165	return 0;
1166
1167fail:
1168	*id_out = -1;
1169	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1170	return rc;
1171
1172}
1173
1174
1175int
1176efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,  const u8 *mac, int *id_out)
1177{
1178	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
1179}
1180
1181
1182int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1183{
1184	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
1185	size_t outlen;
1186	int rc;
1187
1188	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
1189			  outbuf, sizeof(outbuf), &outlen);
1190	if (rc)
1191		goto fail;
1192
1193	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
1194		rc = -EIO;
1195		goto fail;
1196	}
1197
1198	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
1199
1200	return 0;
1201
1202fail:
1203	*id_out = -1;
1204	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1205	return rc;
1206}
1207
1208
1209int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
1210{
1211	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
1212	int rc;
1213
1214	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
1215
1216	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
1217			  NULL, 0, NULL);
1218	if (rc)
1219		goto fail;
1220
1221	return 0;
1222
1223fail:
1224	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1225	return rc;
1226}
1227
/* Flush all RX queues that have a flush pending.
 * Collects the index of every RX queue whose ->flush_pending flag is
 * set — clearing the flag and decrementing efx->rxq_flush_pending as it
 * goes — then issues a single MC_CMD_FLUSH_RX_QUEUES for the lot.
 * Returns 0 on success or a negative error code from the RPC.
 */
int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	/* Worst case: one pending RX queue per channel */
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	/* The request buffer holds EFX_MAX_CHANNELS queue IDs, so that
	 * must fit within the command's maximum QID array length. */
	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	/* Send only the QIDs actually collected */
	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}
1259
1260int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
1261{
1262	int rc;
1263
1264	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
1265	if (rc)
1266		goto fail;
1267
1268	return 0;
1269
1270fail:
1271	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1272	return rc;
1273}
1274
1275