/* mcdi.c — revision c3cba721f1b761ca96f6fe437dec738db1069909 */
1/**************************************************************************** 2 * Driver for Solarflare Solarstorm network controllers and boards 3 * Copyright 2008-2011 Solarflare Communications Inc. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 as published 7 * by the Free Software Foundation, incorporated herein by reference. 8 */ 9 10#include <linux/delay.h> 11#include "net_driver.h" 12#include "nic.h" 13#include "io.h" 14#include "regs.h" 15#include "mcdi_pcol.h" 16#include "phy.h" 17 18/************************************************************************** 19 * 20 * Management-Controller-to-Driver Interface 21 * 22 ************************************************************************** 23 */ 24 25#define MCDI_RPC_TIMEOUT 10 /*seconds */ 26 27#define MCDI_PDU(efx) \ 28 (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST) 29#define MCDI_DOORBELL(efx) \ 30 (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST) 31#define MCDI_STATUS(efx) \ 32 (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST) 33 34/* A reboot/assertion causes the MCDI status word to be set after the 35 * command word is set or a REBOOT event is sent. If we notice a reboot 36 * via these mechanisms then wait 10ms for the status word to be set. 
*/ 37#define MCDI_STATUS_DELAY_US 100 38#define MCDI_STATUS_DELAY_COUNT 100 39#define MCDI_STATUS_SLEEP_MS \ 40 (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) 41 42#define SEQ_MASK \ 43 EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) 44 45static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) 46{ 47 struct siena_nic_data *nic_data; 48 EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); 49 nic_data = efx->nic_data; 50 return &nic_data->mcdi; 51} 52 53void efx_mcdi_init(struct efx_nic *efx) 54{ 55 struct efx_mcdi_iface *mcdi; 56 57 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 58 return; 59 60 mcdi = efx_mcdi(efx); 61 init_waitqueue_head(&mcdi->wq); 62 spin_lock_init(&mcdi->iface_lock); 63 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); 64 mcdi->mode = MCDI_MODE_POLL; 65 66 (void) efx_mcdi_poll_reboot(efx); 67} 68 69static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, 70 const u8 *inbuf, size_t inlen) 71{ 72 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 73 unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 74 unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); 75 unsigned int i; 76 efx_dword_t hdr; 77 u32 xflags, seqno; 78 79 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 80 BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN); 81 82 seqno = mcdi->seqno & SEQ_MASK; 83 xflags = 0; 84 if (mcdi->mode == MCDI_MODE_EVENTS) 85 xflags |= MCDI_HEADER_XFLAGS_EVREQ; 86 87 EFX_POPULATE_DWORD_6(hdr, 88 MCDI_HEADER_RESPONSE, 0, 89 MCDI_HEADER_RESYNC, 1, 90 MCDI_HEADER_CODE, cmd, 91 MCDI_HEADER_DATALEN, inlen, 92 MCDI_HEADER_SEQ, seqno, 93 MCDI_HEADER_XFLAGS, xflags); 94 95 efx_writed(efx, &hdr, pdu); 96 97 for (i = 0; i < inlen; i += 4) 98 _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); 99 100 /* Ensure the payload is written out before the header */ 101 wmb(); 102 103 /* ring the doorbell with a distinctive value */ 104 _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); 105} 106 107static void efx_mcdi_copyout(struct efx_nic 
*efx, u8 *outbuf, size_t outlen) 108{ 109 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 110 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 111 int i; 112 113 BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); 114 BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN); 115 116 for (i = 0; i < outlen; i += 4) 117 *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); 118} 119 120static int efx_mcdi_poll(struct efx_nic *efx) 121{ 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 123 unsigned int time, finish; 124 unsigned int respseq, respcmd, error; 125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 126 unsigned int rc, spins; 127 efx_dword_t reg; 128 129 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ 130 rc = -efx_mcdi_poll_reboot(efx); 131 if (rc) 132 goto out; 133 134 /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, 135 * because generally mcdi responses are fast. After that, back off 136 * and poll once a jiffy (approximately) 137 */ 138 spins = TICK_USEC; 139 finish = get_seconds() + MCDI_RPC_TIMEOUT; 140 141 while (1) { 142 if (spins != 0) { 143 --spins; 144 udelay(1); 145 } else { 146 schedule_timeout_uninterruptible(1); 147 } 148 149 time = get_seconds(); 150 151 rmb(); 152 efx_readd(efx, ®, pdu); 153 154 /* All 1's indicates that shared memory is in reset (and is 155 * not a valid header). 
Wait for it to come out reset before 156 * completing the command */ 157 if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff && 158 EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) 159 break; 160 161 if (time >= finish) 162 return -ETIMEDOUT; 163 } 164 165 mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); 166 respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); 167 respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); 168 error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); 169 170 if (error && mcdi->resplen == 0) { 171 netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); 172 rc = EIO; 173 } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { 174 netif_err(efx, hw, efx->net_dev, 175 "MC response mismatch tx seq 0x%x rx seq 0x%x\n", 176 respseq, mcdi->seqno); 177 rc = EIO; 178 } else if (error) { 179 efx_readd(efx, ®, pdu + 4); 180 switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { 181#define TRANSLATE_ERROR(name) \ 182 case MC_CMD_ERR_ ## name: \ 183 rc = name; \ 184 break 185 TRANSLATE_ERROR(ENOENT); 186 TRANSLATE_ERROR(EINTR); 187 TRANSLATE_ERROR(EACCES); 188 TRANSLATE_ERROR(EBUSY); 189 TRANSLATE_ERROR(EINVAL); 190 TRANSLATE_ERROR(EDEADLK); 191 TRANSLATE_ERROR(ENOSYS); 192 TRANSLATE_ERROR(ETIME); 193#undef TRANSLATE_ERROR 194 default: 195 rc = EIO; 196 break; 197 } 198 } else 199 rc = 0; 200 201out: 202 mcdi->resprc = rc; 203 if (rc) 204 mcdi->resplen = 0; 205 206 /* Return rc=0 like wait_event_timeout() */ 207 return 0; 208} 209 210/* Test and clear MC-rebooted flag for this port/function */ 211int efx_mcdi_poll_reboot(struct efx_nic *efx) 212{ 213 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx); 214 efx_dword_t reg; 215 uint32_t value; 216 217 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 218 return false; 219 220 efx_readd(efx, ®, addr); 221 value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); 222 223 if (value == 0) 224 return 0; 225 226 EFX_ZERO_DWORD(reg); 227 efx_writed(efx, ®, addr); 228 229 if (value == MC_STATUS_DWORD_ASSERT) 230 return -EINTR; 231 else 232 return -EIO; 233} 234 
235static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) 236{ 237 /* Wait until the interface becomes QUIESCENT and we win the race 238 * to mark it RUNNING. */ 239 wait_event(mcdi->wq, 240 atomic_cmpxchg(&mcdi->state, 241 MCDI_STATE_QUIESCENT, 242 MCDI_STATE_RUNNING) 243 == MCDI_STATE_QUIESCENT); 244} 245 246static int efx_mcdi_await_completion(struct efx_nic *efx) 247{ 248 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 249 250 if (wait_event_timeout( 251 mcdi->wq, 252 atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, 253 msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0) 254 return -ETIMEDOUT; 255 256 /* Check if efx_mcdi_set_mode() switched us back to polled completions. 257 * In which case, poll for completions directly. If efx_mcdi_ev_cpl() 258 * completed the request first, then we'll just end up completing the 259 * request again, which is safe. 260 * 261 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which 262 * wait_event_timeout() implicitly provides. 263 */ 264 if (mcdi->mode == MCDI_MODE_POLL) 265 return efx_mcdi_poll(efx); 266 267 return 0; 268} 269 270static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) 271{ 272 /* If the interface is RUNNING, then move to COMPLETED and wake any 273 * waiters. If the interface isn't in RUNNING then we've received a 274 * duplicate completion after we've already transitioned back to 275 * QUIESCENT. [A subsequent invocation would increment seqno, so would 276 * have failed the seqno check]. 
277 */ 278 if (atomic_cmpxchg(&mcdi->state, 279 MCDI_STATE_RUNNING, 280 MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) { 281 wake_up(&mcdi->wq); 282 return true; 283 } 284 285 return false; 286} 287 288static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) 289{ 290 atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); 291 wake_up(&mcdi->wq); 292} 293 294static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, 295 unsigned int datalen, unsigned int errno) 296{ 297 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 298 bool wake = false; 299 300 spin_lock(&mcdi->iface_lock); 301 302 if ((seqno ^ mcdi->seqno) & SEQ_MASK) { 303 if (mcdi->credits) 304 /* The request has been cancelled */ 305 --mcdi->credits; 306 else 307 netif_err(efx, hw, efx->net_dev, 308 "MC response mismatch tx seq 0x%x rx " 309 "seq 0x%x\n", seqno, mcdi->seqno); 310 } else { 311 mcdi->resprc = errno; 312 mcdi->resplen = datalen; 313 314 wake = true; 315 } 316 317 spin_unlock(&mcdi->iface_lock); 318 319 if (wake) 320 efx_mcdi_complete(mcdi); 321} 322 323int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, 324 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, 325 size_t *outlen_actual) 326{ 327 efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); 328 return efx_mcdi_rpc_finish(efx, cmd, inlen, 329 outbuf, outlen, outlen_actual); 330} 331 332void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, 333 size_t inlen) 334{ 335 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 336 337 BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); 338 339 efx_mcdi_acquire(mcdi); 340 341 /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ 342 spin_lock_bh(&mcdi->iface_lock); 343 ++mcdi->seqno; 344 spin_unlock_bh(&mcdi->iface_lock); 345 346 efx_mcdi_copyin(efx, cmd, inbuf, inlen); 347} 348 349int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, 350 u8 *outbuf, size_t outlen, size_t *outlen_actual) 351{ 352 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 353 int rc; 354 355 
BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); 356 357 if (mcdi->mode == MCDI_MODE_POLL) 358 rc = efx_mcdi_poll(efx); 359 else 360 rc = efx_mcdi_await_completion(efx); 361 362 if (rc != 0) { 363 /* Close the race with efx_mcdi_ev_cpl() executing just too late 364 * and completing a request we've just cancelled, by ensuring 365 * that the seqno check therein fails. 366 */ 367 spin_lock_bh(&mcdi->iface_lock); 368 ++mcdi->seqno; 369 ++mcdi->credits; 370 spin_unlock_bh(&mcdi->iface_lock); 371 372 netif_err(efx, hw, efx->net_dev, 373 "MC command 0x%x inlen %d mode %d timed out\n", 374 cmd, (int)inlen, mcdi->mode); 375 } else { 376 size_t resplen; 377 378 /* At the very least we need a memory barrier here to ensure 379 * we pick up changes from efx_mcdi_ev_cpl(). Protect against 380 * a spurious efx_mcdi_ev_cpl() running concurrently by 381 * acquiring the iface_lock. */ 382 spin_lock_bh(&mcdi->iface_lock); 383 rc = -mcdi->resprc; 384 resplen = mcdi->resplen; 385 spin_unlock_bh(&mcdi->iface_lock); 386 387 if (rc == 0) { 388 efx_mcdi_copyout(efx, outbuf, 389 min(outlen, mcdi->resplen + 3) & ~0x3); 390 if (outlen_actual != NULL) 391 *outlen_actual = resplen; 392 } else if (cmd == MC_CMD_REBOOT && rc == -EIO) 393 ; /* Don't reset if MC_CMD_REBOOT returns EIO */ 394 else if (rc == -EIO || rc == -EINTR) { 395 netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", 396 -rc); 397 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 398 } else 399 netif_dbg(efx, hw, efx->net_dev, 400 "MC command 0x%x inlen %d failed rc=%d\n", 401 cmd, (int)inlen, -rc); 402 403 if (rc == -EIO || rc == -EINTR) { 404 msleep(MCDI_STATUS_SLEEP_MS); 405 efx_mcdi_poll_reboot(efx); 406 } 407 } 408 409 efx_mcdi_release(mcdi); 410 return rc; 411} 412 413void efx_mcdi_mode_poll(struct efx_nic *efx) 414{ 415 struct efx_mcdi_iface *mcdi; 416 417 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 418 return; 419 420 mcdi = efx_mcdi(efx); 421 if (mcdi->mode == MCDI_MODE_POLL) 422 return; 423 424 /* We can switch from event 
completion to polled completion, because 425 * mcdi requests are always completed in shared memory. We do this by 426 * switching the mode to POLL'd then completing the request. 427 * efx_mcdi_await_completion() will then call efx_mcdi_poll(). 428 * 429 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), 430 * which efx_mcdi_complete() provides for us. 431 */ 432 mcdi->mode = MCDI_MODE_POLL; 433 434 efx_mcdi_complete(mcdi); 435} 436 437void efx_mcdi_mode_event(struct efx_nic *efx) 438{ 439 struct efx_mcdi_iface *mcdi; 440 441 if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) 442 return; 443 444 mcdi = efx_mcdi(efx); 445 446 if (mcdi->mode == MCDI_MODE_EVENTS) 447 return; 448 449 /* We can't switch from polled to event completion in the middle of a 450 * request, because the completion method is specified in the request. 451 * So acquire the interface to serialise the requestors. We don't need 452 * to acquire the iface_lock to change the mode here, but we do need a 453 * write memory barrier ensure that efx_mcdi_rpc() sees it, which 454 * efx_mcdi_acquire() provides. 455 */ 456 efx_mcdi_acquire(mcdi); 457 mcdi->mode = MCDI_MODE_EVENTS; 458 efx_mcdi_release(mcdi); 459} 460 461static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) 462{ 463 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 464 465 /* If there is an outstanding MCDI request, it has been terminated 466 * either by a BADASSERT or REBOOT event. If the mcdi interface is 467 * in polled mode, then do nothing because the MC reboot handler will 468 * set the header correctly. However, if the mcdi interface is waiting 469 * for a CMDDONE event it won't receive it [and since all MCDI events 470 * are sent to the same queue, we can't be racing with 471 * efx_mcdi_ev_cpl()] 472 * 473 * There's a race here with efx_mcdi_rpc(), because we might receive 474 * a REBOOT event *before* the request has been copied out. 
In polled 475 * mode (during startup) this is irrelevant, because efx_mcdi_complete() 476 * is ignored. In event mode, this condition is just an edge-case of 477 * receiving a REBOOT event after posting the MCDI request. Did the mc 478 * reboot before or after the copyout? The best we can do always is 479 * just return failure. 480 */ 481 spin_lock(&mcdi->iface_lock); 482 if (efx_mcdi_complete(mcdi)) { 483 if (mcdi->mode == MCDI_MODE_EVENTS) { 484 mcdi->resprc = rc; 485 mcdi->resplen = 0; 486 ++mcdi->credits; 487 } 488 } else { 489 int count; 490 491 /* Nobody was waiting for an MCDI request, so trigger a reset */ 492 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 493 494 /* Consume the status word since efx_mcdi_rpc_finish() won't */ 495 for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { 496 if (efx_mcdi_poll_reboot(efx)) 497 break; 498 udelay(MCDI_STATUS_DELAY_US); 499 } 500 } 501 502 spin_unlock(&mcdi->iface_lock); 503} 504 505static unsigned int efx_mcdi_event_link_speed[] = { 506 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, 507 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, 508 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, 509}; 510 511 512static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) 513{ 514 u32 flags, fcntl, speed, lpa; 515 516 speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); 517 EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); 518 speed = efx_mcdi_event_link_speed[speed]; 519 520 flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); 521 fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); 522 lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); 523 524 /* efx->link_state is only modified by efx_mcdi_phy_get_link(), 525 * which is only run after flushing the event queues. Therefore, it 526 * is safe to modify the link state outside of the mac_lock here. 
527 */ 528 efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); 529 530 efx_mcdi_phy_check_fcntl(efx, lpa); 531 532 efx_link_status_changed(efx); 533} 534 535/* Called from falcon_process_eventq for MCDI events */ 536void efx_mcdi_process_event(struct efx_channel *channel, 537 efx_qword_t *event) 538{ 539 struct efx_nic *efx = channel->efx; 540 int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); 541 u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); 542 543 switch (code) { 544 case MCDI_EVENT_CODE_BADSSERT: 545 netif_err(efx, hw, efx->net_dev, 546 "MC watchdog or assertion failure at 0x%x\n", data); 547 efx_mcdi_ev_death(efx, EINTR); 548 break; 549 550 case MCDI_EVENT_CODE_PMNOTICE: 551 netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); 552 break; 553 554 case MCDI_EVENT_CODE_CMDDONE: 555 efx_mcdi_ev_cpl(efx, 556 MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), 557 MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), 558 MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); 559 break; 560 561 case MCDI_EVENT_CODE_LINKCHANGE: 562 efx_mcdi_process_link_change(efx, event); 563 break; 564 case MCDI_EVENT_CODE_SENSOREVT: 565 efx_mcdi_sensor_event(efx, event); 566 break; 567 case MCDI_EVENT_CODE_SCHEDERR: 568 netif_info(efx, hw, efx->net_dev, 569 "MC Scheduler error address=0x%x\n", data); 570 break; 571 case MCDI_EVENT_CODE_REBOOT: 572 netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); 573 efx_mcdi_ev_death(efx, EIO); 574 break; 575 case MCDI_EVENT_CODE_MAC_STATS_DMA: 576 /* MAC stats are gather lazily. We can ignore this. 
*/ 577 break; 578 case MCDI_EVENT_CODE_FLR: 579 efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); 580 break; 581 582 default: 583 netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", 584 code); 585 } 586} 587 588/************************************************************************** 589 * 590 * Specific request functions 591 * 592 ************************************************************************** 593 */ 594 595void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) 596{ 597 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)]; 598 size_t outlength; 599 const __le16 *ver_words; 600 int rc; 601 602 BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); 603 604 rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, 605 outbuf, sizeof(outbuf), &outlength); 606 if (rc) 607 goto fail; 608 609 if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { 610 rc = -EIO; 611 goto fail; 612 } 613 614 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 615 snprintf(buf, len, "%u.%u.%u.%u", 616 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), 617 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); 618 return; 619 620fail: 621 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 622 buf[0] = 0; 623} 624 625int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 626 bool *was_attached) 627{ 628 u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; 629 u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; 630 size_t outlen; 631 int rc; 632 633 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, 634 driver_operating ? 
1 : 0); 635 MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); 636 637 rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), 638 outbuf, sizeof(outbuf), &outlen); 639 if (rc) 640 goto fail; 641 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { 642 rc = -EIO; 643 goto fail; 644 } 645 646 if (was_attached != NULL) 647 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); 648 return 0; 649 650fail: 651 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 652 return rc; 653} 654 655int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 656 u16 *fw_subtype_list, u32 *capabilities) 657{ 658 uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN]; 659 size_t outlen; 660 int port_num = efx_port_num(efx); 661 int offset; 662 int rc; 663 664 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); 665 666 rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, 667 outbuf, sizeof(outbuf), &outlen); 668 if (rc) 669 goto fail; 670 671 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { 672 rc = -EIO; 673 goto fail; 674 } 675 676 offset = (port_num) 677 ? 
MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 678 : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; 679 if (mac_address) 680 memcpy(mac_address, outbuf + offset, ETH_ALEN); 681 if (fw_subtype_list) 682 memcpy(fw_subtype_list, 683 outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, 684 MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM * 685 sizeof(fw_subtype_list[0])); 686 if (capabilities) { 687 if (port_num) 688 *capabilities = MCDI_DWORD(outbuf, 689 GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); 690 else 691 *capabilities = MCDI_DWORD(outbuf, 692 GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); 693 } 694 695 return 0; 696 697fail: 698 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", 699 __func__, rc, (int)outlen); 700 701 return rc; 702} 703 704int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) 705{ 706 u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; 707 u32 dest = 0; 708 int rc; 709 710 if (uart) 711 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; 712 if (evq) 713 dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; 714 715 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); 716 MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); 717 718 BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); 719 720 rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), 721 NULL, 0, NULL); 722 if (rc) 723 goto fail; 724 725 return 0; 726 727fail: 728 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 729 return rc; 730} 731 732int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) 733{ 734 u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; 735 size_t outlen; 736 int rc; 737 738 BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); 739 740 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, 741 outbuf, sizeof(outbuf), &outlen); 742 if (rc) 743 goto fail; 744 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { 745 rc = -EIO; 746 goto fail; 747 } 748 749 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); 750 return 0; 751 752fail: 753 netif_err(efx, hw, efx->net_dev, 
"%s: failed rc=%d\n", 754 __func__, rc); 755 return rc; 756} 757 758int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, 759 size_t *size_out, size_t *erase_size_out, 760 bool *protected_out) 761{ 762 u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; 763 u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; 764 size_t outlen; 765 int rc; 766 767 MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); 768 769 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), 770 outbuf, sizeof(outbuf), &outlen); 771 if (rc) 772 goto fail; 773 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { 774 rc = -EIO; 775 goto fail; 776 } 777 778 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); 779 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); 780 *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & 781 (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN)); 782 return 0; 783 784fail: 785 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 786 return rc; 787} 788 789int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) 790{ 791 u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; 792 int rc; 793 794 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); 795 796 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); 797 798 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), 799 NULL, 0, NULL); 800 if (rc) 801 goto fail; 802 803 return 0; 804 805fail: 806 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 807 return rc; 808} 809 810int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, 811 loff_t offset, u8 *buffer, size_t length) 812{ 813 u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; 814 u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; 815 size_t outlen; 816 int rc; 817 818 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); 819 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); 820 MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); 821 822 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), 823 outbuf, 
sizeof(outbuf), &outlen); 824 if (rc) 825 goto fail; 826 827 memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); 828 return 0; 829 830fail: 831 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 832 return rc; 833} 834 835int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, 836 loff_t offset, const u8 *buffer, size_t length) 837{ 838 u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; 839 int rc; 840 841 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); 842 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); 843 MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); 844 memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); 845 846 BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); 847 848 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, 849 ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), 850 NULL, 0, NULL); 851 if (rc) 852 goto fail; 853 854 return 0; 855 856fail: 857 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 858 return rc; 859} 860 861int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, 862 loff_t offset, size_t length) 863{ 864 u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; 865 int rc; 866 867 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); 868 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); 869 MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); 870 871 BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); 872 873 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), 874 NULL, 0, NULL); 875 if (rc) 876 goto fail; 877 878 return 0; 879 880fail: 881 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 882 return rc; 883} 884 885int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) 886{ 887 u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; 888 int rc; 889 890 MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); 891 892 BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); 893 894 rc = efx_mcdi_rpc(efx, 
MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), 895 NULL, 0, NULL); 896 if (rc) 897 goto fail; 898 899 return 0; 900 901fail: 902 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 903 return rc; 904} 905 906static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) 907{ 908 u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; 909 u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; 910 int rc; 911 912 MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); 913 914 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), 915 outbuf, sizeof(outbuf), NULL); 916 if (rc) 917 return rc; 918 919 switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { 920 case MC_CMD_NVRAM_TEST_PASS: 921 case MC_CMD_NVRAM_TEST_NOTSUPP: 922 return 0; 923 default: 924 return -EIO; 925 } 926} 927 928int efx_mcdi_nvram_test_all(struct efx_nic *efx) 929{ 930 u32 nvram_types; 931 unsigned int type; 932 int rc; 933 934 rc = efx_mcdi_nvram_types(efx, &nvram_types); 935 if (rc) 936 goto fail1; 937 938 type = 0; 939 while (nvram_types != 0) { 940 if (nvram_types & 1) { 941 rc = efx_mcdi_nvram_test(efx, type); 942 if (rc) 943 goto fail2; 944 } 945 type++; 946 nvram_types >>= 1; 947 } 948 949 return 0; 950 951fail2: 952 netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", 953 __func__, type); 954fail1: 955 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 956 return rc; 957} 958 959static int efx_mcdi_read_assertion(struct efx_nic *efx) 960{ 961 u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; 962 u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; 963 unsigned int flags, index, ofst; 964 const char *reason; 965 size_t outlen; 966 int retry; 967 int rc; 968 969 /* Attempt to read any stored assertion state before we reboot 970 * the mcfw out of the assertion handler. Retry twice, once 971 * because a boot-time assertion might cause this command to fail 972 * with EINTR. And once again because GET_ASSERTS can race with 973 * MC_CMD_REBOOT running on the other port. 
*/ 974 retry = 2; 975 do { 976 MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); 977 rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, 978 inbuf, MC_CMD_GET_ASSERTS_IN_LEN, 979 outbuf, sizeof(outbuf), &outlen); 980 } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); 981 982 if (rc) 983 return rc; 984 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) 985 return -EIO; 986 987 /* Print out any recorded assertion state */ 988 flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); 989 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) 990 return 0; 991 992 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) 993 ? "system-level assertion" 994 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) 995 ? "thread-level assertion" 996 : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) 997 ? "watchdog reset" 998 : "unknown assertion"; 999 netif_err(efx, hw, efx->net_dev, 1000 "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, 1001 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), 1002 MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); 1003 1004 /* Print out the registers */ 1005 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; 1006 for (index = 1; index < 32; index++) { 1007 netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, 1008 MCDI_DWORD2(outbuf, ofst)); 1009 ofst += sizeof(efx_dword_t); 1010 } 1011 1012 return 0; 1013} 1014 1015static void efx_mcdi_exit_assertion(struct efx_nic *efx) 1016{ 1017 u8 inbuf[MC_CMD_REBOOT_IN_LEN]; 1018 1019 /* If the MC is running debug firmware, it might now be 1020 * waiting for a debugger to attach, but we just want it to 1021 * reboot. We set a flag that makes the command a no-op if it 1022 * has already done so. We don't know what return code to 1023 * expect (0 or -EIO), so ignore it. 
1024 */ 1025 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); 1026 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 1027 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); 1028 (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, 1029 NULL, 0, NULL); 1030} 1031 1032int efx_mcdi_handle_assertion(struct efx_nic *efx) 1033{ 1034 int rc; 1035 1036 rc = efx_mcdi_read_assertion(efx); 1037 if (rc) 1038 return rc; 1039 1040 efx_mcdi_exit_assertion(efx); 1041 1042 return 0; 1043} 1044 1045void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) 1046{ 1047 u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; 1048 int rc; 1049 1050 BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); 1051 BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); 1052 BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); 1053 1054 BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); 1055 1056 MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); 1057 1058 rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), 1059 NULL, 0, NULL); 1060 if (rc) 1061 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1062 __func__, rc); 1063} 1064 1065int efx_mcdi_reset_port(struct efx_nic *efx) 1066{ 1067 int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL); 1068 if (rc) 1069 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", 1070 __func__, rc); 1071 return rc; 1072} 1073 1074int efx_mcdi_reset_mc(struct efx_nic *efx) 1075{ 1076 u8 inbuf[MC_CMD_REBOOT_IN_LEN]; 1077 int rc; 1078 1079 BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); 1080 MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); 1081 rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), 1082 NULL, 0, NULL); 1083 /* White is black, and up is down */ 1084 if (rc == -EIO) 1085 return 0; 1086 if (rc == 0) 1087 rc = -EIO; 1088 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1089 return rc; 1090} 1091 1092static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, 1093 const u8 *mac, int *id_out) 1094{ 1095 u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; 1096 u8 
outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; 1097 size_t outlen; 1098 int rc; 1099 1100 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); 1101 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, 1102 MC_CMD_FILTER_MODE_SIMPLE); 1103 memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); 1104 1105 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), 1106 outbuf, sizeof(outbuf), &outlen); 1107 if (rc) 1108 goto fail; 1109 1110 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { 1111 rc = -EIO; 1112 goto fail; 1113 } 1114 1115 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); 1116 1117 return 0; 1118 1119fail: 1120 *id_out = -1; 1121 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1122 return rc; 1123 1124} 1125 1126 1127int 1128efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) 1129{ 1130 return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); 1131} 1132 1133 1134int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) 1135{ 1136 u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; 1137 size_t outlen; 1138 int rc; 1139 1140 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, 1141 outbuf, sizeof(outbuf), &outlen); 1142 if (rc) 1143 goto fail; 1144 1145 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { 1146 rc = -EIO; 1147 goto fail; 1148 } 1149 1150 *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); 1151 1152 return 0; 1153 1154fail: 1155 *id_out = -1; 1156 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1157 return rc; 1158} 1159 1160 1161int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) 1162{ 1163 u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; 1164 int rc; 1165 1166 MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); 1167 1168 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), 1169 NULL, 0, NULL); 1170 if (rc) 1171 goto fail; 1172 1173 return 0; 1174 1175fail: 1176 netif_err(efx, hw, 
efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1177 return rc; 1178} 1179 1180int efx_mcdi_flush_rxqs(struct efx_nic *efx) 1181{ 1182 struct efx_channel *channel; 1183 struct efx_rx_queue *rx_queue; 1184 __le32 *qid; 1185 int rc, count; 1186 1187 qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL); 1188 if (qid == NULL) 1189 return -ENOMEM; 1190 1191 count = 0; 1192 efx_for_each_channel(channel, efx) { 1193 efx_for_each_channel_rx_queue(rx_queue, channel) { 1194 if (rx_queue->flush_pending) { 1195 rx_queue->flush_pending = false; 1196 atomic_dec(&efx->rxq_flush_pending); 1197 qid[count++] = cpu_to_le32( 1198 efx_rx_queue_index(rx_queue)); 1199 } 1200 } 1201 } 1202 1203 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid, 1204 count * sizeof(*qid), NULL, 0, NULL); 1205 WARN_ON(rc > 0); 1206 1207 kfree(qid); 1208 1209 return rc; 1210} 1211 1212int efx_mcdi_wol_filter_reset(struct efx_nic *efx) 1213{ 1214 int rc; 1215 1216 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); 1217 if (rc) 1218 goto fail; 1219 1220 return 0; 1221 1222fail: 1223 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 1224 return rc; 1225} 1226 1227