efx.c revision f8f3b5ae3ea45ef6b00b471fed0fc90552a3c4af
1/**************************************************************************** 2 * Driver for Solarflare network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2005-2013 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11#include <linux/module.h> 12#include <linux/pci.h> 13#include <linux/netdevice.h> 14#include <linux/etherdevice.h> 15#include <linux/delay.h> 16#include <linux/notifier.h> 17#include <linux/ip.h> 18#include <linux/tcp.h> 19#include <linux/in.h> 20#include <linux/ethtool.h> 21#include <linux/topology.h> 22#include <linux/gfp.h> 23#include <linux/aer.h> 24#include <linux/interrupt.h> 25#include "net_driver.h" 26#include "efx.h" 27#include "nic.h" 28#include "selftest.h" 29 30#include "mcdi.h" 31#include "workarounds.h" 32 33/************************************************************************** 34 * 35 * Type name strings 36 * 37 ************************************************************************** 38 */ 39 40/* Loopback mode names (see LOOPBACK_MODE()) */ 41const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; 42const char *const efx_loopback_mode_names[] = { 43 [LOOPBACK_NONE] = "NONE", 44 [LOOPBACK_DATA] = "DATAPATH", 45 [LOOPBACK_GMAC] = "GMAC", 46 [LOOPBACK_XGMII] = "XGMII", 47 [LOOPBACK_XGXS] = "XGXS", 48 [LOOPBACK_XAUI] = "XAUI", 49 [LOOPBACK_GMII] = "GMII", 50 [LOOPBACK_SGMII] = "SGMII", 51 [LOOPBACK_XGBR] = "XGBR", 52 [LOOPBACK_XFI] = "XFI", 53 [LOOPBACK_XAUI_FAR] = "XAUI_FAR", 54 [LOOPBACK_GMII_FAR] = "GMII_FAR", 55 [LOOPBACK_SGMII_FAR] = "SGMII_FAR", 56 [LOOPBACK_XFI_FAR] = "XFI_FAR", 57 [LOOPBACK_GPHY] = "GPHY", 58 [LOOPBACK_PHYXS] = "PHYXS", 59 [LOOPBACK_PCS] = "PCS", 60 [LOOPBACK_PMAPMD] = "PMA/PMD", 61 [LOOPBACK_XPORT] = "XPORT", 62 [LOOPBACK_XGMII_WS] = "XGMII_WS", 63 [LOOPBACK_XAUI_WS] = "XAUI_WS", 64 [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", 65 [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", 66 [LOOPBACK_GMII_WS] = "GMII_WS", 67 [LOOPBACK_XFI_WS] = "XFI_WS", 68 [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", 69 [LOOPBACK_PHYXS_WS] = "PHYXS_WS", 70}; 71 72const unsigned int efx_reset_type_max = RESET_TYPE_MAX; 73const char *const efx_reset_type_names[] = { 74 [RESET_TYPE_INVISIBLE] = "INVISIBLE", 75 [RESET_TYPE_ALL] = "ALL", 76 [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", 77 [RESET_TYPE_WORLD] = "WORLD", 78 [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", 79 [RESET_TYPE_DISABLE] = "DISABLE", 80 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", 81 [RESET_TYPE_INT_ERROR] = "INT_ERROR", 82 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", 83 [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", 84 [RESET_TYPE_TX_SKIP] = "TX_SKIP", 85 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", 86 [RESET_TYPE_MC_BIST] = "MC_BIST", 87}; 88 89/* Reset workqueue. If any NIC has a hardware failure then a reset will be 90 * queued onto this work queue. This is not a per-nic work queue, because 91 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. 92 */ 93static struct workqueue_struct *reset_workqueue; 94 95/* How often and how many times to poll for a reset while waiting for a 96 * BIST that another function started to complete. 97 */ 98#define BIST_WAIT_DELAY_MS 100 99#define BIST_WAIT_DELAY_COUNT 100 100 101/************************************************************************** 102 * 103 * Configurable values 104 * 105 *************************************************************************/ 106 107/* 108 * Use separate channels for TX and RX events 109 * 110 * Set this to 1 to use separate channels for TX and RX. It allows us 111 * to control interrupt affinity separately for TX and RX. 112 * 113 * This is only used in MSI-X interrupt mode 114 */ 115static bool separate_tx_channels; 116module_param(separate_tx_channels, bool, 0444); 117MODULE_PARM_DESC(separate_tx_channels, 118 "Use separate channels for TX and RX"); 119 120/* This is the weight assigned to each of the (per-channel) virtual 121 * NAPI devices. 122 */ 123static int napi_weight = 64; 124 125/* This is the time (in jiffies) between invocations of the hardware 126 * monitor. 127 * On Falcon-based NICs, this will: 128 * - Check the on-board hardware monitor; 129 * - Poll the link state and reconfigure the hardware as necessary. 130 * On Siena-based NICs for power systems with EEH support, this will give EEH a 131 * chance to start. 132 */ 133static unsigned int efx_monitor_interval = 1 * HZ; 134 135/* Initial interrupt moderation settings. They can be modified after 136 * module load with ethtool. 137 * 138 * The default for RX should strike a balance between increasing the 139 * round-trip latency and reducing overhead. 140 */ 141static unsigned int rx_irq_mod_usec = 60; 142 143/* Initial interrupt moderation settings. They can be modified after 144 * module load with ethtool. 145 * 146 * This default is chosen to ensure that a 10G link does not go idle 147 * while a TX queue is stopped after it has become full. A queue is 148 * restarted when it drops below half full. The time this takes (assuming 149 * worst case 3 descriptors per packet and 1024 descriptors) is 150 * 512 / 3 * 1.2 = 205 usec. 151 */ 152static unsigned int tx_irq_mod_usec = 150; 153 154/* This is the first interrupt mode to try out of: 155 * 0 => MSI-X 156 * 1 => MSI 157 * 2 => legacy 158 */ 159static unsigned int interrupt_mode; 160 161/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), 162 * i.e. the number of CPUs among which we may distribute simultaneous 163 * interrupt handling. 164 * 165 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. 166 * The default (0) means to assign an interrupt to each core. 167 */ 168static unsigned int rss_cpus; 169module_param(rss_cpus, uint, 0444); 170MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); 171 172static bool phy_flash_cfg; 173module_param(phy_flash_cfg, bool, 0644); 174MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); 175 176static unsigned irq_adapt_low_thresh = 8000; 177module_param(irq_adapt_low_thresh, uint, 0644); 178MODULE_PARM_DESC(irq_adapt_low_thresh, 179 "Threshold score for reducing IRQ moderation"); 180 181static unsigned irq_adapt_high_thresh = 16000; 182module_param(irq_adapt_high_thresh, uint, 0644); 183MODULE_PARM_DESC(irq_adapt_high_thresh, 184 "Threshold score for increasing IRQ moderation"); 185 186static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 187 NETIF_MSG_LINK | NETIF_MSG_IFDOWN | 188 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | 189 NETIF_MSG_TX_ERR | NETIF_MSG_HW); 190module_param(debug, uint, 0); 191MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); 192 193/************************************************************************** 194 * 195 * Utility functions and prototypes 196 * 197 *************************************************************************/ 198 199static int efx_soft_enable_interrupts(struct efx_nic *efx); 200static void efx_soft_disable_interrupts(struct efx_nic *efx); 201static void efx_remove_channel(struct efx_channel *channel); 202static void efx_remove_channels(struct efx_nic *efx); 203static const struct efx_channel_type efx_default_channel_type; 204static void efx_remove_port(struct efx_nic *efx); 205static void efx_init_napi_channel(struct efx_channel *channel); 206static void efx_fini_napi(struct efx_nic *efx); 207static void efx_fini_napi_channel(struct efx_channel *channel); 208static void efx_fini_struct(struct efx_nic *efx); 209static void efx_start_all(struct efx_nic *efx); 210static void efx_stop_all(struct efx_nic *efx); 211 212#define EFX_ASSERT_RESET_SERIALISED(efx) \ 213 do { \ 214 if ((efx->state == STATE_READY) || \ 215 (efx->state == STATE_RECOVERY) || \ 216 (efx->state == STATE_DISABLED)) \ 217 ASSERT_RTNL(); \ 218 } while (0) 219 220static int efx_check_disabled(struct efx_nic *efx) 221{ 222 if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { 223 netif_err(efx, drv, efx->net_dev, 224 "device is disabled due to earlier errors\n"); 225 return -EIO; 226 } 227 return 0; 228} 229 230/************************************************************************** 231 * 232 * Event queue processing 233 * 234 *************************************************************************/ 235 236/* Process channel's event queue 237 * 238 * This function is responsible for processing the event queue of a 239 * single channel. The caller must guarantee that this function will 240 * never be concurrently called more than once on the same channel, 241 * though different channels may be being processed concurrently. 242 */ 243static int efx_process_channel(struct efx_channel *channel, int budget) 244{ 245 int spent; 246 247 if (unlikely(!channel->enabled)) 248 return 0; 249 250 spent = efx_nic_process_eventq(channel, budget); 251 if (spent && efx_channel_has_rx_queue(channel)) { 252 struct efx_rx_queue *rx_queue = 253 efx_channel_get_rx_queue(channel); 254 255 efx_rx_flush_packet(channel); 256 efx_fast_push_rx_descriptors(rx_queue, true); 257 } 258 259 return spent; 260} 261 262/* NAPI poll handler 263 * 264 * NAPI guarantees serialisation of polls of the same device, which 265 * provides the guarantee required by efx_process_channel(). 266 */ 267static int efx_poll(struct napi_struct *napi, int budget) 268{ 269 struct efx_channel *channel = 270 container_of(napi, struct efx_channel, napi_str); 271 struct efx_nic *efx = channel->efx; 272 int spent; 273 274 netif_vdbg(efx, intr, efx->net_dev, 275 "channel %d NAPI poll executing on CPU %d\n", 276 channel->channel, raw_smp_processor_id()); 277 278 spent = efx_process_channel(channel, budget); 279 280 if (spent < budget) { 281 if (efx_channel_has_rx_queue(channel) && 282 efx->irq_rx_adaptive && 283 unlikely(++channel->irq_count == 1000)) { 284 if (unlikely(channel->irq_mod_score < 285 irq_adapt_low_thresh)) { 286 if (channel->irq_moderation > 1) { 287 channel->irq_moderation -= 1; 288 efx->type->push_irq_moderation(channel); 289 } 290 } else if (unlikely(channel->irq_mod_score > 291 irq_adapt_high_thresh)) { 292 if (channel->irq_moderation < 293 efx->irq_rx_moderation) { 294 channel->irq_moderation += 1; 295 efx->type->push_irq_moderation(channel); 296 } 297 } 298 channel->irq_count = 0; 299 channel->irq_mod_score = 0; 300 } 301 302 efx_filter_rfs_expire(channel); 303 304 /* There is no race here; although napi_disable() will 305 * only wait for napi_complete(), this isn't a problem 306 * since efx_nic_eventq_read_ack() will have no effect if 307 * interrupts have already been disabled. 308 */ 309 napi_complete(napi); 310 efx_nic_eventq_read_ack(channel); 311 } 312 313 return spent; 314} 315 316/* Create event queue 317 * Event queue memory allocations are done only once. If the channel 318 * is reset, the memory buffer will be reused; this guards against 319 * errors during channel reset and also simplifies interrupt handling. 320 */ 321static int efx_probe_eventq(struct efx_channel *channel) 322{ 323 struct efx_nic *efx = channel->efx; 324 unsigned long entries; 325 326 netif_dbg(efx, probe, efx->net_dev, 327 "chan %d create event queue\n", channel->channel); 328 329 /* Build an event queue with room for one event per tx and rx buffer, 330 * plus some extra for link state events and MCDI completions. */ 331 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); 332 EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE); 333 channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; 334 335 return efx_nic_probe_eventq(channel); 336} 337 338/* Prepare channel's event queue */ 339static int efx_init_eventq(struct efx_channel *channel) 340{ 341 struct efx_nic *efx = channel->efx; 342 int rc; 343 344 EFX_WARN_ON_PARANOID(channel->eventq_init); 345 346 netif_dbg(efx, drv, efx->net_dev, 347 "chan %d init event queue\n", channel->channel); 348 349 rc = efx_nic_init_eventq(channel); 350 if (rc == 0) { 351 efx->type->push_irq_moderation(channel); 352 channel->eventq_read_ptr = 0; 353 channel->eventq_init = true; 354 } 355 return rc; 356} 357 358/* Enable event queue processing and NAPI */ 359static void efx_start_eventq(struct efx_channel *channel) 360{ 361 netif_dbg(channel->efx, ifup, channel->efx->net_dev, 362 "chan %d start event queue\n", channel->channel); 363 364 /* Make sure the NAPI handler sees the enabled flag set */ 365 channel->enabled = true; 366 smp_wmb(); 367 368 napi_enable(&channel->napi_str); 369 efx_nic_eventq_read_ack(channel); 370} 371 372/* Disable event queue processing and NAPI */ 373static void efx_stop_eventq(struct efx_channel *channel) 374{ 375 if (!channel->enabled) 376 return; 377 378 napi_disable(&channel->napi_str); 379 channel->enabled = false; 380} 381 382static void efx_fini_eventq(struct efx_channel *channel) 383{ 384 if (!channel->eventq_init) 385 return; 386 387 netif_dbg(channel->efx, drv, channel->efx->net_dev, 388 "chan %d fini event queue\n", channel->channel); 389 390 efx_nic_fini_eventq(channel); 391 channel->eventq_init = false; 392} 393 394static void efx_remove_eventq(struct efx_channel *channel) 395{ 396 netif_dbg(channel->efx, drv, channel->efx->net_dev, 397 "chan %d remove event queue\n", channel->channel); 398 399 efx_nic_remove_eventq(channel); 400} 401 402/************************************************************************** 403 * 404 * Channel handling 405 * 406 *************************************************************************/ 407 408/* Allocate and initialise a channel structure. */ 409static struct efx_channel * 410efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) 411{ 412 struct efx_channel *channel; 413 struct efx_rx_queue *rx_queue; 414 struct efx_tx_queue *tx_queue; 415 int j; 416 417 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 418 if (!channel) 419 return NULL; 420 421 channel->efx = efx; 422 channel->channel = i; 423 channel->type = &efx_default_channel_type; 424 425 for (j = 0; j < EFX_TXQ_TYPES; j++) { 426 tx_queue = &channel->tx_queue[j]; 427 tx_queue->efx = efx; 428 tx_queue->queue = i * EFX_TXQ_TYPES + j; 429 tx_queue->channel = channel; 430 } 431 432 rx_queue = &channel->rx_queue; 433 rx_queue->efx = efx; 434 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, 435 (unsigned long)rx_queue); 436 437 return channel; 438} 439 440/* Allocate and initialise a channel structure, copying parameters 441 * (but not resources) from an old channel structure. 442 */ 443static struct efx_channel * 444efx_copy_channel(const struct efx_channel *old_channel) 445{ 446 struct efx_channel *channel; 447 struct efx_rx_queue *rx_queue; 448 struct efx_tx_queue *tx_queue; 449 int j; 450 451 channel = kmalloc(sizeof(*channel), GFP_KERNEL); 452 if (!channel) 453 return NULL; 454 455 *channel = *old_channel; 456 457 channel->napi_dev = NULL; 458 memset(&channel->eventq, 0, sizeof(channel->eventq)); 459 460 for (j = 0; j < EFX_TXQ_TYPES; j++) { 461 tx_queue = &channel->tx_queue[j]; 462 if (tx_queue->channel) 463 tx_queue->channel = channel; 464 tx_queue->buffer = NULL; 465 memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); 466 } 467 468 rx_queue = &channel->rx_queue; 469 rx_queue->buffer = NULL; 470 memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); 471 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, 472 (unsigned long)rx_queue); 473 474 return channel; 475} 476 477static int efx_probe_channel(struct efx_channel *channel) 478{ 479 struct efx_tx_queue *tx_queue; 480 struct efx_rx_queue *rx_queue; 481 int rc; 482 483 netif_dbg(channel->efx, probe, channel->efx->net_dev, 484 "creating channel %d\n", channel->channel); 485 486 rc = channel->type->pre_probe(channel); 487 if (rc) 488 goto fail; 489 490 rc = efx_probe_eventq(channel); 491 if (rc) 492 goto fail; 493 494 efx_for_each_channel_tx_queue(tx_queue, channel) { 495 rc = efx_probe_tx_queue(tx_queue); 496 if (rc) 497 goto fail; 498 } 499 500 efx_for_each_channel_rx_queue(rx_queue, channel) { 501 rc = efx_probe_rx_queue(rx_queue); 502 if (rc) 503 goto fail; 504 } 505 506 channel->n_rx_frm_trunc = 0; 507 508 return 0; 509 510fail: 511 efx_remove_channel(channel); 512 return rc; 513} 514 515static void 516efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len) 517{ 518 struct efx_nic *efx = channel->efx; 519 const char *type; 520 int number; 521 522 number = channel->channel; 523 if (efx->tx_channel_offset == 0) { 524 type = ""; 525 } else if (channel->channel < efx->tx_channel_offset) { 526 type = "-rx"; 527 } else { 528 type = "-tx"; 529 number -= efx->tx_channel_offset; 530 } 531 snprintf(buf, len, "%s%s-%d", efx->name, type, number); 532} 533 534static void efx_set_channel_names(struct efx_nic *efx) 535{ 536 struct efx_channel *channel; 537 538 efx_for_each_channel(channel, efx) 539 channel->type->get_name(channel, 540 efx->msi_context[channel->channel].name, 541 sizeof(efx->msi_context[0].name)); 542} 543 544static int efx_probe_channels(struct efx_nic *efx) 545{ 546 struct efx_channel *channel; 547 int rc; 548 549 /* Restart special buffer allocation */ 550 efx->next_buffer_table = 0; 551 552 /* Probe channels in reverse, so that any 'extra' channels 553 * use the start of the buffer table. This allows the traffic 554 * channels to be resized without moving them or wasting the 555 * entries before them. 556 */ 557 efx_for_each_channel_rev(channel, efx) { 558 rc = efx_probe_channel(channel); 559 if (rc) { 560 netif_err(efx, probe, efx->net_dev, 561 "failed to create channel %d\n", 562 channel->channel); 563 goto fail; 564 } 565 } 566 efx_set_channel_names(efx); 567 568 return 0; 569 570fail: 571 efx_remove_channels(efx); 572 return rc; 573} 574 575/* Channels are shutdown and reinitialised whilst the NIC is running 576 * to propagate configuration changes (mtu, checksum offload), or 577 * to clear hardware error conditions 578 */ 579static void efx_start_datapath(struct efx_nic *efx) 580{ 581 bool old_rx_scatter = efx->rx_scatter; 582 struct efx_tx_queue *tx_queue; 583 struct efx_rx_queue *rx_queue; 584 struct efx_channel *channel; 585 size_t rx_buf_len; 586 587 /* Calculate the rx buffer allocation parameters required to 588 * support the current MTU, including padding for header 589 * alignment and overruns. 590 */ 591 efx->rx_dma_len = (efx->rx_prefix_size + 592 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 593 efx->type->rx_buffer_padding); 594 rx_buf_len = (sizeof(struct efx_rx_page_state) + 595 efx->rx_ip_align + efx->rx_dma_len); 596 if (rx_buf_len <= PAGE_SIZE) { 597 efx->rx_scatter = efx->type->always_rx_scatter; 598 efx->rx_buffer_order = 0; 599 } else if (efx->type->can_rx_scatter) { 600 BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); 601 BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + 602 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE, 603 EFX_RX_BUF_ALIGNMENT) > 604 PAGE_SIZE); 605 efx->rx_scatter = true; 606 efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; 607 efx->rx_buffer_order = 0; 608 } else { 609 efx->rx_scatter = false; 610 efx->rx_buffer_order = get_order(rx_buf_len); 611 } 612 613 efx_rx_config_page_split(efx); 614 if (efx->rx_buffer_order) 615 netif_dbg(efx, drv, efx->net_dev, 616 "RX buf len=%u; page order=%u batch=%u\n", 617 efx->rx_dma_len, efx->rx_buffer_order, 618 efx->rx_pages_per_batch); 619 else 620 netif_dbg(efx, drv, efx->net_dev, 621 "RX buf len=%u step=%u bpp=%u; page batch=%u\n", 622 efx->rx_dma_len, efx->rx_page_buf_step, 623 efx->rx_bufs_per_page, efx->rx_pages_per_batch); 624 625 /* RX filters may also have scatter-enabled flags */ 626 if (efx->rx_scatter != old_rx_scatter) 627 efx->type->filter_update_rx_scatter(efx); 628 629 /* We must keep at least one descriptor in a TX ring empty. 630 * We could avoid this when the queue size does not exactly 631 * match the hardware ring size, but it's not that important. 632 * Therefore we stop the queue when one more skb might fill 633 * the ring completely. We wake it when half way back to 634 * empty. 635 */ 636 efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx); 637 efx->txq_wake_thresh = efx->txq_stop_thresh / 2; 638 639 /* Initialise the channels */ 640 efx_for_each_channel(channel, efx) { 641 efx_for_each_channel_tx_queue(tx_queue, channel) { 642 efx_init_tx_queue(tx_queue); 643 atomic_inc(&efx->active_queues); 644 } 645 646 efx_for_each_channel_rx_queue(rx_queue, channel) { 647 efx_init_rx_queue(rx_queue); 648 atomic_inc(&efx->active_queues); 649 efx_stop_eventq(channel); 650 efx_fast_push_rx_descriptors(rx_queue, false); 651 efx_start_eventq(channel); 652 } 653 654 WARN_ON(channel->rx_pkt_n_frags); 655 } 656 657 efx_ptp_start_datapath(efx); 658 659 if (netif_device_present(efx->net_dev)) 660 netif_tx_wake_all_queues(efx->net_dev); 661} 662 663static void efx_stop_datapath(struct efx_nic *efx) 664{ 665 struct efx_channel *channel; 666 struct efx_tx_queue *tx_queue; 667 struct efx_rx_queue *rx_queue; 668 int rc; 669 670 EFX_ASSERT_RESET_SERIALISED(efx); 671 BUG_ON(efx->port_enabled); 672 673 efx_ptp_stop_datapath(efx); 674 675 /* Stop RX refill */ 676 efx_for_each_channel(channel, efx) { 677 efx_for_each_channel_rx_queue(rx_queue, channel) 678 rx_queue->refill_enabled = false; 679 } 680 681 efx_for_each_channel(channel, efx) { 682 /* RX packet processing is pipelined, so wait for the 683 * NAPI handler to complete. At least event queue 0 684 * might be kept active by non-data events, so don't 685 * use napi_synchronize() but actually disable NAPI 686 * temporarily. 687 */ 688 if (efx_channel_has_rx_queue(channel)) { 689 efx_stop_eventq(channel); 690 efx_start_eventq(channel); 691 } 692 } 693 694 rc = efx->type->fini_dmaq(efx); 695 if (rc && EFX_WORKAROUND_7803(efx)) { 696 /* Schedule a reset to recover from the flush failure. The 697 * descriptor caches reference memory we're about to free, 698 * but falcon_reconfigure_mac_wrapper() won't reconnect 699 * the MACs because of the pending reset. 700 */ 701 netif_err(efx, drv, efx->net_dev, 702 "Resetting to recover from flush failure\n"); 703 efx_schedule_reset(efx, RESET_TYPE_ALL); 704 } else if (rc) { 705 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); 706 } else { 707 netif_dbg(efx, drv, efx->net_dev, 708 "successfully flushed all queues\n"); 709 } 710 711 efx_for_each_channel(channel, efx) { 712 efx_for_each_channel_rx_queue(rx_queue, channel) 713 efx_fini_rx_queue(rx_queue); 714 efx_for_each_possible_channel_tx_queue(tx_queue, channel) 715 efx_fini_tx_queue(tx_queue); 716 } 717} 718 719static void efx_remove_channel(struct efx_channel *channel) 720{ 721 struct efx_tx_queue *tx_queue; 722 struct efx_rx_queue *rx_queue; 723 724 netif_dbg(channel->efx, drv, channel->efx->net_dev, 725 "destroy chan %d\n", channel->channel); 726 727 efx_for_each_channel_rx_queue(rx_queue, channel) 728 efx_remove_rx_queue(rx_queue); 729 efx_for_each_possible_channel_tx_queue(tx_queue, channel) 730 efx_remove_tx_queue(tx_queue); 731 efx_remove_eventq(channel); 732 channel->type->post_remove(channel); 733} 734 735static void efx_remove_channels(struct efx_nic *efx) 736{ 737 struct efx_channel *channel; 738 739 efx_for_each_channel(channel, efx) 740 efx_remove_channel(channel); 741} 742 743int 744efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) 745{ 746 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; 747 u32 old_rxq_entries, old_txq_entries; 748 unsigned i, next_buffer_table = 0; 749 int rc, rc2; 750 751 rc = efx_check_disabled(efx); 752 if (rc) 753 return rc; 754 755 /* Not all channels should be reallocated. We must avoid 756 * reallocating their buffer table entries. 757 */ 758 efx_for_each_channel(channel, efx) { 759 struct efx_rx_queue *rx_queue; 760 struct efx_tx_queue *tx_queue; 761 762 if (channel->type->copy) 763 continue; 764 next_buffer_table = max(next_buffer_table, 765 channel->eventq.index + 766 channel->eventq.entries); 767 efx_for_each_channel_rx_queue(rx_queue, channel) 768 next_buffer_table = max(next_buffer_table, 769 rx_queue->rxd.index + 770 rx_queue->rxd.entries); 771 efx_for_each_channel_tx_queue(tx_queue, channel) 772 next_buffer_table = max(next_buffer_table, 773 tx_queue->txd.index + 774 tx_queue->txd.entries); 775 } 776 777 efx_device_detach_sync(efx); 778 efx_stop_all(efx); 779 efx_soft_disable_interrupts(efx); 780 781 /* Clone channels (where possible) */ 782 memset(other_channel, 0, sizeof(other_channel)); 783 for (i = 0; i < efx->n_channels; i++) { 784 channel = efx->channel[i]; 785 if (channel->type->copy) 786 channel = channel->type->copy(channel); 787 if (!channel) { 788 rc = -ENOMEM; 789 goto out; 790 } 791 other_channel[i] = channel; 792 } 793 794 /* Swap entry counts and channel pointers */ 795 old_rxq_entries = efx->rxq_entries; 796 old_txq_entries = efx->txq_entries; 797 efx->rxq_entries = rxq_entries; 798 efx->txq_entries = txq_entries; 799 for (i = 0; i < efx->n_channels; i++) { 800 channel = efx->channel[i]; 801 efx->channel[i] = other_channel[i]; 802 other_channel[i] = channel; 803 } 804 805 /* Restart buffer table allocation */ 806 efx->next_buffer_table = next_buffer_table; 807 808 for (i = 0; i < efx->n_channels; i++) { 809 channel = efx->channel[i]; 810 if (!channel->type->copy) 811 continue; 812 rc = efx_probe_channel(channel); 813 if (rc) 814 goto rollback; 815 efx_init_napi_channel(efx->channel[i]); 816 } 817 818out: 819 /* Destroy unused channel structures */ 820 for (i = 0; i < efx->n_channels; i++) { 821 channel = other_channel[i]; 822 if (channel && channel->type->copy) { 823 efx_fini_napi_channel(channel); 824 efx_remove_channel(channel); 825 kfree(channel); 826 } 827 } 828 829 rc2 = efx_soft_enable_interrupts(efx); 830 if (rc2) { 831 rc = rc ? rc : rc2; 832 netif_err(efx, drv, efx->net_dev, 833 "unable to restart interrupts on channel reallocation\n"); 834 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 835 } else { 836 efx_start_all(efx); 837 netif_device_attach(efx->net_dev); 838 } 839 return rc; 840 841rollback: 842 /* Swap back */ 843 efx->rxq_entries = old_rxq_entries; 844 efx->txq_entries = old_txq_entries; 845 for (i = 0; i < efx->n_channels; i++) { 846 channel = efx->channel[i]; 847 efx->channel[i] = other_channel[i]; 848 other_channel[i] = channel; 849 } 850 goto out; 851} 852 853void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) 854{ 855 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); 856} 857 858static const struct efx_channel_type efx_default_channel_type = { 859 .pre_probe = efx_channel_dummy_op_int, 860 .post_remove = efx_channel_dummy_op_void, 861 .get_name = efx_get_channel_name, 862 .copy = efx_copy_channel, 863 .keep_eventq = false, 864}; 865 866int efx_channel_dummy_op_int(struct efx_channel *channel) 867{ 868 return 0; 869} 870 871void efx_channel_dummy_op_void(struct efx_channel *channel) 872{ 873} 874 875/************************************************************************** 876 * 877 * Port handling 878 * 879 **************************************************************************/ 880 881/* This ensures that the kernel is kept informed (via 882 * netif_carrier_on/off) of the link status, and also maintains the 883 * link status's stop on the port's TX queue. 884 */ 885void efx_link_status_changed(struct efx_nic *efx) 886{ 887 struct efx_link_state *link_state = &efx->link_state; 888 889 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure 890 * that no events are triggered between unregister_netdev() and the 891 * driver unloading. A more general condition is that NETDEV_CHANGE 892 * can only be generated between NETDEV_UP and NETDEV_DOWN */ 893 if (!netif_running(efx->net_dev)) 894 return; 895 896 if (link_state->up != netif_carrier_ok(efx->net_dev)) { 897 efx->n_link_state_changes++; 898 899 if (link_state->up) 900 netif_carrier_on(efx->net_dev); 901 else 902 netif_carrier_off(efx->net_dev); 903 } 904 905 /* Status message for kernel log */ 906 if (link_state->up) 907 netif_info(efx, link, efx->net_dev, 908 "link up at %uMbps %s-duplex (MTU %d)\n", 909 link_state->speed, link_state->fd ? "full" : "half", 910 efx->net_dev->mtu); 911 else 912 netif_info(efx, link, efx->net_dev, "link down\n"); 913} 914 915void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) 916{ 917 efx->link_advertising = advertising; 918 if (advertising) { 919 if (advertising & ADVERTISED_Pause) 920 efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); 921 else 922 efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); 923 if (advertising & ADVERTISED_Asym_Pause) 924 efx->wanted_fc ^= EFX_FC_TX; 925 } 926} 927 928void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) 929{ 930 efx->wanted_fc = wanted_fc; 931 if (efx->link_advertising) { 932 if (wanted_fc & EFX_FC_RX) 933 efx->link_advertising |= (ADVERTISED_Pause | 934 ADVERTISED_Asym_Pause); 935 else 936 efx->link_advertising &= ~(ADVERTISED_Pause | 937 ADVERTISED_Asym_Pause); 938 if (wanted_fc & EFX_FC_TX) 939 efx->link_advertising ^= ADVERTISED_Asym_Pause; 940 } 941} 942 943static void efx_fini_port(struct efx_nic *efx); 944 945/* Push loopback/power/transmit disable settings to the PHY, and reconfigure 946 * the MAC appropriately. All other PHY configuration changes are pushed 947 * through phy_op->set_settings(), and pushed asynchronously to the MAC 948 * through efx_monitor(). 949 * 950 * Callers must hold the mac_lock 951 */ 952int __efx_reconfigure_port(struct efx_nic *efx) 953{ 954 enum efx_phy_mode phy_mode; 955 int rc; 956 957 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 958 959 /* Disable PHY transmit in mac level loopbacks */ 960 phy_mode = efx->phy_mode; 961 if (LOOPBACK_INTERNAL(efx)) 962 efx->phy_mode |= PHY_MODE_TX_DISABLED; 963 else 964 efx->phy_mode &= ~PHY_MODE_TX_DISABLED; 965 966 rc = efx->type->reconfigure_port(efx); 967 968 if (rc) 969 efx->phy_mode = phy_mode; 970 971 return rc; 972} 973 974/* Reinitialise the MAC to pick up new PHY settings, even if the port is 975 * disabled. */ 976int efx_reconfigure_port(struct efx_nic *efx) 977{ 978 int rc; 979 980 EFX_ASSERT_RESET_SERIALISED(efx); 981 982 mutex_lock(&efx->mac_lock); 983 rc = __efx_reconfigure_port(efx); 984 mutex_unlock(&efx->mac_lock); 985 986 return rc; 987} 988 989/* Asynchronous work item for changing MAC promiscuity and multicast 990 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current 991 * MAC directly. */ 992static void efx_mac_work(struct work_struct *data) 993{ 994 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); 995 996 mutex_lock(&efx->mac_lock); 997 if (efx->port_enabled) 998 efx->type->reconfigure_mac(efx); 999 mutex_unlock(&efx->mac_lock); 1000} 1001 1002static int efx_probe_port(struct efx_nic *efx) 1003{ 1004 int rc; 1005 1006 netif_dbg(efx, probe, efx->net_dev, "create port\n"); 1007 1008 if (phy_flash_cfg) 1009 efx->phy_mode = PHY_MODE_SPECIAL; 1010 1011 /* Connect up MAC/PHY operations table */ 1012 rc = efx->type->probe_port(efx); 1013 if (rc) 1014 return rc; 1015 1016 /* Initialise MAC address to permanent address */ 1017 memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN); 1018 1019 return 0; 1020} 1021 1022static int efx_init_port(struct efx_nic *efx) 1023{ 1024 int rc; 1025 1026 netif_dbg(efx, drv, efx->net_dev, "init port\n"); 1027 1028 mutex_lock(&efx->mac_lock); 1029 1030 rc = efx->phy_op->init(efx); 1031 if (rc) 1032 goto fail1; 1033 1034 efx->port_initialized = true; 1035 1036 /* Reconfigure the MAC before creating dma queues (required for 1037 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ 1038 efx->type->reconfigure_mac(efx); 1039 1040 /* Ensure the PHY advertises the correct flow control settings */ 1041 rc = efx->phy_op->reconfigure(efx); 1042 if (rc) 1043 goto fail2; 1044 1045 mutex_unlock(&efx->mac_lock); 1046 return 0; 1047 1048fail2: 1049 efx->phy_op->fini(efx); 1050fail1: 1051 mutex_unlock(&efx->mac_lock); 1052 return rc; 1053} 1054 1055static void efx_start_port(struct efx_nic *efx) 1056{ 1057 netif_dbg(efx, ifup, efx->net_dev, "start port\n"); 1058 BUG_ON(efx->port_enabled); 1059 1060 mutex_lock(&efx->mac_lock); 1061 efx->port_enabled = true; 1062 1063 /* efx_mac_work() might have been scheduled after efx_stop_port(), 1064 * and then cancelled by efx_flush_all() */ 1065 efx->type->reconfigure_mac(efx); 1066 1067 mutex_unlock(&efx->mac_lock); 1068} 1069 1070/* Prevent efx_mac_work() and efx_monitor() from working */ 1071static void efx_stop_port(struct efx_nic *efx) 1072{ 1073 netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); 1074 1075 mutex_lock(&efx->mac_lock); 1076 efx->port_enabled = false; 1077 mutex_unlock(&efx->mac_lock); 1078 1079 /* Serialise against efx_set_multicast_list() */ 1080 netif_addr_lock_bh(efx->net_dev); 1081 netif_addr_unlock_bh(efx->net_dev); 1082} 1083 1084static void efx_fini_port(struct efx_nic *efx) 1085{ 1086 netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); 1087 1088 if (!efx->port_initialized) 1089 return; 1090 1091 efx->phy_op->fini(efx); 1092 efx->port_initialized = false; 1093 1094 efx->link_state.up = false; 1095 efx_link_status_changed(efx); 1096} 1097 1098static void efx_remove_port(struct efx_nic *efx) 1099{ 1100 netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); 1101 1102 efx->type->remove_port(efx); 1103} 1104 1105/************************************************************************** 1106 * 1107 * NIC handling 1108 * 1109 **************************************************************************/ 1110 1111/* This configures the PCI device to enable I/O and DMA. */ 1112static int efx_init_io(struct efx_nic *efx) 1113{ 1114 struct pci_dev *pci_dev = efx->pci_dev; 1115 dma_addr_t dma_mask = efx->type->max_dma_mask; 1116 unsigned int mem_map_size = efx->type->mem_map_size(efx); 1117 int rc; 1118 1119 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); 1120 1121 rc = pci_enable_device(pci_dev); 1122 if (rc) { 1123 netif_err(efx, probe, efx->net_dev, 1124 "failed to enable PCI device\n"); 1125 goto fail1; 1126 } 1127 1128 pci_set_master(pci_dev); 1129 1130 /* Set the PCI DMA mask. Try all possibilities from our 1131 * genuine mask down to 32 bits, because some architectures 1132 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit 1133 * masks event though they reject 46 bit masks. 1134 */ 1135 while (dma_mask > 0x7fffffffUL) { 1136 if (dma_supported(&pci_dev->dev, dma_mask)) { 1137 rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); 1138 if (rc == 0) 1139 break; 1140 } 1141 dma_mask >>= 1; 1142 } 1143 if (rc) { 1144 netif_err(efx, probe, efx->net_dev, 1145 "could not find a suitable DMA mask\n"); 1146 goto fail2; 1147 } 1148 netif_dbg(efx, probe, efx->net_dev, 1149 "using DMA mask %llx\n", (unsigned long long) dma_mask); 1150 1151 efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); 1152 rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); 1153 if (rc) { 1154 netif_err(efx, probe, efx->net_dev, 1155 "request for memory BAR failed\n"); 1156 rc = -EIO; 1157 goto fail3; 1158 } 1159 efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); 1160 if (!efx->membase) { 1161 netif_err(efx, probe, efx->net_dev, 1162 "could not map memory BAR at %llx+%x\n", 1163 (unsigned long long)efx->membase_phys, mem_map_size); 1164 rc = -ENOMEM; 1165 goto fail4; 1166 } 1167 netif_dbg(efx, probe, efx->net_dev, 1168 "memory BAR at %llx+%x (virtual %p)\n", 1169 (unsigned long long)efx->membase_phys, mem_map_size, 1170 efx->membase); 1171 1172 return 0; 1173 1174 fail4: 1175 pci_release_region(efx->pci_dev, EFX_MEM_BAR); 1176 fail3: 1177 efx->membase_phys = 0; 1178 fail2: 1179 pci_disable_device(efx->pci_dev); 1180 fail1: 1181 return rc; 1182} 1183 1184static void efx_fini_io(struct efx_nic *efx) 1185{ 1186 netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); 1187 1188 if (efx->membase) { 1189 iounmap(efx->membase); 1190 efx->membase = NULL; 1191 } 1192 1193 if (efx->membase_phys) { 1194 pci_release_region(efx->pci_dev, EFX_MEM_BAR); 1195 efx->membase_phys = 0; 1196 } 1197 1198 pci_disable_device(efx->pci_dev); 1199} 1200 1201static unsigned int efx_wanted_parallelism(struct efx_nic *efx) 1202{ 1203 cpumask_var_t thread_mask; 1204 unsigned int count; 1205 int cpu; 1206 1207 if (rss_cpus) { 1208 count = rss_cpus; 1209 } else { 1210 if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) { 1211 netif_warn(efx, probe, efx->net_dev, 1212 "RSS disabled due to allocation failure\n"); 1213 return 1; 1214 } 1215 1216 count = 0; 1217 for_each_online_cpu(cpu) { 1218 if (!cpumask_test_cpu(cpu, thread_mask)) { 1219 ++count; 1220 cpumask_or(thread_mask, thread_mask, 1221 topology_thread_cpumask(cpu)); 1222 } 1223 } 1224 1225 free_cpumask_var(thread_mask); 1226 } 1227 1228 /* If RSS is requested for the PF *and* VFs then we can't write RSS 1229 * table entries that are inaccessible to VFs 1230 */ 1231 if (efx_sriov_wanted(efx) && efx_vf_size(efx) > 1 && 1232 count > efx_vf_size(efx)) { 1233 netif_warn(efx, probe, efx->net_dev, 1234 "Reducing number of RSS channels from %u to %u for " 1235 "VF support. Increase vf-msix-limit to use more " 1236 "channels on the PF.\n", 1237 count, efx_vf_size(efx)); 1238 count = efx_vf_size(efx); 1239 } 1240 1241 return count; 1242} 1243 1244/* Probe the number and type of interrupts we are able to obtain, and 1245 * the resulting numbers of channels and RX queues. 1246 */ 1247static int efx_probe_interrupts(struct efx_nic *efx) 1248{ 1249 unsigned int extra_channels = 0; 1250 unsigned int i, j; 1251 int rc; 1252 1253 for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) 1254 if (efx->extra_channel_type[i]) 1255 ++extra_channels; 1256 1257 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { 1258 struct msix_entry xentries[EFX_MAX_CHANNELS]; 1259 unsigned int n_channels; 1260 1261 n_channels = efx_wanted_parallelism(efx); 1262 if (separate_tx_channels) 1263 n_channels *= 2; 1264 n_channels += extra_channels; 1265 n_channels = min(n_channels, efx->max_channels); 1266 1267 for (i = 0; i < n_channels; i++) 1268 xentries[i].entry = i; 1269 rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); 1270 if (rc > 0) { 1271 netif_err(efx, drv, efx->net_dev, 1272 "WARNING: Insufficient MSI-X vectors" 1273 " available (%d < %u).\n", rc, n_channels); 1274 netif_err(efx, drv, efx->net_dev, 1275 "WARNING: Performance may be reduced.\n"); 1276 EFX_BUG_ON_PARANOID(rc >= n_channels); 1277 n_channels = rc; 1278 rc = pci_enable_msix(efx->pci_dev, xentries, 1279 n_channels); 1280 } 1281 1282 if (rc == 0) { 1283 efx->n_channels = n_channels; 1284 if (n_channels > extra_channels) 1285 n_channels -= extra_channels; 1286 if (separate_tx_channels) { 1287 efx->n_tx_channels = max(n_channels / 2, 1U); 1288 efx->n_rx_channels = max(n_channels - 1289 efx->n_tx_channels, 1290 1U); 1291 } else { 1292 efx->n_tx_channels = n_channels; 1293 efx->n_rx_channels = n_channels; 1294 } 1295 for (i = 0; i < efx->n_channels; i++) 1296 efx_get_channel(efx, i)->irq = 1297 xentries[i].vector; 1298 } else { 1299 /* Fall back to single channel MSI */ 1300 efx->interrupt_mode = EFX_INT_MODE_MSI; 1301 netif_err(efx, drv, efx->net_dev, 1302 "could not enable MSI-X\n"); 1303 } 1304 } 1305 1306 /* Try single interrupt MSI */ 1307 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { 1308 efx->n_channels = 1; 1309 efx->n_rx_channels = 1; 1310 efx->n_tx_channels = 1; 1311 rc = pci_enable_msi(efx->pci_dev); 1312 if (rc == 0) { 1313 efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; 1314 } else { 1315 netif_err(efx, drv, efx->net_dev, 1316 "could not enable MSI\n"); 1317 efx->interrupt_mode = EFX_INT_MODE_LEGACY; 1318 } 1319 } 1320 1321 /* Assume legacy interrupts */ 1322 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { 1323 efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); 1324 efx->n_rx_channels = 1; 1325 efx->n_tx_channels = 1; 1326 efx->legacy_irq = efx->pci_dev->irq; 1327 } 1328 1329 /* Assign extra channels if possible */ 1330 j = efx->n_channels; 1331 for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) { 1332 if (!efx->extra_channel_type[i]) 1333 continue; 1334 if (efx->interrupt_mode != EFX_INT_MODE_MSIX || 1335 efx->n_channels <= extra_channels) { 1336 efx->extra_channel_type[i]->handle_no_channel(efx); 1337 } else { 1338 --j; 1339 efx_get_channel(efx, j)->type = 1340 efx->extra_channel_type[i]; 1341 } 1342 } 1343 1344 /* RSS might be usable on VFs even if it is disabled on the PF */ 1345 efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ? 1346 efx->n_rx_channels : efx_vf_size(efx)); 1347 1348 return 0; 1349} 1350 1351static int efx_soft_enable_interrupts(struct efx_nic *efx) 1352{ 1353 struct efx_channel *channel, *end_channel; 1354 int rc; 1355 1356 BUG_ON(efx->state == STATE_DISABLED); 1357 1358 efx->irq_soft_enabled = true; 1359 smp_wmb(); 1360 1361 efx_for_each_channel(channel, efx) { 1362 if (!channel->type->keep_eventq) { 1363 rc = efx_init_eventq(channel); 1364 if (rc) 1365 goto fail; 1366 } 1367 efx_start_eventq(channel); 1368 } 1369 1370 efx_mcdi_mode_event(efx); 1371 1372 return 0; 1373fail: 1374 end_channel = channel; 1375 efx_for_each_channel(channel, efx) { 1376 if (channel == end_channel) 1377 break; 1378 efx_stop_eventq(channel); 1379 if (!channel->type->keep_eventq) 1380 efx_fini_eventq(channel); 1381 } 1382 1383 return rc; 1384} 1385 1386static void efx_soft_disable_interrupts(struct efx_nic *efx) 1387{ 1388 struct efx_channel *channel; 1389 1390 if (efx->state == STATE_DISABLED) 1391 return; 1392 1393 efx_mcdi_mode_poll(efx); 1394 1395 efx->irq_soft_enabled = false; 1396 smp_wmb(); 1397 1398 if (efx->legacy_irq) 1399 synchronize_irq(efx->legacy_irq); 1400 1401 efx_for_each_channel(channel, efx) { 1402 if (channel->irq) 1403 synchronize_irq(channel->irq); 1404 1405 efx_stop_eventq(channel); 1406 if (!channel->type->keep_eventq) 1407 efx_fini_eventq(channel); 1408 } 1409 1410 /* Flush the asynchronous MCDI request queue */ 1411 efx_mcdi_flush_async(efx); 1412} 1413 1414static int efx_enable_interrupts(struct efx_nic *efx) 1415{ 1416 struct efx_channel *channel, *end_channel; 1417 int rc; 1418 1419 BUG_ON(efx->state == STATE_DISABLED); 1420 1421 if (efx->eeh_disabled_legacy_irq) { 1422 enable_irq(efx->legacy_irq); 1423 efx->eeh_disabled_legacy_irq = false; 1424 } 1425 1426 efx->type->irq_enable_master(efx); 1427 1428 efx_for_each_channel(channel, efx) { 1429 if (channel->type->keep_eventq) { 1430 rc = efx_init_eventq(channel); 1431 if (rc) 1432 goto fail; 1433 } 1434 } 1435 1436 rc = efx_soft_enable_interrupts(efx); 1437 if (rc) 1438 goto fail; 1439 1440 return 0; 1441 1442fail: 1443 end_channel = channel; 1444 efx_for_each_channel(channel, efx) { 1445 if (channel == end_channel) 1446 break; 1447 if (channel->type->keep_eventq) 1448 efx_fini_eventq(channel); 1449 } 1450 1451 efx->type->irq_disable_non_ev(efx); 1452 1453 return rc; 1454} 1455 1456static void efx_disable_interrupts(struct efx_nic *efx) 1457{ 1458 struct efx_channel *channel; 1459 1460 efx_soft_disable_interrupts(efx); 1461 1462 efx_for_each_channel(channel, efx) { 1463 if (channel->type->keep_eventq) 1464 efx_fini_eventq(channel); 1465 } 1466 1467 efx->type->irq_disable_non_ev(efx); 1468} 1469 1470static void efx_remove_interrupts(struct efx_nic *efx) 1471{ 1472 struct efx_channel *channel; 1473 1474 /* Remove MSI/MSI-X interrupts */ 1475 efx_for_each_channel(channel, efx) 1476 channel->irq = 0; 1477 pci_disable_msi(efx->pci_dev); 1478 pci_disable_msix(efx->pci_dev); 1479 1480 /* Remove legacy interrupt */ 1481 efx->legacy_irq = 0; 1482} 1483 1484static void efx_set_channels(struct efx_nic *efx) 1485{ 1486 struct efx_channel *channel; 1487 struct efx_tx_queue *tx_queue; 1488 1489 efx->tx_channel_offset = 1490 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1491 1492 /* We need to mark which channels really have RX and TX 1493 * queues, and adjust the TX queue numbers if we have separate 1494 * RX-only and TX-only channels. 1495 */ 1496 efx_for_each_channel(channel, efx) { 1497 if (channel->channel < efx->n_rx_channels) 1498 channel->rx_queue.core_index = channel->channel; 1499 else 1500 channel->rx_queue.core_index = -1; 1501 1502 efx_for_each_channel_tx_queue(tx_queue, channel) 1503 tx_queue->queue -= (efx->tx_channel_offset * 1504 EFX_TXQ_TYPES); 1505 } 1506} 1507 1508static int efx_probe_nic(struct efx_nic *efx) 1509{ 1510 size_t i; 1511 int rc; 1512 1513 netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); 1514 1515 /* Carry out hardware-type specific initialisation */ 1516 rc = efx->type->probe(efx); 1517 if (rc) 1518 return rc; 1519 1520 /* Determine the number of channels and queues by trying to hook 1521 * in MSI-X interrupts. */ 1522 rc = efx_probe_interrupts(efx); 1523 if (rc) 1524 goto fail1; 1525 1526 rc = efx->type->dimension_resources(efx); 1527 if (rc) 1528 goto fail2; 1529 1530 if (efx->n_channels > 1) 1531 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); 1532 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) 1533 efx->rx_indir_table[i] = 1534 ethtool_rxfh_indir_default(i, efx->rss_spread); 1535 1536 efx_set_channels(efx); 1537 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); 1538 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); 1539 1540 /* Initialise the interrupt moderation settings */ 1541 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, 1542 true); 1543 1544 return 0; 1545 1546fail2: 1547 efx_remove_interrupts(efx); 1548fail1: 1549 efx->type->remove(efx); 1550 return rc; 1551} 1552 1553static void efx_remove_nic(struct efx_nic *efx) 1554{ 1555 netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); 1556 1557 efx_remove_interrupts(efx); 1558 efx->type->remove(efx); 1559} 1560 1561static int efx_probe_filters(struct efx_nic *efx) 1562{ 1563 int rc; 1564 1565 spin_lock_init(&efx->filter_lock); 1566 1567 rc = efx->type->filter_table_probe(efx); 1568 if (rc) 1569 return rc; 1570 1571#ifdef CONFIG_RFS_ACCEL 1572 if (efx->type->offload_features & NETIF_F_NTUPLE) { 1573 efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, 1574 sizeof(*efx->rps_flow_id), 1575 GFP_KERNEL); 1576 if (!efx->rps_flow_id) { 1577 efx->type->filter_table_remove(efx); 1578 return -ENOMEM; 1579 } 1580 } 1581#endif 1582 1583 return 0; 1584} 1585 1586static void efx_remove_filters(struct efx_nic *efx) 1587{ 1588#ifdef CONFIG_RFS_ACCEL 1589 kfree(efx->rps_flow_id); 1590#endif 1591 efx->type->filter_table_remove(efx); 1592} 1593 1594static void efx_restore_filters(struct efx_nic *efx) 1595{ 1596 efx->type->filter_table_restore(efx); 1597} 1598 1599/************************************************************************** 1600 * 1601 * NIC startup/shutdown 1602 * 1603 *************************************************************************/ 1604 1605static int efx_probe_all(struct efx_nic *efx) 1606{ 1607 int rc; 1608 1609 rc = efx_probe_nic(efx); 1610 if (rc) { 1611 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); 1612 goto fail1; 1613 } 1614 1615 rc = efx_probe_port(efx); 1616 if (rc) { 1617 netif_err(efx, probe, efx->net_dev, "failed to create port\n"); 1618 goto fail2; 1619 } 1620 1621 BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); 1622 if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { 1623 rc = -EINVAL; 1624 goto fail3; 1625 } 1626 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; 1627 1628 rc = efx_probe_filters(efx); 1629 if (rc) { 1630 netif_err(efx, probe, efx->net_dev, 1631 "failed to create filter tables\n"); 1632 goto fail3; 1633 } 1634 1635 rc = efx_probe_channels(efx); 1636 if (rc) 1637 goto fail4; 1638 1639 return 0; 1640 1641 fail4: 1642 efx_remove_filters(efx); 1643 fail3: 1644 efx_remove_port(efx); 1645 fail2: 1646 efx_remove_nic(efx); 1647 fail1: 1648 return rc; 1649} 1650 1651/* If the interface is supposed to be running but is not, start 1652 * the hardware and software data path, regular activity for the port 1653 * (MAC statistics, link polling, etc.) and schedule the port to be 1654 * reconfigured. Interrupts must already be enabled. This function 1655 * is safe to call multiple times, so long as the NIC is not disabled. 1656 * Requires the RTNL lock. 1657 */ 1658static void efx_start_all(struct efx_nic *efx) 1659{ 1660 EFX_ASSERT_RESET_SERIALISED(efx); 1661 BUG_ON(efx->state == STATE_DISABLED); 1662 1663 /* Check that it is appropriate to restart the interface. All 1664 * of these flags are safe to read under just the rtnl lock */ 1665 if (efx->port_enabled || !netif_running(efx->net_dev)) 1666 return; 1667 1668 efx_start_port(efx); 1669 efx_start_datapath(efx); 1670 1671 /* Start the hardware monitor if there is one */ 1672 if (efx->type->monitor != NULL) 1673 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1674 efx_monitor_interval); 1675 1676 /* If link state detection is normally event-driven, we have 1677 * to poll now because we could have missed a change 1678 */ 1679 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 1680 mutex_lock(&efx->mac_lock); 1681 if (efx->phy_op->poll(efx)) 1682 efx_link_status_changed(efx); 1683 mutex_unlock(&efx->mac_lock); 1684 } 1685 1686 efx->type->start_stats(efx); 1687 efx->type->pull_stats(efx); 1688 spin_lock_bh(&efx->stats_lock); 1689 efx->type->update_stats(efx, NULL, NULL); 1690 spin_unlock_bh(&efx->stats_lock); 1691} 1692 1693/* Flush all delayed work. Should only be called when no more delayed work 1694 * will be scheduled. This doesn't flush pending online resets (efx_reset), 1695 * since we're holding the rtnl_lock at this point. */ 1696static void efx_flush_all(struct efx_nic *efx) 1697{ 1698 /* Make sure the hardware monitor and event self-test are stopped */ 1699 cancel_delayed_work_sync(&efx->monitor_work); 1700 efx_selftest_async_cancel(efx); 1701 /* Stop scheduled port reconfigurations */ 1702 cancel_work_sync(&efx->mac_work); 1703} 1704 1705/* Quiesce the hardware and software data path, and regular activity 1706 * for the port without bringing the link down. Safe to call multiple 1707 * times with the NIC in almost any state, but interrupts should be 1708 * enabled. Requires the RTNL lock. 1709 */ 1710static void efx_stop_all(struct efx_nic *efx) 1711{ 1712 EFX_ASSERT_RESET_SERIALISED(efx); 1713 1714 /* port_enabled can be read safely under the rtnl lock */ 1715 if (!efx->port_enabled) 1716 return; 1717 1718 /* update stats before we go down so we can accurately count 1719 * rx_nodesc_drops 1720 */ 1721 efx->type->pull_stats(efx); 1722 spin_lock_bh(&efx->stats_lock); 1723 efx->type->update_stats(efx, NULL, NULL); 1724 spin_unlock_bh(&efx->stats_lock); 1725 efx->type->stop_stats(efx); 1726 efx_stop_port(efx); 1727 1728 /* Flush efx_mac_work(), refill_workqueue, monitor_work */ 1729 efx_flush_all(efx); 1730 1731 /* Stop the kernel transmit interface. This is only valid if 1732 * the device is stopped or detached; otherwise the watchdog 1733 * may fire immediately. 1734 */ 1735 WARN_ON(netif_running(efx->net_dev) && 1736 netif_device_present(efx->net_dev)); 1737 netif_tx_disable(efx->net_dev); 1738 1739 efx_stop_datapath(efx); 1740} 1741 1742static void efx_remove_all(struct efx_nic *efx) 1743{ 1744 efx_remove_channels(efx); 1745 efx_remove_filters(efx); 1746 efx_remove_port(efx); 1747 efx_remove_nic(efx); 1748} 1749 1750/************************************************************************** 1751 * 1752 * Interrupt moderation 1753 * 1754 **************************************************************************/ 1755 1756static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns) 1757{ 1758 if (usecs == 0) 1759 return 0; 1760 if (usecs * 1000 < quantum_ns) 1761 return 1; /* never round down to 0 */ 1762 return usecs * 1000 / quantum_ns; 1763} 1764 1765/* Set interrupt moderation parameters */ 1766int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, 1767 unsigned int rx_usecs, bool rx_adaptive, 1768 bool rx_may_override_tx) 1769{ 1770 struct efx_channel *channel; 1771 unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max * 1772 efx->timer_quantum_ns, 1773 1000); 1774 unsigned int tx_ticks; 1775 unsigned int rx_ticks; 1776 1777 EFX_ASSERT_RESET_SERIALISED(efx); 1778 1779 if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max) 1780 return -EINVAL; 1781 1782 tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns); 1783 rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns); 1784 1785 if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 && 1786 !rx_may_override_tx) { 1787 netif_err(efx, drv, efx->net_dev, "Channels are shared. " 1788 "RX and TX IRQ moderation must be equal\n"); 1789 return -EINVAL; 1790 } 1791 1792 efx->irq_rx_adaptive = rx_adaptive; 1793 efx->irq_rx_moderation = rx_ticks; 1794 efx_for_each_channel(channel, efx) { 1795 if (efx_channel_has_rx_queue(channel)) 1796 channel->irq_moderation = rx_ticks; 1797 else if (efx_channel_has_tx_queues(channel)) 1798 channel->irq_moderation = tx_ticks; 1799 } 1800 1801 return 0; 1802} 1803 1804void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, 1805 unsigned int *rx_usecs, bool *rx_adaptive) 1806{ 1807 /* We must round up when converting ticks to microseconds 1808 * because we round down when converting the other way. 1809 */ 1810 1811 *rx_adaptive = efx->irq_rx_adaptive; 1812 *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation * 1813 efx->timer_quantum_ns, 1814 1000); 1815 1816 /* If channels are shared between RX and TX, so is IRQ 1817 * moderation. Otherwise, IRQ moderation is the same for all 1818 * TX channels and is not adaptive. 1819 */ 1820 if (efx->tx_channel_offset == 0) 1821 *tx_usecs = *rx_usecs; 1822 else 1823 *tx_usecs = DIV_ROUND_UP( 1824 efx->channel[efx->tx_channel_offset]->irq_moderation * 1825 efx->timer_quantum_ns, 1826 1000); 1827} 1828 1829/************************************************************************** 1830 * 1831 * Hardware monitor 1832 * 1833 **************************************************************************/ 1834 1835/* Run periodically off the general workqueue */ 1836static void efx_monitor(struct work_struct *data) 1837{ 1838 struct efx_nic *efx = container_of(data, struct efx_nic, 1839 monitor_work.work); 1840 1841 netif_vdbg(efx, timer, efx->net_dev, 1842 "hardware monitor executing on CPU %d\n", 1843 raw_smp_processor_id()); 1844 BUG_ON(efx->type->monitor == NULL); 1845 1846 /* If the mac_lock is already held then it is likely a port 1847 * reconfiguration is already in place, which will likely do 1848 * most of the work of monitor() anyway. */ 1849 if (mutex_trylock(&efx->mac_lock)) { 1850 if (efx->port_enabled) 1851 efx->type->monitor(efx); 1852 mutex_unlock(&efx->mac_lock); 1853 } 1854 1855 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1856 efx_monitor_interval); 1857} 1858 1859/************************************************************************** 1860 * 1861 * ioctls 1862 * 1863 *************************************************************************/ 1864 1865/* Net device ioctl 1866 * Context: process, rtnl_lock() held. 1867 */ 1868static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) 1869{ 1870 struct efx_nic *efx = netdev_priv(net_dev); 1871 struct mii_ioctl_data *data = if_mii(ifr); 1872 1873 if (cmd == SIOCSHWTSTAMP) 1874 return efx_ptp_set_ts_config(efx, ifr); 1875 if (cmd == SIOCGHWTSTAMP) 1876 return efx_ptp_get_ts_config(efx, ifr); 1877 1878 /* Convert phy_id from older PRTAD/DEVAD format */ 1879 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && 1880 (data->phy_id & 0xfc00) == 0x0400) 1881 data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; 1882 1883 return mdio_mii_ioctl(&efx->mdio, data, cmd); 1884} 1885 1886/************************************************************************** 1887 * 1888 * NAPI interface 1889 * 1890 **************************************************************************/ 1891 1892static void efx_init_napi_channel(struct efx_channel *channel) 1893{ 1894 struct efx_nic *efx = channel->efx; 1895 1896 channel->napi_dev = efx->net_dev; 1897 netif_napi_add(channel->napi_dev, &channel->napi_str, 1898 efx_poll, napi_weight); 1899} 1900 1901static void efx_init_napi(struct efx_nic *efx) 1902{ 1903 struct efx_channel *channel; 1904 1905 efx_for_each_channel(channel, efx) 1906 efx_init_napi_channel(channel); 1907} 1908 1909static void efx_fini_napi_channel(struct efx_channel *channel) 1910{ 1911 if (channel->napi_dev) 1912 netif_napi_del(&channel->napi_str); 1913 channel->napi_dev = NULL; 1914} 1915 1916static void efx_fini_napi(struct efx_nic *efx) 1917{ 1918 struct efx_channel *channel; 1919 1920 efx_for_each_channel(channel, efx) 1921 efx_fini_napi_channel(channel); 1922} 1923 1924/************************************************************************** 1925 * 1926 * Kernel netpoll interface 1927 * 1928 *************************************************************************/ 1929 1930#ifdef CONFIG_NET_POLL_CONTROLLER 1931 1932/* Although in the common case interrupts will be disabled, this is not 1933 * guaranteed. However, all our work happens inside the NAPI callback, 1934 * so no locking is required. 1935 */ 1936static void efx_netpoll(struct net_device *net_dev) 1937{ 1938 struct efx_nic *efx = netdev_priv(net_dev); 1939 struct efx_channel *channel; 1940 1941 efx_for_each_channel(channel, efx) 1942 efx_schedule_channel(channel); 1943} 1944 1945#endif 1946 1947/************************************************************************** 1948 * 1949 * Kernel net device interface 1950 * 1951 *************************************************************************/ 1952 1953/* Context: process, rtnl_lock() held. */ 1954static int efx_net_open(struct net_device *net_dev) 1955{ 1956 struct efx_nic *efx = netdev_priv(net_dev); 1957 int rc; 1958 1959 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", 1960 raw_smp_processor_id()); 1961 1962 rc = efx_check_disabled(efx); 1963 if (rc) 1964 return rc; 1965 if (efx->phy_mode & PHY_MODE_SPECIAL) 1966 return -EBUSY; 1967 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) 1968 return -EIO; 1969 1970 /* Notify the kernel of the link state polled during driver load, 1971 * before the monitor starts running */ 1972 efx_link_status_changed(efx); 1973 1974 efx_start_all(efx); 1975 efx_selftest_async_start(efx); 1976 return 0; 1977} 1978 1979/* Context: process, rtnl_lock() held. 1980 * Note that the kernel will ignore our return code; this method 1981 * should really be a void. 1982 */ 1983static int efx_net_stop(struct net_device *net_dev) 1984{ 1985 struct efx_nic *efx = netdev_priv(net_dev); 1986 1987 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", 1988 raw_smp_processor_id()); 1989 1990 /* Stop the device and flush all the channels */ 1991 efx_stop_all(efx); 1992 1993 return 0; 1994} 1995 1996/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1997static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, 1998 struct rtnl_link_stats64 *stats) 1999{ 2000 struct efx_nic *efx = netdev_priv(net_dev); 2001 2002 spin_lock_bh(&efx->stats_lock); 2003 efx->type->update_stats(efx, NULL, stats); 2004 spin_unlock_bh(&efx->stats_lock); 2005 2006 return stats; 2007} 2008 2009/* Context: netif_tx_lock held, BHs disabled. */ 2010static void efx_watchdog(struct net_device *net_dev) 2011{ 2012 struct efx_nic *efx = netdev_priv(net_dev); 2013 2014 netif_err(efx, tx_err, efx->net_dev, 2015 "TX stuck with port_enabled=%d: resetting channels\n", 2016 efx->port_enabled); 2017 2018 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); 2019} 2020 2021 2022/* Context: process, rtnl_lock() held. */ 2023static int efx_change_mtu(struct net_device *net_dev, int new_mtu) 2024{ 2025 struct efx_nic *efx = netdev_priv(net_dev); 2026 int rc; 2027 2028 rc = efx_check_disabled(efx); 2029 if (rc) 2030 return rc; 2031 if (new_mtu > EFX_MAX_MTU) 2032 return -EINVAL; 2033 2034 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); 2035 2036 efx_device_detach_sync(efx); 2037 efx_stop_all(efx); 2038 2039 mutex_lock(&efx->mac_lock); 2040 net_dev->mtu = new_mtu; 2041 efx->type->reconfigure_mac(efx); 2042 mutex_unlock(&efx->mac_lock); 2043 2044 efx_start_all(efx); 2045 netif_device_attach(efx->net_dev); 2046 return 0; 2047} 2048 2049static int efx_set_mac_address(struct net_device *net_dev, void *data) 2050{ 2051 struct efx_nic *efx = netdev_priv(net_dev); 2052 struct sockaddr *addr = data; 2053 char *new_addr = addr->sa_data; 2054 2055 if (!is_valid_ether_addr(new_addr)) { 2056 netif_err(efx, drv, efx->net_dev, 2057 "invalid ethernet MAC address requested: %pM\n", 2058 new_addr); 2059 return -EADDRNOTAVAIL; 2060 } 2061 2062 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); 2063 efx_sriov_mac_address_changed(efx); 2064 2065 /* Reconfigure the MAC */ 2066 mutex_lock(&efx->mac_lock); 2067 efx->type->reconfigure_mac(efx); 2068 mutex_unlock(&efx->mac_lock); 2069 2070 return 0; 2071} 2072 2073/* Context: netif_addr_lock held, BHs disabled. */ 2074static void efx_set_rx_mode(struct net_device *net_dev) 2075{ 2076 struct efx_nic *efx = netdev_priv(net_dev); 2077 2078 if (efx->port_enabled) 2079 queue_work(efx->workqueue, &efx->mac_work); 2080 /* Otherwise efx_start_port() will do this */ 2081} 2082 2083static int efx_set_features(struct net_device *net_dev, netdev_features_t data) 2084{ 2085 struct efx_nic *efx = netdev_priv(net_dev); 2086 2087 /* If disabling RX n-tuple filtering, clear existing filters */ 2088 if (net_dev->features & ~data & NETIF_F_NTUPLE) 2089 efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); 2090 2091 return 0; 2092} 2093 2094static const struct net_device_ops efx_farch_netdev_ops = { 2095 .ndo_open = efx_net_open, 2096 .ndo_stop = efx_net_stop, 2097 .ndo_get_stats64 = efx_net_stats, 2098 .ndo_tx_timeout = efx_watchdog, 2099 .ndo_start_xmit = efx_hard_start_xmit, 2100 .ndo_validate_addr = eth_validate_addr, 2101 .ndo_do_ioctl = efx_ioctl, 2102 .ndo_change_mtu = efx_change_mtu, 2103 .ndo_set_mac_address = efx_set_mac_address, 2104 .ndo_set_rx_mode = efx_set_rx_mode, 2105 .ndo_set_features = efx_set_features, 2106#ifdef CONFIG_SFC_SRIOV 2107 .ndo_set_vf_mac = efx_sriov_set_vf_mac, 2108 .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, 2109 .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, 2110 .ndo_get_vf_config = efx_sriov_get_vf_config, 2111#endif 2112#ifdef CONFIG_NET_POLL_CONTROLLER 2113 .ndo_poll_controller = efx_netpoll, 2114#endif 2115 .ndo_setup_tc = efx_setup_tc, 2116#ifdef CONFIG_RFS_ACCEL 2117 .ndo_rx_flow_steer = efx_filter_rfs, 2118#endif 2119}; 2120 2121static const struct net_device_ops efx_ef10_netdev_ops = { 2122 .ndo_open = efx_net_open, 2123 .ndo_stop = efx_net_stop, 2124 .ndo_get_stats64 = efx_net_stats, 2125 .ndo_tx_timeout = efx_watchdog, 2126 .ndo_start_xmit = efx_hard_start_xmit, 2127 .ndo_validate_addr = eth_validate_addr, 2128 .ndo_do_ioctl = efx_ioctl, 2129 .ndo_change_mtu = efx_change_mtu, 2130 .ndo_set_mac_address = efx_set_mac_address, 2131 .ndo_set_rx_mode = efx_set_rx_mode, 2132 .ndo_set_features = efx_set_features, 2133#ifdef CONFIG_NET_POLL_CONTROLLER 2134 .ndo_poll_controller = efx_netpoll, 2135#endif 2136#ifdef CONFIG_RFS_ACCEL 2137 .ndo_rx_flow_steer = efx_filter_rfs, 2138#endif 2139}; 2140 2141static void efx_update_name(struct efx_nic *efx) 2142{ 2143 strcpy(efx->name, efx->net_dev->name); 2144 efx_mtd_rename(efx); 2145 efx_set_channel_names(efx); 2146} 2147 2148static int efx_netdev_event(struct notifier_block *this, 2149 unsigned long event, void *ptr) 2150{ 2151 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); 2152 2153 if ((net_dev->netdev_ops == &efx_farch_netdev_ops || 2154 net_dev->netdev_ops == &efx_ef10_netdev_ops) && 2155 event == NETDEV_CHANGENAME) 2156 efx_update_name(netdev_priv(net_dev)); 2157 2158 return NOTIFY_DONE; 2159} 2160 2161static struct notifier_block efx_netdev_notifier = { 2162 .notifier_call = efx_netdev_event, 2163}; 2164 2165static ssize_t 2166show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) 2167{ 2168 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2169 return sprintf(buf, "%d\n", efx->phy_type); 2170} 2171static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); 2172 2173static int efx_register_netdev(struct efx_nic *efx) 2174{ 2175 struct net_device *net_dev = efx->net_dev; 2176 struct efx_channel *channel; 2177 int rc; 2178 2179 net_dev->watchdog_timeo = 5 * HZ; 2180 net_dev->irq = efx->pci_dev->irq; 2181 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { 2182 net_dev->netdev_ops = &efx_ef10_netdev_ops; 2183 net_dev->priv_flags |= IFF_UNICAST_FLT; 2184 } else { 2185 net_dev->netdev_ops = &efx_farch_netdev_ops; 2186 } 2187 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 2188 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; 2189 2190 rtnl_lock(); 2191 2192 /* Enable resets to be scheduled and check whether any were 2193 * already requested. If so, the NIC is probably hosed so we 2194 * abort. 2195 */ 2196 efx->state = STATE_READY; 2197 smp_mb(); /* ensure we change state before checking reset_pending */ 2198 if (efx->reset_pending) { 2199 netif_err(efx, probe, efx->net_dev, 2200 "aborting probe due to scheduled reset\n"); 2201 rc = -EIO; 2202 goto fail_locked; 2203 } 2204 2205 rc = dev_alloc_name(net_dev, net_dev->name); 2206 if (rc < 0) 2207 goto fail_locked; 2208 efx_update_name(efx); 2209 2210 /* Always start with carrier off; PHY events will detect the link */ 2211 netif_carrier_off(net_dev); 2212 2213 rc = register_netdevice(net_dev); 2214 if (rc) 2215 goto fail_locked; 2216 2217 efx_for_each_channel(channel, efx) { 2218 struct efx_tx_queue *tx_queue; 2219 efx_for_each_channel_tx_queue(tx_queue, channel) 2220 efx_init_tx_queue_core_txq(tx_queue); 2221 } 2222 2223 rtnl_unlock(); 2224 2225 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2226 if (rc) { 2227 netif_err(efx, drv, efx->net_dev, 2228 "failed to init net dev attributes\n"); 2229 goto fail_registered; 2230 } 2231 2232 return 0; 2233 2234fail_registered: 2235 rtnl_lock(); 2236 unregister_netdevice(net_dev); 2237fail_locked: 2238 efx->state = STATE_UNINIT; 2239 rtnl_unlock(); 2240 netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); 2241 return rc; 2242} 2243 2244static void efx_unregister_netdev(struct efx_nic *efx) 2245{ 2246 if (!efx->net_dev) 2247 return; 2248 2249 BUG_ON(netdev_priv(efx->net_dev) != efx); 2250 2251 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 2252 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); 2253 2254 rtnl_lock(); 2255 unregister_netdevice(efx->net_dev); 2256 efx->state = STATE_UNINIT; 2257 rtnl_unlock(); 2258} 2259 2260/************************************************************************** 2261 * 2262 * Device reset and suspend 2263 * 2264 **************************************************************************/ 2265 2266/* Tears down the entire software state and most of the hardware state 2267 * before reset. */ 2268void efx_reset_down(struct efx_nic *efx, enum reset_type method) 2269{ 2270 EFX_ASSERT_RESET_SERIALISED(efx); 2271 2272 efx_stop_all(efx); 2273 efx_disable_interrupts(efx); 2274 2275 mutex_lock(&efx->mac_lock); 2276 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 2277 efx->phy_op->fini(efx); 2278 efx->type->fini(efx); 2279} 2280 2281/* This function will always ensure that the locks acquired in 2282 * efx_reset_down() are released. A failure return code indicates 2283 * that we were unable to reinitialise the hardware, and the 2284 * driver should be disabled. If ok is false, then the rx and tx 2285 * engines are not restarted, pending a RESET_DISABLE. */ 2286int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) 2287{ 2288 int rc; 2289 2290 EFX_ASSERT_RESET_SERIALISED(efx); 2291 2292 rc = efx->type->init(efx); 2293 if (rc) { 2294 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); 2295 goto fail; 2296 } 2297 2298 if (!ok) 2299 goto fail; 2300 2301 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { 2302 rc = efx->phy_op->init(efx); 2303 if (rc) 2304 goto fail; 2305 if (efx->phy_op->reconfigure(efx)) 2306 netif_err(efx, drv, efx->net_dev, 2307 "could not restore PHY settings\n"); 2308 } 2309 2310 rc = efx_enable_interrupts(efx); 2311 if (rc) 2312 goto fail; 2313 efx_restore_filters(efx); 2314 efx_sriov_reset(efx); 2315 2316 mutex_unlock(&efx->mac_lock); 2317 2318 efx_start_all(efx); 2319 2320 return 0; 2321 2322fail: 2323 efx->port_initialized = false; 2324 2325 mutex_unlock(&efx->mac_lock); 2326 2327 return rc; 2328} 2329 2330/* Reset the NIC using the specified method. Note that the reset may 2331 * fail, in which case the card will be left in an unusable state. 2332 * 2333 * Caller must hold the rtnl_lock. 2334 */ 2335int efx_reset(struct efx_nic *efx, enum reset_type method) 2336{ 2337 int rc, rc2; 2338 bool disabled; 2339 2340 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", 2341 RESET_TYPE(method)); 2342 2343 efx_device_detach_sync(efx); 2344 efx_reset_down(efx, method); 2345 2346 rc = efx->type->reset(efx, method); 2347 if (rc) { 2348 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); 2349 goto out; 2350 } 2351 2352 /* Clear flags for the scopes we covered. We assume the NIC and 2353 * driver are now quiescent so that there is no race here. 2354 */ 2355 efx->reset_pending &= -(1 << (method + 1)); 2356 2357 /* Reinitialise bus-mastering, which may have been turned off before 2358 * the reset was scheduled. This is still appropriate, even in the 2359 * RESET_TYPE_DISABLE since this driver generally assumes the hardware 2360 * can respond to requests. */ 2361 pci_set_master(efx->pci_dev); 2362 2363out: 2364 /* Leave device stopped if necessary */ 2365 disabled = rc || 2366 method == RESET_TYPE_DISABLE || 2367 method == RESET_TYPE_RECOVER_OR_DISABLE; 2368 rc2 = efx_reset_up(efx, method, !disabled); 2369 if (rc2) { 2370 disabled = true; 2371 if (!rc) 2372 rc = rc2; 2373 } 2374 2375 if (disabled) { 2376 dev_close(efx->net_dev); 2377 netif_err(efx, drv, efx->net_dev, "has been disabled\n"); 2378 efx->state = STATE_DISABLED; 2379 } else { 2380 netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); 2381 netif_device_attach(efx->net_dev); 2382 } 2383 return rc; 2384} 2385 2386/* Try recovery mechanisms. 2387 * For now only EEH is supported. 2388 * Returns 0 if the recovery mechanisms are unsuccessful. 2389 * Returns a non-zero value otherwise. 2390 */ 2391int efx_try_recovery(struct efx_nic *efx) 2392{ 2393#ifdef CONFIG_EEH 2394 /* A PCI error can occur and not be seen by EEH because nothing 2395 * happens on the PCI bus. In this case the driver may fail and 2396 * schedule a 'recover or reset', leading to this recovery handler. 2397 * Manually call the eeh failure check function. 2398 */ 2399 struct eeh_dev *eehdev = 2400 of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev)); 2401 2402 if (eeh_dev_check_failure(eehdev)) { 2403 /* The EEH mechanisms will handle the error and reset the 2404 * device if necessary. 2405 */ 2406 return 1; 2407 } 2408#endif 2409 return 0; 2410} 2411 2412static void efx_wait_for_bist_end(struct efx_nic *efx) 2413{ 2414 int i; 2415 2416 for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) { 2417 if (efx_mcdi_poll_reboot(efx)) 2418 goto out; 2419 msleep(BIST_WAIT_DELAY_MS); 2420 } 2421 2422 netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); 2423out: 2424 /* Either way unset the BIST flag. If we found no reboot we probably 2425 * won't recover, but we should try. 2426 */ 2427 efx->mc_bist_for_other_fn = false; 2428} 2429 2430/* The worker thread exists so that code that cannot sleep can 2431 * schedule a reset for later. 2432 */ 2433static void efx_reset_work(struct work_struct *data) 2434{ 2435 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); 2436 unsigned long pending; 2437 enum reset_type method; 2438 2439 pending = ACCESS_ONCE(efx->reset_pending); 2440 method = fls(pending) - 1; 2441 2442 if (method == RESET_TYPE_MC_BIST) 2443 efx_wait_for_bist_end(efx); 2444 2445 if ((method == RESET_TYPE_RECOVER_OR_DISABLE || 2446 method == RESET_TYPE_RECOVER_OR_ALL) && 2447 efx_try_recovery(efx)) 2448 return; 2449 2450 if (!pending) 2451 return; 2452 2453 rtnl_lock(); 2454 2455 /* We checked the state in efx_schedule_reset() but it may 2456 * have changed by now. Now that we have the RTNL lock, 2457 * it cannot change again. 2458 */ 2459 if (efx->state == STATE_READY) 2460 (void)efx_reset(efx, method); 2461 2462 rtnl_unlock(); 2463} 2464 2465void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) 2466{ 2467 enum reset_type method; 2468 2469 if (efx->state == STATE_RECOVERY) { 2470 netif_dbg(efx, drv, efx->net_dev, 2471 "recovering: skip scheduling %s reset\n", 2472 RESET_TYPE(type)); 2473 return; 2474 } 2475 2476 switch (type) { 2477 case RESET_TYPE_INVISIBLE: 2478 case RESET_TYPE_ALL: 2479 case RESET_TYPE_RECOVER_OR_ALL: 2480 case RESET_TYPE_WORLD: 2481 case RESET_TYPE_DISABLE: 2482 case RESET_TYPE_RECOVER_OR_DISABLE: 2483 case RESET_TYPE_MC_BIST: 2484 method = type; 2485 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", 2486 RESET_TYPE(method)); 2487 break; 2488 default: 2489 method = efx->type->map_reset_reason(type); 2490 netif_dbg(efx, drv, efx->net_dev, 2491 "scheduling %s reset for %s\n", 2492 RESET_TYPE(method), RESET_TYPE(type)); 2493 break; 2494 } 2495 2496 set_bit(method, &efx->reset_pending); 2497 smp_mb(); /* ensure we change reset_pending before checking state */ 2498 2499 /* If we're not READY then just leave the flags set as the cue 2500 * to abort probing or reschedule the reset later. 2501 */ 2502 if (ACCESS_ONCE(efx->state) != STATE_READY) 2503 return; 2504 2505 /* efx_process_channel() will no longer read events once a 2506 * reset is scheduled. So switch back to poll'd MCDI completions. */ 2507 efx_mcdi_mode_poll(efx); 2508 2509 queue_work(reset_workqueue, &efx->reset_work); 2510} 2511 2512/************************************************************************** 2513 * 2514 * List of NICs we support 2515 * 2516 **************************************************************************/ 2517 2518/* PCI device ID table */ 2519static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { 2520 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 2521 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0), 2522 .driver_data = (unsigned long) &falcon_a1_nic_type}, 2523 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 2524 PCI_DEVICE_ID_SOLARFLARE_SFC4000B), 2525 .driver_data = (unsigned long) &falcon_b0_nic_type}, 2526 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */ 2527 .driver_data = (unsigned long) &siena_a0_nic_type}, 2528 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ 2529 .driver_data = (unsigned long) &siena_a0_nic_type}, 2530 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ 2531 .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, 2532 {0} /* end of list */ 2533}; 2534 2535/************************************************************************** 2536 * 2537 * Dummy PHY/MAC operations 2538 * 2539 * Can be used for some unimplemented operations 2540 * Needed so all function pointers are valid and do not have to be tested 2541 * before use 2542 * 2543 **************************************************************************/ 2544int efx_port_dummy_op_int(struct efx_nic *efx) 2545{ 2546 return 0; 2547} 2548void efx_port_dummy_op_void(struct efx_nic *efx) {} 2549 2550static bool efx_port_dummy_op_poll(struct efx_nic *efx) 2551{ 2552 return false; 2553} 2554 2555static const struct efx_phy_operations efx_dummy_phy_operations = { 2556 .init = efx_port_dummy_op_int, 2557 .reconfigure = efx_port_dummy_op_int, 2558 .poll = efx_port_dummy_op_poll, 2559 .fini = efx_port_dummy_op_void, 2560}; 2561 2562/************************************************************************** 2563 * 2564 * Data housekeeping 2565 * 2566 **************************************************************************/ 2567 2568/* This zeroes out and then fills in the invariants in a struct 2569 * efx_nic (including all sub-structures). 2570 */ 2571static int efx_init_struct(struct efx_nic *efx, 2572 struct pci_dev *pci_dev, struct net_device *net_dev) 2573{ 2574 int i; 2575 2576 /* Initialise common structures */ 2577 spin_lock_init(&efx->biu_lock); 2578#ifdef CONFIG_SFC_MTD 2579 INIT_LIST_HEAD(&efx->mtd_list); 2580#endif 2581 INIT_WORK(&efx->reset_work, efx_reset_work); 2582 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); 2583 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); 2584 efx->pci_dev = pci_dev; 2585 efx->msg_enable = debug; 2586 efx->state = STATE_UNINIT; 2587 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 2588 2589 efx->net_dev = net_dev; 2590 efx->rx_prefix_size = efx->type->rx_prefix_size; 2591 efx->rx_ip_align = 2592 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; 2593 efx->rx_packet_hash_offset = 2594 efx->type->rx_hash_offset - efx->type->rx_prefix_size; 2595 spin_lock_init(&efx->stats_lock); 2596 mutex_init(&efx->mac_lock); 2597 efx->phy_op = &efx_dummy_phy_operations; 2598 efx->mdio.dev = net_dev; 2599 INIT_WORK(&efx->mac_work, efx_mac_work); 2600 init_waitqueue_head(&efx->flush_wq); 2601 2602 for (i = 0; i < EFX_MAX_CHANNELS; i++) { 2603 efx->channel[i] = efx_alloc_channel(efx, i, NULL); 2604 if (!efx->channel[i]) 2605 goto fail; 2606 efx->msi_context[i].efx = efx; 2607 efx->msi_context[i].index = i; 2608 } 2609 2610 /* Higher numbered interrupt modes are less capable! */ 2611 efx->interrupt_mode = max(efx->type->max_interrupt_mode, 2612 interrupt_mode); 2613 2614 /* Would be good to use the net_dev name, but we're too early */ 2615 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", 2616 pci_name(pci_dev)); 2617 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); 2618 if (!efx->workqueue) 2619 goto fail; 2620 2621 return 0; 2622 2623fail: 2624 efx_fini_struct(efx); 2625 return -ENOMEM; 2626} 2627 2628static void efx_fini_struct(struct efx_nic *efx) 2629{ 2630 int i; 2631 2632 for (i = 0; i < EFX_MAX_CHANNELS; i++) 2633 kfree(efx->channel[i]); 2634 2635 if (efx->workqueue) { 2636 destroy_workqueue(efx->workqueue); 2637 efx->workqueue = NULL; 2638 } 2639} 2640 2641/************************************************************************** 2642 * 2643 * PCI interface 2644 * 2645 **************************************************************************/ 2646 2647/* Main body of final NIC shutdown code 2648 * This is called only at module unload (or hotplug removal). 2649 */ 2650static void efx_pci_remove_main(struct efx_nic *efx) 2651{ 2652 /* Flush reset_work. It can no longer be scheduled since we 2653 * are not READY. 2654 */ 2655 BUG_ON(efx->state == STATE_READY); 2656 cancel_work_sync(&efx->reset_work); 2657 2658 efx_disable_interrupts(efx); 2659 efx_nic_fini_interrupt(efx); 2660 efx_fini_port(efx); 2661 efx->type->fini(efx); 2662 efx_fini_napi(efx); 2663 efx_remove_all(efx); 2664} 2665 2666/* Final NIC shutdown 2667 * This is called only at module unload (or hotplug removal). 2668 */ 2669static void efx_pci_remove(struct pci_dev *pci_dev) 2670{ 2671 struct efx_nic *efx; 2672 2673 efx = pci_get_drvdata(pci_dev); 2674 if (!efx) 2675 return; 2676 2677 /* Mark the NIC as fini, then stop the interface */ 2678 rtnl_lock(); 2679 dev_close(efx->net_dev); 2680 efx_disable_interrupts(efx); 2681 rtnl_unlock(); 2682 2683 efx_sriov_fini(efx); 2684 efx_unregister_netdev(efx); 2685 2686 efx_mtd_remove(efx); 2687 2688 efx_pci_remove_main(efx); 2689 2690 efx_fini_io(efx); 2691 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); 2692 2693 efx_fini_struct(efx); 2694 pci_set_drvdata(pci_dev, NULL); 2695 free_netdev(efx->net_dev); 2696 2697 pci_disable_pcie_error_reporting(pci_dev); 2698}; 2699 2700/* NIC VPD information 2701 * Called during probe to display the part number of the 2702 * installed NIC. VPD is potentially very large but this should 2703 * always appear within the first 512 bytes. 2704 */ 2705#define SFC_VPD_LEN 512 2706static void efx_print_product_vpd(struct efx_nic *efx) 2707{ 2708 struct pci_dev *dev = efx->pci_dev; 2709 char vpd_data[SFC_VPD_LEN]; 2710 ssize_t vpd_size; 2711 int i, j; 2712 2713 /* Get the vpd data from the device */ 2714 vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); 2715 if (vpd_size <= 0) { 2716 netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); 2717 return; 2718 } 2719 2720 /* Get the Read only section */ 2721 i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); 2722 if (i < 0) { 2723 netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); 2724 return; 2725 } 2726 2727 j = pci_vpd_lrdt_size(&vpd_data[i]); 2728 i += PCI_VPD_LRDT_TAG_SIZE; 2729 if (i + j > vpd_size) 2730 j = vpd_size - i; 2731 2732 /* Get the Part number */ 2733 i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN"); 2734 if (i < 0) { 2735 netif_err(efx, drv, efx->net_dev, "Part number not found\n"); 2736 return; 2737 } 2738 2739 j = pci_vpd_info_field_size(&vpd_data[i]); 2740 i += PCI_VPD_INFO_FLD_HDR_SIZE; 2741 if (i + j > vpd_size) { 2742 netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); 2743 return; 2744 } 2745 2746 netif_info(efx, drv, efx->net_dev, 2747 "Part Number : %.*s\n", j, &vpd_data[i]); 2748} 2749 2750 2751/* Main body of NIC initialisation 2752 * This is called at module load (or hotplug insertion, theoretically). 2753 */ 2754static int efx_pci_probe_main(struct efx_nic *efx) 2755{ 2756 int rc; 2757 2758 /* Do start-of-day initialisation */ 2759 rc = efx_probe_all(efx); 2760 if (rc) 2761 goto fail1; 2762 2763 efx_init_napi(efx); 2764 2765 rc = efx->type->init(efx); 2766 if (rc) { 2767 netif_err(efx, probe, efx->net_dev, 2768 "failed to initialise NIC\n"); 2769 goto fail3; 2770 } 2771 2772 rc = efx_init_port(efx); 2773 if (rc) { 2774 netif_err(efx, probe, efx->net_dev, 2775 "failed to initialise port\n"); 2776 goto fail4; 2777 } 2778 2779 rc = efx_nic_init_interrupt(efx); 2780 if (rc) 2781 goto fail5; 2782 rc = efx_enable_interrupts(efx); 2783 if (rc) 2784 goto fail6; 2785 2786 return 0; 2787 2788 fail6: 2789 efx_nic_fini_interrupt(efx); 2790 fail5: 2791 efx_fini_port(efx); 2792 fail4: 2793 efx->type->fini(efx); 2794 fail3: 2795 efx_fini_napi(efx); 2796 efx_remove_all(efx); 2797 fail1: 2798 return rc; 2799} 2800 2801/* NIC initialisation 2802 * 2803 * This is called at module load (or hotplug insertion, 2804 * theoretically). It sets up PCI mappings, resets the NIC, 2805 * sets up and registers the network devices with the kernel and hooks 2806 * the interrupt service routine. It does not prepare the device for 2807 * transmission; this is left to the first time one of the network 2808 * interfaces is brought up (i.e. efx_net_open). 2809 */ 2810static int efx_pci_probe(struct pci_dev *pci_dev, 2811 const struct pci_device_id *entry) 2812{ 2813 struct net_device *net_dev; 2814 struct efx_nic *efx; 2815 int rc; 2816 2817 /* Allocate and initialise a struct net_device and struct efx_nic */ 2818 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, 2819 EFX_MAX_RX_QUEUES); 2820 if (!net_dev) 2821 return -ENOMEM; 2822 efx = netdev_priv(net_dev); 2823 efx->type = (const struct efx_nic_type *) entry->driver_data; 2824 net_dev->features |= (efx->type->offload_features | NETIF_F_SG | 2825 NETIF_F_HIGHDMA | NETIF_F_TSO | 2826 NETIF_F_RXCSUM); 2827 if (efx->type->offload_features & NETIF_F_V6_CSUM) 2828 net_dev->features |= NETIF_F_TSO6; 2829 /* Mask for features that also apply to VLAN devices */ 2830 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | 2831 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | 2832 NETIF_F_RXCSUM); 2833 /* All offloads can be toggled */ 2834 net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; 2835 pci_set_drvdata(pci_dev, efx); 2836 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 2837 rc = efx_init_struct(efx, pci_dev, net_dev); 2838 if (rc) 2839 goto fail1; 2840 2841 netif_info(efx, probe, efx->net_dev, 2842 "Solarflare NIC detected\n"); 2843 2844 efx_print_product_vpd(efx); 2845 2846 /* Set up basic I/O (BAR mappings etc) */ 2847 rc = efx_init_io(efx); 2848 if (rc) 2849 goto fail2; 2850 2851 rc = efx_pci_probe_main(efx); 2852 if (rc) 2853 goto fail3; 2854 2855 rc = efx_register_netdev(efx); 2856 if (rc) 2857 goto fail4; 2858 2859 rc = efx_sriov_init(efx); 2860 if (rc) 2861 netif_err(efx, probe, efx->net_dev, 2862 "SR-IOV can't be enabled rc %d\n", rc); 2863 2864 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); 2865 2866 /* Try to create MTDs, but allow this to fail */ 2867 rtnl_lock(); 2868 rc = efx_mtd_probe(efx); 2869 rtnl_unlock(); 2870 if (rc) 2871 netif_warn(efx, probe, efx->net_dev, 2872 "failed to create MTDs (%d)\n", rc); 2873 2874 rc = pci_enable_pcie_error_reporting(pci_dev); 2875 if (rc && rc != -EINVAL) 2876 netif_warn(efx, probe, efx->net_dev, 2877 "pci_enable_pcie_error_reporting failed (%d)\n", rc); 2878 2879 return 0; 2880 2881 fail4: 2882 efx_pci_remove_main(efx); 2883 fail3: 2884 efx_fini_io(efx); 2885 fail2: 2886 efx_fini_struct(efx); 2887 fail1: 2888 pci_set_drvdata(pci_dev, NULL); 2889 WARN_ON(rc > 0); 2890 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); 2891 free_netdev(net_dev); 2892 return rc; 2893} 2894 2895static int efx_pm_freeze(struct device *dev) 2896{ 2897 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2898 2899 rtnl_lock(); 2900 2901 if (efx->state != STATE_DISABLED) { 2902 efx->state = STATE_UNINIT; 2903 2904 efx_device_detach_sync(efx); 2905 2906 efx_stop_all(efx); 2907 efx_disable_interrupts(efx); 2908 } 2909 2910 rtnl_unlock(); 2911 2912 return 0; 2913} 2914 2915static int efx_pm_thaw(struct device *dev) 2916{ 2917 int rc; 2918 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2919 2920 rtnl_lock(); 2921 2922 if (efx->state != STATE_DISABLED) { 2923 rc = efx_enable_interrupts(efx); 2924 if (rc) 2925 goto fail; 2926 2927 mutex_lock(&efx->mac_lock); 2928 efx->phy_op->reconfigure(efx); 2929 mutex_unlock(&efx->mac_lock); 2930 2931 efx_start_all(efx); 2932 2933 netif_device_attach(efx->net_dev); 2934 2935 efx->state = STATE_READY; 2936 2937 efx->type->resume_wol(efx); 2938 } 2939 2940 rtnl_unlock(); 2941 2942 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ 2943 queue_work(reset_workqueue, &efx->reset_work); 2944 2945 return 0; 2946 2947fail: 2948 rtnl_unlock(); 2949 2950 return rc; 2951} 2952 2953static int efx_pm_poweroff(struct device *dev) 2954{ 2955 struct pci_dev *pci_dev = to_pci_dev(dev); 2956 struct efx_nic *efx = pci_get_drvdata(pci_dev); 2957 2958 efx->type->fini(efx); 2959 2960 efx->reset_pending = 0; 2961 2962 pci_save_state(pci_dev); 2963 return pci_set_power_state(pci_dev, PCI_D3hot); 2964} 2965 2966/* Used for both resume and restore */ 2967static int efx_pm_resume(struct device *dev) 2968{ 2969 struct pci_dev *pci_dev = to_pci_dev(dev); 2970 struct efx_nic *efx = pci_get_drvdata(pci_dev); 2971 int rc; 2972 2973 rc = pci_set_power_state(pci_dev, PCI_D0); 2974 if (rc) 2975 return rc; 2976 pci_restore_state(pci_dev); 2977 rc = pci_enable_device(pci_dev); 2978 if (rc) 2979 return rc; 2980 pci_set_master(efx->pci_dev); 2981 rc = efx->type->reset(efx, RESET_TYPE_ALL); 2982 if (rc) 2983 return rc; 2984 rc = efx->type->init(efx); 2985 if (rc) 2986 return rc; 2987 rc = efx_pm_thaw(dev); 2988 return rc; 2989} 2990 2991static int efx_pm_suspend(struct device *dev) 2992{ 2993 int rc; 2994 2995 efx_pm_freeze(dev); 2996 rc = efx_pm_poweroff(dev); 2997 if (rc) 2998 efx_pm_resume(dev); 2999 return rc; 3000} 3001 3002static const struct dev_pm_ops efx_pm_ops = { 3003 .suspend = efx_pm_suspend, 3004 .resume = efx_pm_resume, 3005 .freeze = efx_pm_freeze, 3006 .thaw = efx_pm_thaw, 3007 .poweroff = efx_pm_poweroff, 3008 .restore = efx_pm_resume, 3009}; 3010 3011/* A PCI error affecting this device was detected. 3012 * At this point MMIO and DMA may be disabled. 3013 * Stop the software path and request a slot reset. 3014 */ 3015static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, 3016 enum pci_channel_state state) 3017{ 3018 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; 3019 struct efx_nic *efx = pci_get_drvdata(pdev); 3020 3021 if (state == pci_channel_io_perm_failure) 3022 return PCI_ERS_RESULT_DISCONNECT; 3023 3024 rtnl_lock(); 3025 3026 if (efx->state != STATE_DISABLED) { 3027 efx->state = STATE_RECOVERY; 3028 efx->reset_pending = 0; 3029 3030 efx_device_detach_sync(efx); 3031 3032 efx_stop_all(efx); 3033 efx_disable_interrupts(efx); 3034 3035 status = PCI_ERS_RESULT_NEED_RESET; 3036 } else { 3037 /* If the interface is disabled we don't want to do anything 3038 * with it. 3039 */ 3040 status = PCI_ERS_RESULT_RECOVERED; 3041 } 3042 3043 rtnl_unlock(); 3044 3045 pci_disable_device(pdev); 3046 3047 return status; 3048} 3049 3050/* Fake a successfull reset, which will be performed later in efx_io_resume. */ 3051static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) 3052{ 3053 struct efx_nic *efx = pci_get_drvdata(pdev); 3054 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; 3055 int rc; 3056 3057 if (pci_enable_device(pdev)) { 3058 netif_err(efx, hw, efx->net_dev, 3059 "Cannot re-enable PCI device after reset.\n"); 3060 status = PCI_ERS_RESULT_DISCONNECT; 3061 } 3062 3063 rc = pci_cleanup_aer_uncorrect_error_status(pdev); 3064 if (rc) { 3065 netif_err(efx, hw, efx->net_dev, 3066 "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc); 3067 /* Non-fatal error. Continue. */ 3068 } 3069 3070 return status; 3071} 3072 3073/* Perform the actual reset and resume I/O operations. */ 3074static void efx_io_resume(struct pci_dev *pdev) 3075{ 3076 struct efx_nic *efx = pci_get_drvdata(pdev); 3077 int rc; 3078 3079 rtnl_lock(); 3080 3081 if (efx->state == STATE_DISABLED) 3082 goto out; 3083 3084 rc = efx_reset(efx, RESET_TYPE_ALL); 3085 if (rc) { 3086 netif_err(efx, hw, efx->net_dev, 3087 "efx_reset failed after PCI error (%d)\n", rc); 3088 } else { 3089 efx->state = STATE_READY; 3090 netif_dbg(efx, hw, efx->net_dev, 3091 "Done resetting and resuming IO after PCI error.\n"); 3092 } 3093 3094out: 3095 rtnl_unlock(); 3096} 3097 3098/* For simplicity and reliability, we always require a slot reset and try to 3099 * reset the hardware when a pci error affecting the device is detected. 3100 * We leave both the link_reset and mmio_enabled callback unimplemented: 3101 * with our request for slot reset the mmio_enabled callback will never be 3102 * called, and the link_reset callback is not used by AER or EEH mechanisms. 3103 */ 3104static struct pci_error_handlers efx_err_handlers = { 3105 .error_detected = efx_io_error_detected, 3106 .slot_reset = efx_io_slot_reset, 3107 .resume = efx_io_resume, 3108}; 3109 3110static struct pci_driver efx_pci_driver = { 3111 .name = KBUILD_MODNAME, 3112 .id_table = efx_pci_table, 3113 .probe = efx_pci_probe, 3114 .remove = efx_pci_remove, 3115 .driver.pm = &efx_pm_ops, 3116 .err_handler = &efx_err_handlers, 3117}; 3118 3119/************************************************************************** 3120 * 3121 * Kernel module interface 3122 * 3123 *************************************************************************/ 3124 3125module_param(interrupt_mode, uint, 0444); 3126MODULE_PARM_DESC(interrupt_mode, 3127 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); 3128 3129static int __init efx_init_module(void) 3130{ 3131 int rc; 3132 3133 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); 3134 3135 rc = register_netdevice_notifier(&efx_netdev_notifier); 3136 if (rc) 3137 goto err_notifier; 3138 3139 rc = efx_init_sriov(); 3140 if (rc) 3141 goto err_sriov; 3142 3143 reset_workqueue = create_singlethread_workqueue("sfc_reset"); 3144 if (!reset_workqueue) { 3145 rc = -ENOMEM; 3146 goto err_reset; 3147 } 3148 3149 rc = pci_register_driver(&efx_pci_driver); 3150 if (rc < 0) 3151 goto err_pci; 3152 3153 return 0; 3154 3155 err_pci: 3156 destroy_workqueue(reset_workqueue); 3157 err_reset: 3158 efx_fini_sriov(); 3159 err_sriov: 3160 unregister_netdevice_notifier(&efx_netdev_notifier); 3161 err_notifier: 3162 return rc; 3163} 3164 3165static void __exit efx_exit_module(void) 3166{ 3167 printk(KERN_INFO "Solarflare NET driver unloading\n"); 3168 3169 pci_unregister_driver(&efx_pci_driver); 3170 destroy_workqueue(reset_workqueue); 3171 efx_fini_sriov(); 3172 unregister_netdevice_notifier(&efx_netdev_notifier); 3173 3174} 3175 3176module_init(efx_init_module); 3177module_exit(efx_exit_module); 3178 3179MODULE_AUTHOR("Solarflare Communications and " 3180 "Michael Brown <mbrown@fensystems.co.uk>"); 3181MODULE_DESCRIPTION("Solarflare Communications network driver"); 3182MODULE_LICENSE("GPL"); 3183MODULE_DEVICE_TABLE(pci, efx_pci_table); 3184