net_driver.h revision f9c762500ae77ab8940094be1325c8a2a1c8e5f5
1/**************************************************************************** 2 * Driver for Solarflare Solarstorm network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2005-2011 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11/* Common definitions for all Efx net driver code */ 12 13#ifndef EFX_NET_DRIVER_H 14#define EFX_NET_DRIVER_H 15 16#include <linux/netdevice.h> 17#include <linux/etherdevice.h> 18#include <linux/ethtool.h> 19#include <linux/if_vlan.h> 20#include <linux/timer.h> 21#include <linux/mdio.h> 22#include <linux/list.h> 23#include <linux/pci.h> 24#include <linux/device.h> 25#include <linux/highmem.h> 26#include <linux/workqueue.h> 27#include <linux/vmalloc.h> 28#include <linux/i2c.h> 29 30#include "enum.h" 31#include "bitfield.h" 32 33/************************************************************************** 34 * 35 * Build definitions 36 * 37 **************************************************************************/ 38 39#define EFX_DRIVER_VERSION "3.1" 40 41#ifdef DEBUG 42#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 43#define EFX_WARN_ON_PARANOID(x) WARN_ON(x) 44#else 45#define EFX_BUG_ON_PARANOID(x) do {} while (0) 46#define EFX_WARN_ON_PARANOID(x) do {} while (0) 47#endif 48 49/************************************************************************** 50 * 51 * Efx data structures 52 * 53 **************************************************************************/ 54 55#define EFX_MAX_CHANNELS 32 56#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS 57 58/* Checksum generation is a per-queue option in hardware, so each 59 * queue visible to the networking core is backed by two hardware TX 60 * queues. 
*/ 61#define EFX_MAX_TX_TC 2 62#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS) 63#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */ 64#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */ 65#define EFX_TXQ_TYPES 4 66#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) 67 68/** 69 * struct efx_special_buffer - An Efx special buffer 70 * @addr: CPU base address of the buffer 71 * @dma_addr: DMA base address of the buffer 72 * @len: Buffer length, in bytes 73 * @index: Buffer index within controller;s buffer table 74 * @entries: Number of buffer table entries 75 * 76 * Special buffers are used for the event queues and the TX and RX 77 * descriptor queues for each channel. They are *not* used for the 78 * actual transmit and receive buffers. 79 */ 80struct efx_special_buffer { 81 void *addr; 82 dma_addr_t dma_addr; 83 unsigned int len; 84 int index; 85 int entries; 86}; 87 88enum efx_flush_state { 89 FLUSH_NONE, 90 FLUSH_PENDING, 91 FLUSH_FAILED, 92 FLUSH_DONE, 93}; 94 95/** 96 * struct efx_tx_buffer - An Efx TX buffer 97 * @skb: The associated socket buffer. 98 * Set only on the final fragment of a packet; %NULL for all other 99 * fragments. When this fragment completes, then we can free this 100 * skb. 101 * @tsoh: The associated TSO header structure, or %NULL if this 102 * buffer is not a TSO header. 103 * @dma_addr: DMA address of the fragment. 104 * @len: Length of this fragment. 105 * This field is zero when the queue slot is empty. 106 * @continuation: True if this fragment is not the end of a packet. 107 * @unmap_single: True if pci_unmap_single should be used. 108 * @unmap_len: Length of this fragment to unmap 109 */ 110struct efx_tx_buffer { 111 const struct sk_buff *skb; 112 struct efx_tso_header *tsoh; 113 dma_addr_t dma_addr; 114 unsigned short len; 115 bool continuation; 116 bool unmap_single; 117 unsigned short unmap_len; 118}; 119 120/** 121 * struct efx_tx_queue - An Efx TX queue 122 * 123 * This is a ring buffer of TX fragments. 
124 * Since the TX completion path always executes on the same 125 * CPU and the xmit path can operate on different CPUs, 126 * performance is increased by ensuring that the completion 127 * path and the xmit path operate on different cache lines. 128 * This is particularly important if the xmit path is always 129 * executing on one CPU which is different from the completion 130 * path. There is also a cache line for members which are 131 * read but not written on the fast path. 132 * 133 * @efx: The associated Efx NIC 134 * @queue: DMA queue number 135 * @channel: The associated channel 136 * @core_txq: The networking core TX queue structure 137 * @buffer: The software buffer ring 138 * @txd: The hardware descriptor ring 139 * @ptr_mask: The size of the ring minus 1. 140 * @initialised: Has hardware queue been initialised? 141 * @flushed: Used when handling queue flushing 142 * @read_count: Current read pointer. 143 * This is the number of buffers that have been removed from both rings. 144 * @old_write_count: The value of @write_count when last checked. 145 * This is here for performance reasons. The xmit path will 146 * only get the up-to-date value of @write_count if this 147 * variable indicates that the queue is empty. This is to 148 * avoid cache-line ping-pong between the xmit path and the 149 * completion path. 150 * @insert_count: Current insert pointer 151 * This is the number of buffers that have been added to the 152 * software ring. 153 * @write_count: Current write pointer 154 * This is the number of buffers that have been added to the 155 * hardware ring. 156 * @old_read_count: The value of read_count when last checked. 157 * This is here for performance reasons. The xmit path will 158 * only get the up-to-date value of read_count if this 159 * variable indicates that the queue is full. This is to 160 * avoid cache-line ping-pong between the xmit path and the 161 * completion path. 
162 * @tso_headers_free: A list of TSO headers allocated for this TX queue 163 * that are not in use, and so available for new TSO sends. The list 164 * is protected by the TX queue lock. 165 * @tso_bursts: Number of times TSO xmit invoked by kernel 166 * @tso_long_headers: Number of packets with headers too long for standard 167 * blocks 168 * @tso_packets: Number of packets via the TSO xmit path 169 * @pushes: Number of times the TX push feature has been used 170 * @empty_read_count: If the completion path has seen the queue as empty 171 * and the transmission path has not yet checked this, the value of 172 * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. 173 */ 174struct efx_tx_queue { 175 /* Members which don't change on the fast path */ 176 struct efx_nic *efx ____cacheline_aligned_in_smp; 177 unsigned queue; 178 struct efx_channel *channel; 179 struct netdev_queue *core_txq; 180 struct efx_tx_buffer *buffer; 181 struct efx_special_buffer txd; 182 unsigned int ptr_mask; 183 bool initialised; 184 enum efx_flush_state flushed; 185 186 /* Members used mainly on the completion path */ 187 unsigned int read_count ____cacheline_aligned_in_smp; 188 unsigned int old_write_count; 189 190 /* Members used only on the xmit path */ 191 unsigned int insert_count ____cacheline_aligned_in_smp; 192 unsigned int write_count; 193 unsigned int old_read_count; 194 struct efx_tso_header *tso_headers_free; 195 unsigned int tso_bursts; 196 unsigned int tso_long_headers; 197 unsigned int tso_packets; 198 unsigned int pushes; 199 200 /* Members shared between paths and sometimes updated */ 201 unsigned int empty_read_count ____cacheline_aligned_in_smp; 202#define EFX_EMPTY_COUNT_VALID 0x80000000 203}; 204 205/** 206 * struct efx_rx_buffer - An Efx RX data buffer 207 * @dma_addr: DMA base address of the buffer 208 * @skb: The associated socket buffer, if any. 209 * If both this and page are %NULL, the buffer slot is currently free. 
210 * @page: The associated page buffer, if any. 211 * If both this and skb are %NULL, the buffer slot is currently free. 212 * @len: Buffer length, in bytes. 213 * @is_page: Indicates if @page is valid. If false, @skb is valid. 214 */ 215struct efx_rx_buffer { 216 dma_addr_t dma_addr; 217 union { 218 struct sk_buff *skb; 219 struct page *page; 220 } u; 221 unsigned int len; 222 bool is_page; 223}; 224 225/** 226 * struct efx_rx_page_state - Page-based rx buffer state 227 * 228 * Inserted at the start of every page allocated for receive buffers. 229 * Used to facilitate sharing dma mappings between recycled rx buffers 230 * and those passed up to the kernel. 231 * 232 * @refcnt: Number of struct efx_rx_buffer's referencing this page. 233 * When refcnt falls to zero, the page is unmapped for dma 234 * @dma_addr: The dma address of this page. 235 */ 236struct efx_rx_page_state { 237 unsigned refcnt; 238 dma_addr_t dma_addr; 239 240 unsigned int __pad[0] ____cacheline_aligned; 241}; 242 243/** 244 * struct efx_rx_queue - An Efx RX queue 245 * @efx: The associated Efx NIC 246 * @buffer: The software buffer ring 247 * @rxd: The hardware descriptor ring 248 * @ptr_mask: The size of the ring minus 1. 249 * @added_count: Number of buffers added to the receive queue. 250 * @notified_count: Number of buffers given to NIC (<= @added_count). 251 * @removed_count: Number of buffers removed from the receive queue. 252 * @max_fill: RX descriptor maximum fill level (<= ring size) 253 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill 254 * (<= @max_fill) 255 * @fast_fill_limit: The level to which a fast fill will fill 256 * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill) 257 * @min_fill: RX descriptor minimum non-zero fill level. 258 * This records the minimum fill level observed when a ring 259 * refill was triggered. 260 * @alloc_page_count: RX allocation strategy counter. 261 * @alloc_skb_count: RX allocation strategy counter. 
262 * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). 263 * @flushed: Use when handling queue flushing 264 */ 265struct efx_rx_queue { 266 struct efx_nic *efx; 267 struct efx_rx_buffer *buffer; 268 struct efx_special_buffer rxd; 269 unsigned int ptr_mask; 270 271 int added_count; 272 int notified_count; 273 int removed_count; 274 unsigned int max_fill; 275 unsigned int fast_fill_trigger; 276 unsigned int fast_fill_limit; 277 unsigned int min_fill; 278 unsigned int min_overfill; 279 unsigned int alloc_page_count; 280 unsigned int alloc_skb_count; 281 struct timer_list slow_fill; 282 unsigned int slow_fill_count; 283 284 enum efx_flush_state flushed; 285}; 286 287/** 288 * struct efx_buffer - An Efx general-purpose buffer 289 * @addr: host base address of the buffer 290 * @dma_addr: DMA base address of the buffer 291 * @len: Buffer length, in bytes 292 * 293 * The NIC uses these buffers for its interrupt status registers and 294 * MAC stats dumps. 295 */ 296struct efx_buffer { 297 void *addr; 298 dma_addr_t dma_addr; 299 unsigned int len; 300}; 301 302 303enum efx_rx_alloc_method { 304 RX_ALLOC_METHOD_AUTO = 0, 305 RX_ALLOC_METHOD_SKB = 1, 306 RX_ALLOC_METHOD_PAGE = 2, 307}; 308 309/** 310 * struct efx_channel - An Efx channel 311 * 312 * A channel comprises an event queue, at least one TX queue, at least 313 * one RX queue, and an associated tasklet for processing the event 314 * queue. 315 * 316 * @efx: Associated Efx NIC 317 * @channel: Channel instance number 318 * @enabled: Channel enabled indicator 319 * @irq: IRQ number (MSI and MSI-X only) 320 * @irq_moderation: IRQ moderation value (in hardware ticks) 321 * @napi_dev: Net device used with NAPI 322 * @napi_str: NAPI control structure 323 * @work_pending: Is work pending via NAPI? 324 * @eventq: Event queue buffer 325 * @eventq_mask: Event queue pointer mask 326 * @eventq_read_ptr: Event queue read pointer 327 * @last_eventq_read_ptr: Last event queue read pointer value. 
328 * @last_irq_cpu: Last CPU to handle interrupt for this channel 329 * @irq_count: Number of IRQs since last adaptive moderation decision 330 * @irq_mod_score: IRQ moderation score 331 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors 332 * and diagnostic counters 333 * @rx_alloc_push_pages: RX allocation method currently in use for pushing 334 * descriptors 335 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors 336 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors 337 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors 338 * @n_rx_mcast_mismatch: Count of unmatched multicast frames 339 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors 340 * @n_rx_overlength: Count of RX_OVERLENGTH errors 341 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 342 * @rx_queue: RX queue for this channel 343 * @tx_queue: TX queues for this channel 344 */ 345struct efx_channel { 346 struct efx_nic *efx; 347 int channel; 348 bool enabled; 349 int irq; 350 unsigned int irq_moderation; 351 struct net_device *napi_dev; 352 struct napi_struct napi_str; 353 bool work_pending; 354 struct efx_special_buffer eventq; 355 unsigned int eventq_mask; 356 unsigned int eventq_read_ptr; 357 unsigned int last_eventq_read_ptr; 358 359 int last_irq_cpu; 360 unsigned int irq_count; 361 unsigned int irq_mod_score; 362#ifdef CONFIG_RFS_ACCEL 363 unsigned int rfs_filters_added; 364#endif 365 366 int rx_alloc_level; 367 int rx_alloc_push_pages; 368 369 unsigned n_rx_tobe_disc; 370 unsigned n_rx_ip_hdr_chksum_err; 371 unsigned n_rx_tcp_udp_chksum_err; 372 unsigned n_rx_mcast_mismatch; 373 unsigned n_rx_frm_trunc; 374 unsigned n_rx_overlength; 375 unsigned n_skbuff_leaks; 376 377 /* Used to pipeline received packets in order to optimise memory 378 * access with prefetches. 
379 */ 380 struct efx_rx_buffer *rx_pkt; 381 bool rx_pkt_csummed; 382 383 struct efx_rx_queue rx_queue; 384 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; 385}; 386 387enum efx_led_mode { 388 EFX_LED_OFF = 0, 389 EFX_LED_ON = 1, 390 EFX_LED_DEFAULT = 2 391}; 392 393#define STRING_TABLE_LOOKUP(val, member) \ 394 ((val) < member ## _max) ? member ## _names[val] : "(invalid)" 395 396extern const char *const efx_loopback_mode_names[]; 397extern const unsigned int efx_loopback_mode_max; 398#define LOOPBACK_MODE(efx) \ 399 STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) 400 401extern const char *const efx_reset_type_names[]; 402extern const unsigned int efx_reset_type_max; 403#define RESET_TYPE(type) \ 404 STRING_TABLE_LOOKUP(type, efx_reset_type) 405 406enum efx_int_mode { 407 /* Be careful if altering to correct macro below */ 408 EFX_INT_MODE_MSIX = 0, 409 EFX_INT_MODE_MSI = 1, 410 EFX_INT_MODE_LEGACY = 2, 411 EFX_INT_MODE_MAX /* Insert any new items before this */ 412}; 413#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) 414 415enum nic_state { 416 STATE_INIT = 0, 417 STATE_RUNNING = 1, 418 STATE_FINI = 2, 419 STATE_DISABLED = 3, 420 STATE_MAX, 421}; 422 423/* 424 * Alignment of page-allocated RX buffers 425 * 426 * Controls the number of bytes inserted at the start of an RX buffer. 427 * This is the equivalent of NET_IP_ALIGN [which controls the alignment 428 * of the skb->head for hardware DMA]. 429 */ 430#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 431#define EFX_PAGE_IP_ALIGN 0 432#else 433#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN 434#endif 435 436/* 437 * Alignment of the skb->head which wraps a page-allocated RX buffer 438 * 439 * The skb allocated to wrap an rx_buffer can have this alignment. Since 440 * the data is memcpy'd from the rx_buf, it does not need to be equal to 441 * EFX_PAGE_IP_ALIGN. 
442 */ 443#define EFX_PAGE_SKB_ALIGN 2 444 445/* Forward declaration */ 446struct efx_nic; 447 448/* Pseudo bit-mask flow control field */ 449#define EFX_FC_RX FLOW_CTRL_RX 450#define EFX_FC_TX FLOW_CTRL_TX 451#define EFX_FC_AUTO 4 452 453/** 454 * struct efx_link_state - Current state of the link 455 * @up: Link is up 456 * @fd: Link is full-duplex 457 * @fc: Actual flow control flags 458 * @speed: Link speed (Mbps) 459 */ 460struct efx_link_state { 461 bool up; 462 bool fd; 463 u8 fc; 464 unsigned int speed; 465}; 466 467static inline bool efx_link_state_equal(const struct efx_link_state *left, 468 const struct efx_link_state *right) 469{ 470 return left->up == right->up && left->fd == right->fd && 471 left->fc == right->fc && left->speed == right->speed; 472} 473 474/** 475 * struct efx_phy_operations - Efx PHY operations table 476 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds, 477 * efx->loopback_modes. 478 * @init: Initialise PHY 479 * @fini: Shut down PHY 480 * @reconfigure: Reconfigure PHY (e.g. for new link parameters) 481 * @poll: Update @link_state and report whether it changed. 482 * Serialised by the mac_lock. 483 * @get_settings: Get ethtool settings. Serialised by the mac_lock. 484 * @set_settings: Set ethtool settings. Serialised by the mac_lock. 485 * @set_npage_adv: Set abilities advertised in (Extended) Next Page 486 * (only needed where AN bit is set in mmds) 487 * @test_alive: Test that PHY is 'alive' (online) 488 * @test_name: Get the name of a PHY-specific test/result 489 * @run_tests: Run tests and record results as appropriate (offline). 490 * Flags are the ethtool tests flags. 
491 */ 492struct efx_phy_operations { 493 int (*probe) (struct efx_nic *efx); 494 int (*init) (struct efx_nic *efx); 495 void (*fini) (struct efx_nic *efx); 496 void (*remove) (struct efx_nic *efx); 497 int (*reconfigure) (struct efx_nic *efx); 498 bool (*poll) (struct efx_nic *efx); 499 void (*get_settings) (struct efx_nic *efx, 500 struct ethtool_cmd *ecmd); 501 int (*set_settings) (struct efx_nic *efx, 502 struct ethtool_cmd *ecmd); 503 void (*set_npage_adv) (struct efx_nic *efx, u32); 504 int (*test_alive) (struct efx_nic *efx); 505 const char *(*test_name) (struct efx_nic *efx, unsigned int index); 506 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); 507}; 508 509/** 510 * @enum efx_phy_mode - PHY operating mode flags 511 * @PHY_MODE_NORMAL: on and should pass traffic 512 * @PHY_MODE_TX_DISABLED: on with TX disabled 513 * @PHY_MODE_LOW_POWER: set to low power through MDIO 514 * @PHY_MODE_OFF: switched off through external control 515 * @PHY_MODE_SPECIAL: on but will not pass traffic 516 */ 517enum efx_phy_mode { 518 PHY_MODE_NORMAL = 0, 519 PHY_MODE_TX_DISABLED = 1, 520 PHY_MODE_LOW_POWER = 2, 521 PHY_MODE_OFF = 4, 522 PHY_MODE_SPECIAL = 8, 523}; 524 525static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode) 526{ 527 return !!(mode & ~PHY_MODE_TX_DISABLED); 528} 529 530/* 531 * Efx extended statistics 532 * 533 * Not all statistics are provided by all supported MACs. The purpose 534 * is this structure is to contain the raw statistics provided by each 535 * MAC. 
536 */ 537struct efx_mac_stats { 538 u64 tx_bytes; 539 u64 tx_good_bytes; 540 u64 tx_bad_bytes; 541 u64 tx_packets; 542 u64 tx_bad; 543 u64 tx_pause; 544 u64 tx_control; 545 u64 tx_unicast; 546 u64 tx_multicast; 547 u64 tx_broadcast; 548 u64 tx_lt64; 549 u64 tx_64; 550 u64 tx_65_to_127; 551 u64 tx_128_to_255; 552 u64 tx_256_to_511; 553 u64 tx_512_to_1023; 554 u64 tx_1024_to_15xx; 555 u64 tx_15xx_to_jumbo; 556 u64 tx_gtjumbo; 557 u64 tx_collision; 558 u64 tx_single_collision; 559 u64 tx_multiple_collision; 560 u64 tx_excessive_collision; 561 u64 tx_deferred; 562 u64 tx_late_collision; 563 u64 tx_excessive_deferred; 564 u64 tx_non_tcpudp; 565 u64 tx_mac_src_error; 566 u64 tx_ip_src_error; 567 u64 rx_bytes; 568 u64 rx_good_bytes; 569 u64 rx_bad_bytes; 570 u64 rx_packets; 571 u64 rx_good; 572 u64 rx_bad; 573 u64 rx_pause; 574 u64 rx_control; 575 u64 rx_unicast; 576 u64 rx_multicast; 577 u64 rx_broadcast; 578 u64 rx_lt64; 579 u64 rx_64; 580 u64 rx_65_to_127; 581 u64 rx_128_to_255; 582 u64 rx_256_to_511; 583 u64 rx_512_to_1023; 584 u64 rx_1024_to_15xx; 585 u64 rx_15xx_to_jumbo; 586 u64 rx_gtjumbo; 587 u64 rx_bad_lt64; 588 u64 rx_bad_64_to_15xx; 589 u64 rx_bad_15xx_to_jumbo; 590 u64 rx_bad_gtjumbo; 591 u64 rx_overflow; 592 u64 rx_missed; 593 u64 rx_false_carrier; 594 u64 rx_symbol_error; 595 u64 rx_align_error; 596 u64 rx_length_error; 597 u64 rx_internal_error; 598 u64 rx_good_lt64; 599}; 600 601/* Number of bits used in a multicast filter hash address */ 602#define EFX_MCAST_HASH_BITS 8 603 604/* Number of (single-bit) entries in a multicast filter hash */ 605#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS) 606 607/* An Efx multicast filter hash */ 608union efx_multicast_hash { 609 u8 byte[EFX_MCAST_HASH_ENTRIES / 8]; 610 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; 611}; 612 613struct efx_filter_state; 614 615/** 616 * struct efx_nic - an Efx NIC 617 * @name: Device name (net device name or bus id before net device registered) 618 * 
@pci_dev: The PCI device 619 * @type: Controller type attributes 620 * @legacy_irq: IRQ number 621 * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)? 622 * @workqueue: Workqueue for port reconfigures and the HW monitor. 623 * Work items do not hold and must not acquire RTNL. 624 * @workqueue_name: Name of workqueue 625 * @reset_work: Scheduled reset workitem 626 * @membase_phys: Memory BAR value as physical address 627 * @membase: Memory BAR value 628 * @interrupt_mode: Interrupt mode 629 * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds 630 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 631 * @irq_rx_moderation: IRQ moderation time for RX event queues 632 * @msg_enable: Log message enable flags 633 * @state: Device state flag. Serialised by the rtnl_lock. 634 * @reset_pending: Bitmask for pending resets 635 * @tx_queue: TX DMA queues 636 * @rx_queue: RX DMA queues 637 * @channel: Channels 638 * @channel_name: Names for channels and their IRQs 639 * @rxq_entries: Size of receive queues requested by user. 640 * @txq_entries: Size of transmit queues requested by user. 
641 * @next_buffer_table: First available buffer table id 642 * @n_channels: Number of channels in use 643 * @n_rx_channels: Number of channels used for RX (= number of RX queues) 644 * @n_tx_channels: Number of channels used for TX 645 * @rx_buffer_len: RX buffer length 646 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 647 * @rx_hash_key: Toeplitz hash key for RSS 648 * @rx_indir_table: Indirection table for RSS 649 * @int_error_count: Number of internal errors seen recently 650 * @int_error_expire: Time at which error count will be expired 651 * @irq_status: Interrupt status buffer 652 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 653 * @irq_level: IRQ level/index for IRQs not triggered by an event queue 654 * @mtd_list: List of MTDs attached to the NIC 655 * @nic_data: Hardware dependent state 656 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 657 * efx_monitor() and efx_reconfigure_port() 658 * @port_enabled: Port enabled indicator. 659 * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and 660 * efx_mac_work() with kernel interfaces. Safe to read under any 661 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must 662 * be held to modify it. 663 * @port_initialized: Port initialized? 664 * @net_dev: Operating system network device. Consider holding the rtnl lock 665 * @stats_buffer: DMA buffer for statistics 666 * @phy_type: PHY type 667 * @phy_op: PHY interface 668 * @phy_data: PHY private data (including PHY-specific stats) 669 * @mdio: PHY MDIO interface 670 * @mdio_bus: PHY MDIO bus ID (only used by Siena) 671 * @phy_mode: PHY operating mode. Serialised by @mac_lock. 672 * @link_advertising: Autonegotiation advertising flags 673 * @link_state: Current state of the link 674 * @n_link_state_changes: Number of times the link has changed state 675 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. 
676 * @multicast_hash: Multicast hash table 677 * @wanted_fc: Wanted flow control flags 678 * @mac_work: Work item for changing MAC promiscuity and multicast hash 679 * @loopback_mode: Loopback status 680 * @loopback_modes: Supported loopback mode bitmask 681 * @loopback_selftest: Offline self-test private state 682 * @monitor_work: Hardware monitor workitem 683 * @biu_lock: BIU (bus interface unit) lock 684 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This 685 * field is used by efx_test_interrupts() to verify that an 686 * interrupt has occurred. 687 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 688 * @mac_stats: MAC statistics. These include all statistics the MACs 689 * can provide. Generic code converts these into a standard 690 * &struct net_device_stats. 691 * @stats_lock: Statistics update lock. Serialises statistics fetches 692 * and access to @mac_stats. 693 * 694 * This is stored in the private area of the &struct net_device. 695 */ 696struct efx_nic { 697 /* The following fields should be written very rarely */ 698 699 char name[IFNAMSIZ]; 700 struct pci_dev *pci_dev; 701 const struct efx_nic_type *type; 702 int legacy_irq; 703 bool legacy_irq_enabled; 704 struct workqueue_struct *workqueue; 705 char workqueue_name[16]; 706 struct work_struct reset_work; 707 resource_size_t membase_phys; 708 void __iomem *membase; 709 710 enum efx_int_mode interrupt_mode; 711 unsigned int timer_quantum_ns; 712 bool irq_rx_adaptive; 713 unsigned int irq_rx_moderation; 714 u32 msg_enable; 715 716 enum nic_state state; 717 unsigned long reset_pending; 718 719 struct efx_channel *channel[EFX_MAX_CHANNELS]; 720 char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; 721 722 unsigned rxq_entries; 723 unsigned txq_entries; 724 unsigned next_buffer_table; 725 unsigned n_channels; 726 unsigned n_rx_channels; 727 unsigned tx_channel_offset; 728 unsigned n_tx_channels; 729 unsigned int rx_buffer_len; 730 unsigned int rx_buffer_order; 731 u8 rx_hash_key[40]; 732 
u32 rx_indir_table[128]; 733 734 unsigned int_error_count; 735 unsigned long int_error_expire; 736 737 struct efx_buffer irq_status; 738 unsigned irq_zero_count; 739 unsigned irq_level; 740 741#ifdef CONFIG_SFC_MTD 742 struct list_head mtd_list; 743#endif 744 745 void *nic_data; 746 747 struct mutex mac_lock; 748 struct work_struct mac_work; 749 bool port_enabled; 750 751 bool port_initialized; 752 struct net_device *net_dev; 753 754 struct efx_buffer stats_buffer; 755 756 unsigned int phy_type; 757 const struct efx_phy_operations *phy_op; 758 void *phy_data; 759 struct mdio_if_info mdio; 760 unsigned int mdio_bus; 761 enum efx_phy_mode phy_mode; 762 763 u32 link_advertising; 764 struct efx_link_state link_state; 765 unsigned int n_link_state_changes; 766 767 bool promiscuous; 768 union efx_multicast_hash multicast_hash; 769 u8 wanted_fc; 770 771 atomic_t rx_reset; 772 enum efx_loopback_mode loopback_mode; 773 u64 loopback_modes; 774 775 void *loopback_selftest; 776 777 struct efx_filter_state *filter_state; 778 779 /* The following fields may be written more often */ 780 781 struct delayed_work monitor_work ____cacheline_aligned_in_smp; 782 spinlock_t biu_lock; 783 int last_irq_cpu; 784 unsigned n_rx_nodesc_drop_cnt; 785 struct efx_mac_stats mac_stats; 786 spinlock_t stats_lock; 787}; 788 789static inline int efx_dev_registered(struct efx_nic *efx) 790{ 791 return efx->net_dev->reg_state == NETREG_REGISTERED; 792} 793 794static inline unsigned int efx_port_num(struct efx_nic *efx) 795{ 796 return efx->net_dev->dev_id; 797} 798 799/** 800 * struct efx_nic_type - Efx device type definition 801 * @probe: Probe the controller 802 * @remove: Free resources allocated by probe() 803 * @init: Initialise the controller 804 * @fini: Shut down the controller 805 * @monitor: Periodic function for polling link state and hardware monitor 806 * @map_reset_reason: Map ethtool reset reason to a reset method 807 * @map_reset_flags: Map ethtool reset flags to a reset method, if 
possible 808 * @reset: Reset the controller hardware and possibly the PHY. This will 809 * be called while the controller is uninitialised. 810 * @probe_port: Probe the MAC and PHY 811 * @remove_port: Free resources allocated by probe_port() 812 * @handle_global_event: Handle a "global" event (may be %NULL) 813 * @prepare_flush: Prepare the hardware for flushing the DMA queues 814 * @update_stats: Update statistics not provided by event handling 815 * @start_stats: Start the regular fetching of statistics 816 * @stop_stats: Stop the regular fetching of statistics 817 * @set_id_led: Set state of identifying LED or revert to automatic function 818 * @push_irq_moderation: Apply interrupt moderation value 819 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY 820 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings 821 * to the hardware. Serialised by the mac_lock. 822 * @check_mac_fault: Check MAC fault state. True if fault present. 823 * @get_wol: Get WoL configuration from driver state 824 * @set_wol: Push WoL configuration to the NIC 825 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) 826 * @test_registers: Test read/write functionality of control registers 827 * @test_nvram: Test validity of NVRAM contents 828 * @revision: Hardware architecture revision 829 * @mem_map_size: Memory BAR mapped size 830 * @txd_ptr_tbl_base: TX descriptor ring base address 831 * @rxd_ptr_tbl_base: RX descriptor ring base address 832 * @buf_tbl_base: Buffer table base address 833 * @evq_ptr_tbl_base: Event queue pointer table base address 834 * @evq_rptr_tbl_base: Event queue read-pointer table base address 835 * @max_dma_mask: Maximum possible DMA mask 836 * @rx_buffer_hash_size: Size of hash at start of RX buffer 837 * @rx_buffer_padding: Size of padding at end of RX buffer 838 * @max_interrupt_mode: Highest capability interrupt mode supported 839 * from &enum efx_init_mode. 
 * @phys_addr_channels: Number of channels with physically addressed
 *	descriptors
 * @timer_period_max: Maximum period of interrupt timer (in ticks)
 * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
 * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
 * @offload_features: net_device feature flags for protocol offload
 *	features implemented in hardware
 */
struct efx_nic_type {
	/* NIC lifecycle: allocation/teardown and hardware init/shutdown */
	int (*probe)(struct efx_nic *efx);
	void (*remove)(struct efx_nic *efx);
	int (*init)(struct efx_nic *efx);
	void (*fini)(struct efx_nic *efx);
	void (*monitor)(struct efx_nic *efx);
	/* Reset handling: translate reasons/flags to a method, then do it */
	enum reset_type (*map_reset_reason)(enum reset_type reason);
	int (*map_reset_flags)(u32 *flags);
	int (*reset)(struct efx_nic *efx, enum reset_type method);
	/* Port setup/teardown */
	int (*probe_port)(struct efx_nic *efx);
	void (*remove_port)(struct efx_nic *efx);
	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
	void (*prepare_flush)(struct efx_nic *efx);
	/* Hardware statistics collection */
	void (*update_stats)(struct efx_nic *efx);
	void (*start_stats)(struct efx_nic *efx);
	void (*stop_stats)(struct efx_nic *efx);
	void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
	void (*push_irq_moderation)(struct efx_channel *channel);
	/* Port/MAC reconfiguration and fault detection */
	int (*reconfigure_port)(struct efx_nic *efx);
	int (*reconfigure_mac)(struct efx_nic *efx);
	bool (*check_mac_fault)(struct efx_nic *efx);
	/* Wake-on-LAN control */
	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
	int (*set_wol)(struct efx_nic *efx, u32 type);
	void (*resume_wol)(struct efx_nic *efx);
	/* Self-tests */
	int (*test_registers)(struct efx_nic *efx);
	int (*test_nvram)(struct efx_nic *efx);

	/* Per-NIC-type constants (register layout, DMA limits, SRAM bases) */
	int revision;
	unsigned int mem_map_size;
	unsigned int txd_ptr_tbl_base;
	unsigned int rxd_ptr_tbl_base;
	unsigned int buf_tbl_base;
	unsigned int evq_ptr_tbl_base;
	unsigned int evq_rptr_tbl_base;
	u64 max_dma_mask;
	unsigned int rx_buffer_hash_size;
	unsigned int rx_buffer_padding;
	unsigned int max_interrupt_mode;
	unsigned int phys_addr_channels;
	unsigned int timer_period_max;
	unsigned int tx_dc_base;
	unsigned int rx_dc_base;
	netdev_features_t offload_features;
};

/**************************************************************************
 *
 * Prototypes and inline functions
 *
 *************************************************************************/

/* Return the channel with the given index.  The index is only range-checked
 * in DEBUG builds (EFX_BUG_ON_PARANOID compiles out otherwise).
 */
static inline struct efx_channel *
efx_get_channel(struct efx_nic *efx, unsigned index)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_channels);
	return efx->channel[index];
}

/* Iterate over all used channels, i.e. channel[0]..channel[n_channels - 1] */
#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = (_efx)->channel[0];				\
	     _channel;							\
	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
		     (_efx)->channel[_channel->channel + 1] : NULL)

/* Return TX queue @type of the @index'th TX-capable channel.  TX channels
 * start at @efx->tx_channel_offset; bounds are checked in DEBUG builds only.
 */
static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
			    type >= EFX_TXQ_TYPES);
	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}

/* True if this channel is one of the n_tx_channels channels (starting at
 * tx_channel_offset) that are backed by hardware TX queues.
 */
static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}

/* Return TX queue @type of a channel that is known to have TX queues */
static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
			    type >= EFX_TXQ_TYPES);
	return &channel->tx_queue[type];
}

/* A TX queue is unused only when it is a high-priority queue and the net
 * device has fewer than 2 traffic classes configured.
 */
static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
{
	return !(tx_queue->efx->net_dev->num_tc < 2 &&
		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
}

/* Iterate over all TX queues belonging to a channel.  Iteration stops at the
 * first unused queue (efx_tx_queue_used() is part of the loop condition).
 * The if/else wrapper keeps the macro usable as a single statement.
 */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
			     efx_tx_queue_used(_tx_queue);		\
		     _tx_queue++)

/* Iterate over all possible TX queues belonging to a channel, including
 * any that efx_tx_queue_used() would report as unused.
 */
#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
	for (_tx_queue = (_channel)->tx_queue;				\
	     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;		\
	     _tx_queue++)

/* Return the RX queue of the @index'th RX channel.  Range-checked in DEBUG
 * builds only.
 */
static inline struct efx_rx_queue *
efx_get_rx_queue(struct efx_nic *efx, unsigned index)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
	return &efx->channel[index]->rx_queue;
}

/* True if this channel is one of the first n_rx_channels channels, which
 * are the ones backed by RX queues.
 */
static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
{
	return channel->channel < channel->efx->n_rx_channels;
}

/* Return the RX queue of a channel that is known to have one */
static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
	return &channel->rx_queue;
}

/* Iterate over all RX queues belonging to a channel.  Each channel has at
 * most one RX queue, so the loop body runs either once or not at all; the
 * if/else wrapper keeps the macro usable as a single statement.
 */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
	if (!efx_channel_has_rx_queue(_channel))			\
		;							\
	else								\
		for (_rx_queue = &(_channel)->rx_queue;			\
		     _rx_queue;						\
		     _rx_queue = NULL)

/* Recover the owning channel from an RX queue pointer; relies on the RX
 * queue being embedded in struct efx_channel.
 */
static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
{
	return container_of(rx_queue, struct efx_channel, rx_queue);
}

/* An RX queue's index is the index of its owning channel */
static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
{
	return efx_rx_queue_channel(rx_queue)->channel;
}

/* Returns a pointer to the specified receive buffer in the RX
 * descriptor queue.  Note: no bounds check is performed on @index.
 */
static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
						  unsigned int index)
{
	return &rx_queue->buffer[index];
}

/* Set bit in a little-endian bitfield.  Plain read-modify-write: NOT atomic,
 * caller must provide any required serialisation.
 */
static inline void set_bit_le(unsigned nr, unsigned char *addr)
{
	addr[nr / 8] |= (1 << (nr % 8));
}

/* Clear bit in a little-endian bitfield.  Plain read-modify-write: NOT
 * atomic, caller must provide any required serialisation.
 */
static inline void clear_bit_le(unsigned nr, unsigned char *addr)
{
	addr[nr / 8] &= ~(1 << (nr % 8));
}


/**
 * EFX_MAX_FRAME_LEN - calculate maximum frame length
 *
 * This calculates the maximum frame length that will be used for a
 * given MTU.  The frame length will be equal to the MTU plus a
 * constant amount of header space and padding.  This is the quantity
 * that the net driver will program into the MAC as the maximum frame
 * length.
 *
 * The 10G MAC requires 8-byte alignment on the frame
 * length, so we round up to the nearest 8.
 *
 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
 * XGMII cycle).  If the frame length reaches the maximum value in the
 * same cycle, the XMAC can miss the IPG altogether.  We work around
 * this by adding a further 16 bytes.
 */
#define EFX_MAX_FRAME_LEN(mtu) \
	((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)


#endif /* EFX_NET_DRIVER_H */