ixgbevf_main.c revision 9bdfefd21afdd6efcc40aa009fb0f97c4179a2a5
1/******************************************************************************* 2 3 Intel 82599 Virtual Function driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26*******************************************************************************/ 27 28 29/****************************************************************************** 30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code 31******************************************************************************/ 32 33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34 35#include <linux/types.h> 36#include <linux/bitops.h> 37#include <linux/module.h> 38#include <linux/pci.h> 39#include <linux/netdevice.h> 40#include <linux/vmalloc.h> 41#include <linux/string.h> 42#include <linux/in.h> 43#include <linux/ip.h> 44#include <linux/tcp.h> 45#include <linux/sctp.h> 46#include <linux/ipv6.h> 47#include <linux/slab.h> 48#include <net/checksum.h> 49#include <net/ip6_checksum.h> 50#include <linux/ethtool.h> 51#include <linux/if.h> 52#include <linux/if_vlan.h> 53#include <linux/prefetch.h> 54 55#include "ixgbevf.h" 56 57const char ixgbevf_driver_name[] = "ixgbevf"; 58static const char ixgbevf_driver_string[] = 59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 60 61#define DRV_VERSION "2.12.1-k" 62const char ixgbevf_driver_version[] = DRV_VERSION; 63static char ixgbevf_copyright[] = 64 "Copyright (c) 2009 - 2012 Intel Corporation."; 65 66static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 67 [board_82599_vf] = &ixgbevf_82599_vf_info, 68 [board_X540_vf] = &ixgbevf_X540_vf_info, 69}; 70 71/* ixgbevf_pci_tbl - PCI Device ID Table 72 * 73 * Wildcard entries (PCI_ANY_ID) should come last 74 * Last entry must be all 0s 75 * 76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 77 * Class, Class Mask, private data (not used) } 78 */ 79static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = { 80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, 81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, 82 /* required last entry */ 83 {0, } 84}; 85MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 86 87MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 88MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); 89MODULE_LICENSE("GPL"); 90MODULE_VERSION(DRV_VERSION); 91 92#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 93static int debug = -1; 94module_param(debug, int, 0); 95MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 96 97/* forward decls */ 98static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter); 99static void 
ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 100static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); 101 102static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring, 103 u32 val) 104{ 105 rx_ring->next_to_use = val; 106 107 /* 108 * Force memory writes to complete before letting h/w 109 * know there are new descriptors to fetch. (Only 110 * applicable for weak-ordered memory model archs, 111 * such as IA-64). 112 */ 113 wmb(); 114 writel(val, rx_ring->tail); 115} 116 117/** 118 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 119 * @adapter: pointer to adapter struct 120 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 121 * @queue: queue to map the corresponding interrupt to 122 * @msix_vector: the vector to map to the corresponding queue 123 */ 124static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, 125 u8 queue, u8 msix_vector) 126{ 127 u32 ivar, index; 128 struct ixgbe_hw *hw = &adapter->hw; 129 if (direction == -1) { 130 /* other causes */ 131 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 132 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 133 ivar &= ~0xFF; 134 ivar |= msix_vector; 135 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 136 } else { 137 /* tx or rx causes */ 138 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 139 index = ((16 * (queue & 1)) + (8 * direction)); 140 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 141 ivar &= ~(0xFF << index); 142 ivar |= (msix_vector << index); 143 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); 144 } 145} 146 147static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, 148 struct ixgbevf_tx_buffer *tx_buffer) 149{ 150 if (tx_buffer->skb) { 151 dev_kfree_skb_any(tx_buffer->skb); 152 if (dma_unmap_len(tx_buffer, len)) 153 dma_unmap_single(tx_ring->dev, 154 dma_unmap_addr(tx_buffer, dma), 155 dma_unmap_len(tx_buffer, len), 156 DMA_TO_DEVICE); 157 } else if (dma_unmap_len(tx_buffer, len)) { 158 dma_unmap_page(tx_ring->dev, 159 dma_unmap_addr(tx_buffer, dma), 160 dma_unmap_len(tx_buffer, len), 161 DMA_TO_DEVICE); 162 } 163 tx_buffer->next_to_watch = NULL; 164 tx_buffer->skb = NULL; 165 dma_unmap_len_set(tx_buffer, len, 0); 166 /* tx_buffer must be completely set up in the transmit path */ 167} 168 169#define IXGBE_MAX_TXD_PWR 14 170#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 171 172/* Tx Descriptors needed, worst case */ 173#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) 174#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 175 176static void ixgbevf_tx_timeout(struct net_device *netdev); 177 178/** 179 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 180 * @q_vector: board private structure 181 * @tx_ring: tx ring to clean 182 **/ 183static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, 184 struct ixgbevf_ring *tx_ring) 185{ 186 struct ixgbevf_adapter *adapter = q_vector->adapter; 187 struct ixgbevf_tx_buffer *tx_buffer; 188 union ixgbe_adv_tx_desc *tx_desc; 189 unsigned int total_bytes = 0, total_packets = 0; 190 unsigned int budget = tx_ring->count / 2; 191 unsigned int i = tx_ring->next_to_clean; 192 193 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 194 return true; 195 196 tx_buffer = &tx_ring->tx_buffer_info[i]; 197 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 198 i -= tx_ring->count; 199 200 do { 201 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; 202 203 /* if next_to_watch is not set then there is no work pending */ 204 if (!eop_desc) 205 break; 206 207 /* prevent any 
other reads prior to eop_desc */ 208 read_barrier_depends(); 209 210 /* if DD is not set pending work has not been completed */ 211 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) 212 break; 213 214 /* clear next_to_watch to prevent false hangs */ 215 tx_buffer->next_to_watch = NULL; 216 217 /* update the statistics for this packet */ 218 total_bytes += tx_buffer->bytecount; 219 total_packets += tx_buffer->gso_segs; 220 221 /* free the skb */ 222 dev_kfree_skb_any(tx_buffer->skb); 223 224 /* unmap skb header data */ 225 dma_unmap_single(tx_ring->dev, 226 dma_unmap_addr(tx_buffer, dma), 227 dma_unmap_len(tx_buffer, len), 228 DMA_TO_DEVICE); 229 230 /* clear tx_buffer data */ 231 tx_buffer->skb = NULL; 232 dma_unmap_len_set(tx_buffer, len, 0); 233 234 /* unmap remaining buffers */ 235 while (tx_desc != eop_desc) { 236 tx_desc->wb.status = 0; 237 238 tx_buffer++; 239 tx_desc++; 240 i++; 241 if (unlikely(!i)) { 242 i -= tx_ring->count; 243 tx_buffer = tx_ring->tx_buffer_info; 244 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 245 } 246 247 /* unmap any remaining paged data */ 248 if (dma_unmap_len(tx_buffer, len)) { 249 dma_unmap_page(tx_ring->dev, 250 dma_unmap_addr(tx_buffer, dma), 251 dma_unmap_len(tx_buffer, len), 252 DMA_TO_DEVICE); 253 dma_unmap_len_set(tx_buffer, len, 0); 254 } 255 } 256 257 tx_desc->wb.status = 0; 258 259 /* move us one more past the eop_desc for start of next pkt */ 260 tx_buffer++; 261 tx_desc++; 262 i++; 263 if (unlikely(!i)) { 264 i -= tx_ring->count; 265 tx_buffer = tx_ring->tx_buffer_info; 266 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); 267 } 268 269 /* issue prefetch for next Tx descriptor */ 270 prefetch(tx_desc); 271 272 /* update budget accounting */ 273 budget--; 274 } while (likely(budget)); 275 276 i += tx_ring->count; 277 tx_ring->next_to_clean = i; 278 u64_stats_update_begin(&tx_ring->syncp); 279 tx_ring->stats.bytes += total_bytes; 280 tx_ring->stats.packets += total_packets; 281 u64_stats_update_end(&tx_ring->syncp); 282 q_vector->tx.total_bytes += total_bytes; 283 q_vector->tx.total_packets += total_packets; 284 285#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 286 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && 287 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { 288 /* Make sure that anybody stopping the queue after this 289 * sees the new next_to_clean. 
290 */ 291 smp_mb(); 292 293 if (__netif_subqueue_stopped(tx_ring->netdev, 294 tx_ring->queue_index) && 295 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 296 netif_wake_subqueue(tx_ring->netdev, 297 tx_ring->queue_index); 298 ++tx_ring->tx_stats.restart_queue; 299 } 300 } 301 302 return !!budget; 303} 304 305/** 306 * ixgbevf_receive_skb - Send a completed packet up the stack 307 * @q_vector: structure containing interrupt and ring information 308 * @skb: packet to send up 309 * @status: hardware indication of status of receive 310 * @rx_desc: rx descriptor 311 **/ 312static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, 313 struct sk_buff *skb, u8 status, 314 union ixgbe_adv_rx_desc *rx_desc) 315{ 316 struct ixgbevf_adapter *adapter = q_vector->adapter; 317 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 318 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 319 320 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) 321 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 322 323 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 324 napi_gro_receive(&q_vector->napi, skb); 325 else 326 netif_rx(skb); 327} 328 329/** 330 * ixgbevf_rx_skb - Helper function to determine proper Rx method 331 * @q_vector: structure containing interrupt and ring information 332 * @skb: packet to send up 333 * @status: hardware indication of status of receive 334 * @rx_desc: rx descriptor 335 **/ 336static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, 337 struct sk_buff *skb, u8 status, 338 union ixgbe_adv_rx_desc *rx_desc) 339{ 340#ifdef CONFIG_NET_RX_BUSY_POLL 341 skb_mark_napi_id(skb, &q_vector->napi); 342 343 if (ixgbevf_qv_busy_polling(q_vector)) { 344 netif_receive_skb(skb); 345 /* exit early if we busy polled */ 346 return; 347 } 348#endif /* CONFIG_NET_RX_BUSY_POLL */ 349 350 ixgbevf_receive_skb(q_vector, skb, status, rx_desc); 351} 352 353/** 354 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 355 * @ring: pointer to Rx descriptor ring structure 356 * @status_err: hardware indication of status of receive 357 * @skb: skb currently being received and modified 358 **/ 359static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, 360 u32 status_err, struct sk_buff *skb) 361{ 362 skb_checksum_none_assert(skb); 363 364 /* Rx csum disabled */ 365 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 366 return; 367 368 /* if IP and error */ 369 if ((status_err & IXGBE_RXD_STAT_IPCS) && 370 (status_err & IXGBE_RXDADV_ERR_IPE)) { 371 ring->rx_stats.csum_err++; 372 return; 373 } 374 375 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 376 return; 377 378 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 379 ring->rx_stats.csum_err++; 380 return; 381 } 382 383 /* It must be a TCP or UDP packet with a valid checksum */ 384 skb->ip_summed = CHECKSUM_UNNECESSARY; 385} 386 387/** 388 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 389 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on 390 **/ 391static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, 392 int cleaned_count) 393{ 394 union ixgbe_adv_rx_desc *rx_desc; 395 struct ixgbevf_rx_buffer *bi; 396 unsigned int i = rx_ring->next_to_use; 397 398 while (cleaned_count--) { 399 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 400 bi = &rx_ring->rx_buffer_info[i]; 401 402 if (!bi->skb) { 403 struct sk_buff *skb; 404 405 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 406 rx_ring->rx_buf_len); 407 if (!skb) 408 goto no_buffers; 409 410 bi->skb = skb; 411 412 bi->dma = 
dma_map_single(rx_ring->dev, skb->data, 413 rx_ring->rx_buf_len, 414 DMA_FROM_DEVICE); 415 if (dma_mapping_error(rx_ring->dev, bi->dma)) { 416 dev_kfree_skb(skb); 417 bi->skb = NULL; 418 dev_err(rx_ring->dev, "Rx DMA map failed\n"); 419 break; 420 } 421 } 422 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 423 424 i++; 425 if (i == rx_ring->count) 426 i = 0; 427 } 428 429no_buffers: 430 rx_ring->rx_stats.alloc_rx_buff_failed++; 431 if (rx_ring->next_to_use != i) 432 ixgbevf_release_rx_desc(rx_ring, i); 433} 434 435static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 436 u32 qmask) 437{ 438 struct ixgbe_hw *hw = &adapter->hw; 439 440 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); 441} 442 443static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 444 struct ixgbevf_ring *rx_ring, 445 int budget) 446{ 447 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 448 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; 449 struct sk_buff *skb; 450 unsigned int i; 451 u32 len, staterr; 452 int cleaned_count = 0; 453 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 454 455 i = rx_ring->next_to_clean; 456 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 457 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 458 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 459 460 while (staterr & IXGBE_RXD_STAT_DD) { 461 if (!budget) 462 break; 463 budget--; 464 465 rmb(); /* read descriptor and rx_buffer_info after status DD */ 466 len = le16_to_cpu(rx_desc->wb.upper.length); 467 skb = rx_buffer_info->skb; 468 prefetch(skb->data - NET_IP_ALIGN); 469 rx_buffer_info->skb = NULL; 470 471 if (rx_buffer_info->dma) { 472 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, 473 rx_ring->rx_buf_len, 474 DMA_FROM_DEVICE); 475 rx_buffer_info->dma = 0; 476 skb_put(skb, len); 477 } 478 479 i++; 480 if (i == rx_ring->count) 481 i = 0; 482 483 next_rxd = IXGBEVF_RX_DESC(rx_ring, i); 484 prefetch(next_rxd); 485 cleaned_count++; 486 487 next_buffer = &rx_ring->rx_buffer_info[i]; 488 489 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 490 skb->next = next_buffer->skb; 491 IXGBE_CB(skb->next)->prev = skb; 492 rx_ring->rx_stats.non_eop_descs++; 493 goto next_desc; 494 } 495 496 /* we should not be chaining buffers, if we did drop the skb */ 497 if (IXGBE_CB(skb)->prev) { 498 do { 499 struct sk_buff *this = skb; 500 skb = IXGBE_CB(skb)->prev; 501 dev_kfree_skb(this); 502 } while (skb); 503 goto next_desc; 504 } 505 506 /* ERR_MASK will only have valid bits if EOP set */ 507 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 508 dev_kfree_skb_irq(skb); 509 goto next_desc; 510 } 511 512 ixgbevf_rx_checksum(rx_ring, staterr, skb); 513 514 /* probably a little skewed due to removing CRC */ 515 total_rx_bytes += skb->len; 516 total_rx_packets++; 517 518 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 519 520 /* Workaround hardware that can't do proper VEPA multicast 521 * source pruning. 
522 */ 523 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && 524 ether_addr_equal(rx_ring->netdev->dev_addr, 525 eth_hdr(skb)->h_source)) { 526 dev_kfree_skb_irq(skb); 527 goto next_desc; 528 } 529 530 ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc); 531 532next_desc: 533 rx_desc->wb.upper.status_error = 0; 534 535 /* return some buffers to hardware, one at a time is too slow */ 536 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { 537 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); 538 cleaned_count = 0; 539 } 540 541 /* use prefetched values */ 542 rx_desc = next_rxd; 543 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 544 545 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 546 } 547 548 rx_ring->next_to_clean = i; 549 cleaned_count = ixgbevf_desc_unused(rx_ring); 550 551 if (cleaned_count) 552 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); 553 554 u64_stats_update_begin(&rx_ring->syncp); 555 rx_ring->stats.packets += total_rx_packets; 556 rx_ring->stats.bytes += total_rx_bytes; 557 u64_stats_update_end(&rx_ring->syncp); 558 q_vector->rx.total_packets += total_rx_packets; 559 q_vector->rx.total_bytes += total_rx_bytes; 560 561 return total_rx_packets; 562} 563 564/** 565 * ixgbevf_poll - NAPI polling callback 566 * @napi: napi struct with our device's info in it 567 * @budget: amount of work driver is allowed to do this pass, in packets 568 * 569 * This function will clean one or more rings associated with a 570 * q_vector. 571 **/ 572static int ixgbevf_poll(struct napi_struct *napi, int budget) 573{ 574 struct ixgbevf_q_vector *q_vector = 575 container_of(napi, struct ixgbevf_q_vector, napi); 576 struct ixgbevf_adapter *adapter = q_vector->adapter; 577 struct ixgbevf_ring *ring; 578 int per_ring_budget; 579 bool clean_complete = true; 580 581 ixgbevf_for_each_ring(ring, q_vector->tx) 582 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); 583 584#ifdef CONFIG_NET_RX_BUSY_POLL 585 if (!ixgbevf_qv_lock_napi(q_vector)) 586 return budget; 587#endif 588 589 /* attempt to distribute budget to each queue fairly, but don't allow 590 * the budget to go below 1 because we'll exit polling */ 591 if (q_vector->rx.count > 1) 592 per_ring_budget = max(budget/q_vector->rx.count, 1); 593 else 594 per_ring_budget = budget; 595 596 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 597 ixgbevf_for_each_ring(ring, q_vector->rx) 598 clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, 599 per_ring_budget) 600 < per_ring_budget); 601 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 602 603#ifdef CONFIG_NET_RX_BUSY_POLL 604 ixgbevf_qv_unlock_napi(q_vector); 605#endif 606 607 /* If all work not completed, return budget and keep polling */ 608 if (!clean_complete) 609 return budget; 610 /* all work done, exit the polling mode */ 611 napi_complete(napi); 612 if (adapter->rx_itr_setting & 1) 613 ixgbevf_set_itr(q_vector); 614 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 615 ixgbevf_irq_enable_queues(adapter, 616 1 << q_vector->v_idx); 617 618 return 0; 619} 620 621/** 622 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 623 * @q_vector: structure containing interrupt and ring information 624 */ 625void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) 626{ 627 struct ixgbevf_adapter *adapter = q_vector->adapter; 628 struct ixgbe_hw *hw = &adapter->hw; 629 int v_idx = q_vector->v_idx; 630 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 631 632 /* 633 * set the WDIS bit to not clear the timer bits and cause an 634 * immediate assertion of the interrupt 635 */ 636
itr_reg |= IXGBE_EITR_CNT_WDIS; 637 638 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 639} 640 641#ifdef CONFIG_NET_RX_BUSY_POLL 642/* must be called with local_bh_disable()d */ 643static int ixgbevf_busy_poll_recv(struct napi_struct *napi) 644{ 645 struct ixgbevf_q_vector *q_vector = 646 container_of(napi, struct ixgbevf_q_vector, napi); 647 struct ixgbevf_adapter *adapter = q_vector->adapter; 648 struct ixgbevf_ring *ring; 649 int found = 0; 650 651 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 652 return LL_FLUSH_FAILED; 653 654 if (!ixgbevf_qv_lock_poll(q_vector)) 655 return LL_FLUSH_BUSY; 656 657 ixgbevf_for_each_ring(ring, q_vector->rx) { 658 found = ixgbevf_clean_rx_irq(q_vector, ring, 4); 659#ifdef BP_EXTENDED_STATS 660 if (found) 661 ring->stats.cleaned += found; 662 else 663 ring->stats.misses++; 664#endif 665 if (found) 666 break; 667 } 668 669 ixgbevf_qv_unlock_poll(q_vector); 670 671 return found; 672} 673#endif /* CONFIG_NET_RX_BUSY_POLL */ 674 675/** 676 * ixgbevf_configure_msix - Configure MSI-X hardware 677 * @adapter: board private structure 678 * 679 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X 680 * interrupts. 681 **/ 682static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 683{ 684 struct ixgbevf_q_vector *q_vector; 685 int q_vectors, v_idx; 686 687 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 688 adapter->eims_enable_mask = 0; 689 690 /* 691 * Populate the IVAR table and set the ITR values to the 692 * corresponding register. 693 */ 694 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 695 struct ixgbevf_ring *ring; 696 q_vector = adapter->q_vector[v_idx]; 697 698 ixgbevf_for_each_ring(ring, q_vector->rx) 699 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); 700 701 ixgbevf_for_each_ring(ring, q_vector->tx) 702 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); 703 704 if (q_vector->tx.ring && !q_vector->rx.ring) { 705 /* tx only vector */ 706 if (adapter->tx_itr_setting == 1) 707 q_vector->itr = IXGBE_10K_ITR; 708 else 709 q_vector->itr = adapter->tx_itr_setting; 710 } else { 711 /* rx or rx/tx vector */ 712 if (adapter->rx_itr_setting == 1) 713 q_vector->itr = IXGBE_20K_ITR; 714 else 715 q_vector->itr = adapter->rx_itr_setting; 716 } 717 718 /* add q_vector eims value to global eims_enable_mask */ 719 adapter->eims_enable_mask |= 1 << v_idx; 720 721 ixgbevf_write_eitr(q_vector); 722 } 723 724 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 725 /* setup eims_other and add value to global eims_enable_mask */ 726 adapter->eims_other = 1 << v_idx; 727 adapter->eims_enable_mask |= adapter->eims_other; 728} 729 730enum latency_range { 731 lowest_latency = 0, 732 low_latency = 1, 733 bulk_latency = 2, 734 latency_invalid = 255 735}; 736 737/** 738 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 739 * @q_vector: structure containing interrupt and ring information 740 * @ring_container: structure containing ring performance data 741 * 742 * Stores a new ITR value based on packets and byte 743 * counts during the last interrupt. The advantage of per interrupt 744 * computation is faster updates and more accurate ITR for the current 745 * traffic pattern. Constants in this function were computed 746 * based on theoretical maximum wire speed and thresholds were set based 747 * on testing data as well as attempting to minimize response time 748 * while increasing bulk throughput. 
749 **/ 750static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, 751 struct ixgbevf_ring_container *ring_container) 752{ 753 int bytes = ring_container->total_bytes; 754 int packets = ring_container->total_packets; 755 u32 timepassed_us; 756 u64 bytes_perint; 757 u8 itr_setting = ring_container->itr; 758 759 if (packets == 0) 760 return; 761 762 /* simple throttlerate management 763 * 0-20MB/s lowest (100000 ints/s) 764 * 20-100MB/s low (20000 ints/s) 765 * 100-1249MB/s bulk (8000 ints/s) 766 */ 767 /* what was last interrupt timeslice? */ 768 timepassed_us = q_vector->itr >> 2; 769 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 770 771 switch (itr_setting) { 772 case lowest_latency: 773 if (bytes_perint > 10) 774 itr_setting = low_latency; 775 break; 776 case low_latency: 777 if (bytes_perint > 20) 778 itr_setting = bulk_latency; 779 else if (bytes_perint <= 10) 780 itr_setting = lowest_latency; 781 break; 782 case bulk_latency: 783 if (bytes_perint <= 20) 784 itr_setting = low_latency; 785 break; 786 } 787 788 /* clear work counters since we have the values we need */ 789 ring_container->total_bytes = 0; 790 ring_container->total_packets = 0; 791 792 /* write updated itr to ring container */ 793 ring_container->itr = itr_setting; 794} 795 796static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) 797{ 798 u32 new_itr = q_vector->itr; 799 u8 current_itr; 800 801 ixgbevf_update_itr(q_vector, &q_vector->tx); 802 ixgbevf_update_itr(q_vector, &q_vector->rx); 803 804 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 805 806 switch (current_itr) { 807 /* counts and packets in update_itr are dependent on these numbers */ 808 case lowest_latency: 809 new_itr = IXGBE_100K_ITR; 810 break; 811 case low_latency: 812 new_itr = IXGBE_20K_ITR; 813 break; 814 case bulk_latency: 815 default: 816 new_itr = IXGBE_8K_ITR; 817 break; 818 } 819 820 if (new_itr != q_vector->itr) { 821 /* do an exponential smoothing */ 822 new_itr = (10 * new_itr * q_vector->itr) / 823 ((9 * new_itr) + q_vector->itr); 824 825 /* save the algorithm value here */ 826 q_vector->itr = new_itr; 827 828 ixgbevf_write_eitr(q_vector); 829 } 830} 831 832static irqreturn_t ixgbevf_msix_other(int irq, void *data) 833{ 834 struct ixgbevf_adapter *adapter = data; 835 struct ixgbe_hw *hw = &adapter->hw; 836 837 hw->mac.get_link_status = 1; 838 839 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 840 mod_timer(&adapter->watchdog_timer, jiffies); 841 842 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 843 844 return IRQ_HANDLED; 845} 846 847/** 848 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) 849 * @irq: unused 850 * @data: pointer to our q_vector struct for this interrupt vector 851 **/ 852static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) 853{ 854 struct ixgbevf_q_vector *q_vector = data; 855 856 /* EIAM disabled interrupts (on this vector) for us */ 857 if (q_vector->rx.ring || q_vector->tx.ring) 858 napi_schedule(&q_vector->napi); 859 860 return IRQ_HANDLED; 861} 862 863static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, 864 int r_idx) 865{ 866 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 867 868 a->rx_ring[r_idx]->next = q_vector->rx.ring; 869 q_vector->rx.ring = a->rx_ring[r_idx]; 870 q_vector->rx.count++; 871} 872 873static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, 874 int t_idx) 875{ 876 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 877 878 a->tx_ring[t_idx]->next = q_vector->tx.ring; 
879 q_vector->tx.ring = a->tx_ring[t_idx]; 880 q_vector->tx.count++; 881} 882 883/** 884 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors 885 * @adapter: board private structure to initialize 886 * 887 * This function maps descriptor rings to the queue-specific vectors 888 * we were allotted through the MSI-X enabling code. Ideally, we'd have 889 * one vector per ring/queue, but on a constrained vector budget, we 890 * group the rings as "efficiently" as possible. You would add new 891 * mapping configurations in here. 892 **/ 893static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) 894{ 895 int q_vectors; 896 int v_start = 0; 897 int rxr_idx = 0, txr_idx = 0; 898 int rxr_remaining = adapter->num_rx_queues; 899 int txr_remaining = adapter->num_tx_queues; 900 int i, j; 901 int rqpv, tqpv; 902 int err = 0; 903 904 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 905 906 /* 907 * The ideal configuration... 908 * We have enough vectors to map one per queue. 909 */ 910 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 911 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 912 map_vector_to_rxq(adapter, v_start, rxr_idx); 913 914 for (; txr_idx < txr_remaining; v_start++, txr_idx++) 915 map_vector_to_txq(adapter, v_start, txr_idx); 916 goto out; 917 } 918 919 /* 920 * If we don't have enough vectors for a 1-to-1 921 * mapping, we'll have to group them so there are 922 * multiple queues per vector. 923 */ 924 /* Re-adjusting *qpv takes care of the remainder. */ 925 for (i = v_start; i < q_vectors; i++) { 926 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); 927 for (j = 0; j < rqpv; j++) { 928 map_vector_to_rxq(adapter, i, rxr_idx); 929 rxr_idx++; 930 rxr_remaining--; 931 } 932 } 933 for (i = v_start; i < q_vectors; i++) { 934 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); 935 for (j = 0; j < tqpv; j++) { 936 map_vector_to_txq(adapter, i, txr_idx); 937 txr_idx++; 938 txr_remaining--; 939 } 940 } 941 942out: 943 return err; 944} 945 946/** 947 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts 948 * @adapter: board private structure 949 * 950 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests 951 * interrupts from the kernel. 
952 **/ 953static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 954{ 955 struct net_device *netdev = adapter->netdev; 956 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 957 int vector, err; 958 int ri = 0, ti = 0; 959 960 for (vector = 0; vector < q_vectors; vector++) { 961 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; 962 struct msix_entry *entry = &adapter->msix_entries[vector]; 963 964 if (q_vector->tx.ring && q_vector->rx.ring) { 965 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 966 "%s-%s-%d", netdev->name, "TxRx", ri++); 967 ti++; 968 } else if (q_vector->rx.ring) { 969 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 970 "%s-%s-%d", netdev->name, "rx", ri++); 971 } else if (q_vector->tx.ring) { 972 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 973 "%s-%s-%d", netdev->name, "tx", ti++); 974 } else { 975 /* skip this unused q_vector */ 976 continue; 977 } 978 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, 979 q_vector->name, q_vector); 980 if (err) { 981 hw_dbg(&adapter->hw, 982 "request_irq failed for MSIX interrupt " 983 "Error: %d\n", err); 984 goto free_queue_irqs; 985 } 986 } 987 988 err = request_irq(adapter->msix_entries[vector].vector, 989 &ixgbevf_msix_other, 0, netdev->name, adapter); 990 if (err) { 991 hw_dbg(&adapter->hw, 992 "request_irq for msix_other failed: %d\n", err); 993 goto free_queue_irqs; 994 } 995 996 return 0; 997 998free_queue_irqs: 999 while (vector) { 1000 vector--; 1001 free_irq(adapter->msix_entries[vector].vector, 1002 adapter->q_vector[vector]); 1003 } 1004 /* This failure is non-recoverable - it indicates the system is 1005 * out of MSIX vector resources and the VF driver cannot run 1006 * without them. Set the number of msix vectors to zero 1007 * indicating that not enough can be allocated. The error 1008 * will be returned to the user indicating device open failed. 1009 * Any further attempts to force the driver to open will also 1010 * fail. The only way to recover is to unload the driver and 1011 * reload it again. If the system has recovered some MSIX 1012 * vectors then it may succeed. 1013 */ 1014 adapter->num_msix_vectors = 0; 1015 return err; 1016} 1017 1018static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) 1019{ 1020 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1021 1022 for (i = 0; i < q_vectors; i++) { 1023 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; 1024 q_vector->rx.ring = NULL; 1025 q_vector->tx.ring = NULL; 1026 q_vector->rx.count = 0; 1027 q_vector->tx.count = 0; 1028 } 1029} 1030 1031/** 1032 * ixgbevf_request_irq - initialize interrupts 1033 * @adapter: board private structure 1034 * 1035 * Attempts to configure interrupts using the best available 1036 * capabilities of the hardware and kernel. 
1037 **/ 1038static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) 1039{ 1040 int err = 0; 1041 1042 err = ixgbevf_request_msix_irqs(adapter); 1043 1044 if (err) 1045 hw_dbg(&adapter->hw, 1046 "request_irq failed, Error %d\n", err); 1047 1048 return err; 1049} 1050 1051static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 1052{ 1053 int i, q_vectors; 1054 1055 q_vectors = adapter->num_msix_vectors; 1056 i = q_vectors - 1; 1057 1058 free_irq(adapter->msix_entries[i].vector, adapter); 1059 i--; 1060 1061 for (; i >= 0; i--) { 1062 /* free only the irqs that were actually requested */ 1063 if (!adapter->q_vector[i]->rx.ring && 1064 !adapter->q_vector[i]->tx.ring) 1065 continue; 1066 1067 free_irq(adapter->msix_entries[i].vector, 1068 adapter->q_vector[i]); 1069 } 1070 1071 ixgbevf_reset_q_vectors(adapter); 1072} 1073 1074/** 1075 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC 1076 * @adapter: board private structure 1077 **/ 1078static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 1079{ 1080 struct ixgbe_hw *hw = &adapter->hw; 1081 int i; 1082 1083 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); 1084 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 1085 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); 1086 1087 IXGBE_WRITE_FLUSH(hw); 1088 1089 for (i = 0; i < adapter->num_msix_vectors; i++) 1090 synchronize_irq(adapter->msix_entries[i].vector); 1091} 1092 1093/** 1094 * ixgbevf_irq_enable - Enable default interrupt generation settings 1095 * @adapter: board private structure 1096 **/ 1097static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) 1098{ 1099 struct ixgbe_hw *hw = &adapter->hw; 1100 1101 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); 1102 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); 1103 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); 1104} 1105 1106/** 1107 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset 1108 * @adapter: board private structure 1109 * @ring: structure containing ring specific data 1110 * 1111 * Configure the Tx descriptor ring after a reset. 
1112 **/ 1113static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, 1114 struct ixgbevf_ring *ring) 1115{ 1116 struct ixgbe_hw *hw = &adapter->hw; 1117 u64 tdba = ring->dma; 1118 int wait_loop = 10; 1119 u32 txdctl = IXGBE_TXDCTL_ENABLE; 1120 u8 reg_idx = ring->reg_idx; 1121 1122 /* disable queue to avoid issues while updating state */ 1123 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); 1124 IXGBE_WRITE_FLUSH(hw); 1125 1126 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); 1127 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32); 1128 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx), 1129 ring->count * sizeof(union ixgbe_adv_tx_desc)); 1130 1131 /* disable head writeback */ 1132 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0); 1133 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0); 1134 1135 /* enable relaxed ordering */ 1136 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx), 1137 (IXGBE_DCA_TXCTRL_DESC_RRO_EN | 1138 IXGBE_DCA_TXCTRL_DATA_RRO_EN)); 1139 1140 /* reset head and tail pointers */ 1141 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0); 1142 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0); 1143 ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx); 1144 1145 /* reset ntu and ntc to place SW in sync with hardware */ 1146 ring->next_to_clean = 0; 1147 ring->next_to_use = 0; 1148 1149 /* In order to avoid issues, WTHRESH + PTHRESH should always be equal 1150 * to or less than the number of on chip descriptors, which is 1151 * currently 40. 1152 */ 1153 txdctl |= (8 << 16); /* WTHRESH = 8 */ 1154 1155 /* Setting PTHRESH to 32 improves performance */ 1156 txdctl |= (1 << 8) | /* HTHRESH = 1 */ 1157 32; /* PTHRESH = 32 */ 1158 1159 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); 1160 1161 /* poll to verify queue is enabled */ 1162 do { 1163 usleep_range(1000, 2000); 1164 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx)); 1165 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); 1166 if (!wait_loop) 1167 pr_err("Could not enable Tx Queue %d\n", reg_idx); 1168} 1169 1170/** 1171 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset 1172 * @adapter: board private structure 1173 * 1174 * Configure the Tx unit of the MAC after a reset.
1175 **/ 1176static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) 1177{ 1178 u32 i; 1179 1180 /* Setup the HW Tx Head and Tail descriptor pointers */ 1181 for (i = 0; i < adapter->num_tx_queues; i++) 1182 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); 1183} 1184 1185#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1186 1187static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) 1188{ 1189 struct ixgbevf_ring *rx_ring; 1190 struct ixgbe_hw *hw = &adapter->hw; 1191 u32 srrctl; 1192 1193 rx_ring = adapter->rx_ring[index]; 1194 1195 srrctl = IXGBE_SRRCTL_DROP_EN; 1196 1197 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1198 1199 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> 1200 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1201 1202 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1203} 1204 1205static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) 1206{ 1207 struct ixgbe_hw *hw = &adapter->hw; 1208 1209 /* PSRTYPE must be initialized in 82599 */ 1210 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 1211 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | 1212 IXGBE_PSRTYPE_L2HDR; 1213 1214 if (adapter->num_rx_queues > 1) 1215 psrtype |= 1 << 29; 1216 1217 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1218} 1219 1220static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) 1221{ 1222 struct ixgbe_hw *hw = &adapter->hw; 1223 struct net_device *netdev = adapter->netdev; 1224 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1225 int i; 1226 u16 rx_buf_len; 1227 1228 /* notify the PF of our intent to use this size of frame */ 1229 ixgbevf_rlpml_set_vf(hw, max_frame); 1230 1231 /* PF will allow an extra 4 bytes past for vlan tagged frames */ 1232 max_frame += VLAN_HLEN; 1233 1234 /* 1235 * Allocate buffer sizes that fit well into 32K and 1236 * take into account max frame size of 9.5K 1237 */ 1238 if ((hw->mac.type == ixgbe_mac_X540_vf) && 1239 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) 1240 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1241 else if (max_frame <= IXGBEVF_RXBUFFER_2K) 1242 rx_buf_len = IXGBEVF_RXBUFFER_2K; 1243 else if (max_frame <= IXGBEVF_RXBUFFER_4K) 1244 rx_buf_len = IXGBEVF_RXBUFFER_4K; 1245 else if (max_frame <= IXGBEVF_RXBUFFER_8K) 1246 rx_buf_len = IXGBEVF_RXBUFFER_8K; 1247 else 1248 rx_buf_len = IXGBEVF_RXBUFFER_10K; 1249 1250 for (i = 0; i < adapter->num_rx_queues; i++) 1251 adapter->rx_ring[i]->rx_buf_len = rx_buf_len; 1252} 1253 1254#define IXGBEVF_MAX_RX_DESC_POLL 10 1255static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, 1256 struct ixgbevf_ring *ring) 1257{ 1258 struct ixgbe_hw *hw = &adapter->hw; 1259 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; 1260 u32 rxdctl; 1261 u8 reg_idx = ring->reg_idx; 1262 1263 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1264 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 1265 1266 /* write value back with RXDCTL.ENABLE bit cleared */ 1267 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); 1268 1269 /* the hardware may take up to 100us to really disable the rx queue */ 1270 do { 1271 udelay(10); 1272 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1273 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); 1274 1275 if (!wait_loop) 1276 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n", 1277 reg_idx); 1278} 1279 1280static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, 1281 struct ixgbevf_ring *ring) 1282{ 1283 struct ixgbe_hw *hw = &adapter->hw; 1284 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; 1285 u32 rxdctl; 1286 u8 reg_idx = 
ring->reg_idx; 1287 1288 do { 1289 usleep_range(1000, 2000); 1290 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1291 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); 1292 1293 if (!wait_loop) 1294 pr_err("RXDCTL.ENABLE queue %d not set while polling\n", 1295 reg_idx); 1296} 1297 1298static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, 1299 struct ixgbevf_ring *ring) 1300{ 1301 struct ixgbe_hw *hw = &adapter->hw; 1302 u64 rdba = ring->dma; 1303 u32 rxdctl; 1304 u8 reg_idx = ring->reg_idx; 1305 1306 /* disable queue to avoid issues while updating state */ 1307 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1308 ixgbevf_disable_rx_queue(adapter, ring); 1309 1310 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); 1311 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32); 1312 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx), 1313 ring->count * sizeof(union ixgbe_adv_rx_desc)); 1314 1315 /* enable relaxed ordering */ 1316 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), 1317 IXGBE_DCA_RXCTRL_DESC_RRO_EN); 1318 1319 /* reset head and tail pointers */ 1320 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0); 1321 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0); 1322 ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx); 1323 1324 /* reset ntu and ntc to place SW in sync with hardware */ 1325 ring->next_to_clean = 0; 1326 ring->next_to_use = 0; 1327 1328 ixgbevf_configure_srrctl(adapter, reg_idx); 1329 1330 /* prevent DMA from exceeding buffer space available */ 1331 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; 1332 rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN; 1333 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1334 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); 1335 1336 ixgbevf_rx_desc_queue_enable(adapter, ring); 1337 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring)); 1338} 1339 1340/** 1341 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1342 * @adapter: board private structure 1343 * 1344 * Configure the Rx unit of the MAC after a reset.
1345 **/ 1346static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) 1347{ 1348 int i; 1349 1350 ixgbevf_setup_psrtype(adapter); 1351 1352 /* set_rx_buffer_len must be called before ring initialization */ 1353 ixgbevf_set_rx_buffer_len(adapter); 1354 1355 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1356 * the Base and Length of the Rx Descriptor Ring */ 1357 for (i = 0; i < adapter->num_rx_queues; i++) 1358 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); 1359} 1360 1361static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, 1362 __be16 proto, u16 vid) 1363{ 1364 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1365 struct ixgbe_hw *hw = &adapter->hw; 1366 int err; 1367 1368 spin_lock_bh(&adapter->mbx_lock); 1369 1370 /* add VID to filter table */ 1371 err = hw->mac.ops.set_vfta(hw, vid, 0, true); 1372 1373 spin_unlock_bh(&adapter->mbx_lock); 1374 1375 /* translate error return types so error makes sense */ 1376 if (err == IXGBE_ERR_MBX) 1377 return -EIO; 1378 1379 if (err == IXGBE_ERR_INVALID_ARGUMENT) 1380 return -EACCES; 1381 1382 set_bit(vid, adapter->active_vlans); 1383 1384 return err; 1385} 1386 1387static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, 1388 __be16 proto, u16 vid) 1389{ 1390 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1391 struct ixgbe_hw *hw = &adapter->hw; 1392 int err = -EOPNOTSUPP; 1393 1394 spin_lock_bh(&adapter->mbx_lock); 1395 1396 /* remove VID from filter table */ 1397 err = hw->mac.ops.set_vfta(hw, vid, 0, false); 1398 1399 spin_unlock_bh(&adapter->mbx_lock); 1400 1401 clear_bit(vid, adapter->active_vlans); 1402 1403 return err; 1404} 1405 1406static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 1407{ 1408 u16 vid; 1409 1410 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1411 ixgbevf_vlan_rx_add_vid(adapter->netdev, 1412 htons(ETH_P_8021Q), vid); 1413} 1414 1415static int ixgbevf_write_uc_addr_list(struct net_device *netdev) 1416{ 1417 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1418 struct ixgbe_hw *hw = &adapter->hw; 1419 int count = 0; 1420 1421 if ((netdev_uc_count(netdev)) > 10) { 1422 pr_err("Too many unicast filters - No Space\n"); 1423 return -ENOSPC; 1424 } 1425 1426 if (!netdev_uc_empty(netdev)) { 1427 struct netdev_hw_addr *ha; 1428 netdev_for_each_uc_addr(ha, netdev) { 1429 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); 1430 udelay(200); 1431 } 1432 } else { 1433 /* 1434 * If the list is empty then send message to PF driver to 1435 * clear all macvlans on this VF. 1436 */ 1437 hw->mac.ops.set_uc_addr(hw, 0, NULL); 1438 } 1439 1440 return count; 1441} 1442 1443/** 1444 * ixgbevf_set_rx_mode - Multicast and unicast set 1445 * @netdev: network interface device structure 1446 * 1447 * The set_rx_method entry point is called whenever the multicast address 1448 * list, unicast address list or the network interface flags are updated. 1449 * This routine is responsible for configuring the hardware for proper 1450 * multicast mode and configuring requested unicast filters. 
1451 **/ 1452static void ixgbevf_set_rx_mode(struct net_device *netdev) 1453{ 1454 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1455 struct ixgbe_hw *hw = &adapter->hw; 1456 1457 spin_lock_bh(&adapter->mbx_lock); 1458 1459 /* reprogram multicast list */ 1460 hw->mac.ops.update_mc_addr_list(hw, netdev); 1461 1462 ixgbevf_write_uc_addr_list(netdev); 1463 1464 spin_unlock_bh(&adapter->mbx_lock); 1465} 1466 1467static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1468{ 1469 int q_idx; 1470 struct ixgbevf_q_vector *q_vector; 1471 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1472 1473 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1474 q_vector = adapter->q_vector[q_idx]; 1475#ifdef CONFIG_NET_RX_BUSY_POLL 1476 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); 1477#endif 1478 napi_enable(&q_vector->napi); 1479 } 1480} 1481 1482static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) 1483{ 1484 int q_idx; 1485 struct ixgbevf_q_vector *q_vector; 1486 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1487 1488 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1489 q_vector = adapter->q_vector[q_idx]; 1490 napi_disable(&q_vector->napi); 1491#ifdef CONFIG_NET_RX_BUSY_POLL 1492 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { 1493 pr_info("QV %d locked\n", q_idx); 1494 usleep_range(1000, 20000); 1495 } 1496#endif /* CONFIG_NET_RX_BUSY_POLL */ 1497 } 1498} 1499 1500static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter) 1501{ 1502 struct ixgbe_hw *hw = &adapter->hw; 1503 unsigned int def_q = 0; 1504 unsigned int num_tcs = 0; 1505 unsigned int num_rx_queues = 1; 1506 int err; 1507 1508 spin_lock_bh(&adapter->mbx_lock); 1509 1510 /* fetch queue configuration from the PF */ 1511 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 1512 1513 spin_unlock_bh(&adapter->mbx_lock); 1514 1515 if (err) 1516 return err; 1517 1518 if (num_tcs > 1) { 1519 /* update default Tx ring register index */ 1520 adapter->tx_ring[0]->reg_idx = def_q; 1521 1522 /* we need as many queues as traffic classes */ 1523 num_rx_queues = num_tcs; 1524 } 1525 1526 /* if we have a bad config abort request queue reset */ 1527 if (adapter->num_rx_queues != num_rx_queues) { 1528 /* force mailbox timeout to prevent further messages */ 1529 hw->mbx.timeout = 0; 1530 1531 /* wait for watchdog to come around and bail us out */ 1532 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; 1533 } 1534 1535 return 0; 1536} 1537 1538static void ixgbevf_configure(struct ixgbevf_adapter *adapter) 1539{ 1540 ixgbevf_configure_dcb(adapter); 1541 1542 ixgbevf_set_rx_mode(adapter->netdev); 1543 1544 ixgbevf_restore_vlan(adapter); 1545 1546 ixgbevf_configure_tx(adapter); 1547 ixgbevf_configure_rx(adapter); 1548} 1549 1550static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) 1551{ 1552 /* Only save pre-reset stats if there are some */ 1553 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { 1554 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - 1555 adapter->stats.base_vfgprc; 1556 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - 1557 adapter->stats.base_vfgptc; 1558 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - 1559 adapter->stats.base_vfgorc; 1560 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - 1561 adapter->stats.base_vfgotc; 1562 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - 1563 adapter->stats.base_vfmprc; 1564 } 1565} 1566 1567static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) 
1568{ 1569 struct ixgbe_hw *hw = &adapter->hw; 1570 1571 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 1572 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 1573 adapter->stats.last_vfgorc |= 1574 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 1575 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 1576 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 1577 adapter->stats.last_vfgotc |= 1578 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 1579 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 1580 1581 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; 1582 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; 1583 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; 1584 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; 1585 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1586} 1587 1588static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) 1589{ 1590 struct ixgbe_hw *hw = &adapter->hw; 1591 int api[] = { ixgbe_mbox_api_11, 1592 ixgbe_mbox_api_10, 1593 ixgbe_mbox_api_unknown }; 1594 int err = 0, idx = 0; 1595 1596 spin_lock_bh(&adapter->mbx_lock); 1597 1598 while (api[idx] != ixgbe_mbox_api_unknown) { 1599 err = ixgbevf_negotiate_api_version(hw, api[idx]); 1600 if (!err) 1601 break; 1602 idx++; 1603 } 1604 1605 spin_unlock_bh(&adapter->mbx_lock); 1606} 1607 1608static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1609{ 1610 struct net_device *netdev = adapter->netdev; 1611 struct ixgbe_hw *hw = &adapter->hw; 1612 1613 ixgbevf_configure_msix(adapter); 1614 1615 spin_lock_bh(&adapter->mbx_lock); 1616 1617 if (is_valid_ether_addr(hw->mac.addr)) 1618 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 1619 else 1620 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1621 1622 spin_unlock_bh(&adapter->mbx_lock); 1623 1624 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1625 ixgbevf_napi_enable_all(adapter); 1626 1627 /* enable transmits */ 1628 netif_tx_start_all_queues(netdev); 1629 1630 ixgbevf_save_reset_stats(adapter); 1631 ixgbevf_init_last_counter_stats(adapter); 1632 1633 hw->mac.get_link_status = 1; 1634 mod_timer(&adapter->watchdog_timer, jiffies); 1635} 1636 1637void ixgbevf_up(struct ixgbevf_adapter *adapter) 1638{ 1639 struct ixgbe_hw *hw = &adapter->hw; 1640 1641 ixgbevf_configure(adapter); 1642 1643 ixgbevf_up_complete(adapter); 1644 1645 /* clear any pending interrupts, may auto mask */ 1646 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1647 1648 ixgbevf_irq_enable(adapter); 1649} 1650 1651/** 1652 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 1653 * @rx_ring: ring to free buffers from 1654 **/ 1655static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) 1656{ 1657 unsigned long size; 1658 unsigned int i; 1659 1660 if (!rx_ring->rx_buffer_info) 1661 return; 1662 1663 /* Free all the Rx ring sk_buffs */ 1664 for (i = 0; i < rx_ring->count; i++) { 1665 struct ixgbevf_rx_buffer *rx_buffer_info; 1666 1667 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1668 if (rx_buffer_info->dma) { 1669 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, 1670 rx_ring->rx_buf_len, 1671 DMA_FROM_DEVICE); 1672 rx_buffer_info->dma = 0; 1673 } 1674 if (rx_buffer_info->skb) { 1675 struct sk_buff *skb = rx_buffer_info->skb; 1676 rx_buffer_info->skb = NULL; 1677 do { 1678 struct sk_buff *this = skb; 1679 skb = IXGBE_CB(skb)->prev; 1680 dev_kfree_skb(this); 1681 } while (skb); 1682 } 1683 } 1684 1685 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 1686 
memset(rx_ring->rx_buffer_info, 0, size); 1687 1688 /* Zero out the descriptor ring */ 1689 memset(rx_ring->desc, 0, rx_ring->size); 1690} 1691 1692/** 1693 * ixgbevf_clean_tx_ring - Free Tx Buffers 1694 * @tx_ring: ring to be cleaned 1695 **/ 1696static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) 1697{ 1698 struct ixgbevf_tx_buffer *tx_buffer_info; 1699 unsigned long size; 1700 unsigned int i; 1701 1702 if (!tx_ring->tx_buffer_info) 1703 return; 1704 1705 /* Free all the Tx ring sk_buffs */ 1706 for (i = 0; i < tx_ring->count; i++) { 1707 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1708 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1709 } 1710 1711 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 1712 memset(tx_ring->tx_buffer_info, 0, size); 1713 1714 memset(tx_ring->desc, 0, tx_ring->size); 1715} 1716 1717/** 1718 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues 1719 * @adapter: board private structure 1720 **/ 1721static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) 1722{ 1723 int i; 1724 1725 for (i = 0; i < adapter->num_rx_queues; i++) 1726 ixgbevf_clean_rx_ring(adapter->rx_ring[i]); 1727} 1728 1729/** 1730 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues 1731 * @adapter: board private structure 1732 **/ 1733static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) 1734{ 1735 int i; 1736 1737 for (i = 0; i < adapter->num_tx_queues; i++) 1738 ixgbevf_clean_tx_ring(adapter->tx_ring[i]); 1739} 1740 1741void ixgbevf_down(struct ixgbevf_adapter *adapter) 1742{ 1743 struct net_device *netdev = adapter->netdev; 1744 struct ixgbe_hw *hw = &adapter->hw; 1745 int i; 1746 1747 /* signal that we are down to the interrupt handler */ 1748 set_bit(__IXGBEVF_DOWN, &adapter->state); 1749 1750 /* disable all enabled rx queues */ 1751 for (i = 0; i < adapter->num_rx_queues; i++) 1752 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); 1753 1754 netif_tx_disable(netdev); 1755 1756 msleep(10); 1757 1758 netif_tx_stop_all_queues(netdev); 1759 1760 ixgbevf_irq_disable(adapter); 1761 1762 ixgbevf_napi_disable_all(adapter); 1763 1764 del_timer_sync(&adapter->watchdog_timer); 1765 /* can't call flush scheduled work here because it can deadlock 1766 * if linkwatch_event tries to acquire the rtnl_lock which we are 1767 * holding */ 1768 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) 1769 msleep(1); 1770 1771 /* disable transmits in the hardware now that interrupts are off */ 1772 for (i = 0; i < adapter->num_tx_queues; i++) { 1773 u8 reg_idx = adapter->tx_ring[i]->reg_idx; 1774 1775 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), 1776 IXGBE_TXDCTL_SWFLSH); 1777 } 1778 1779 netif_carrier_off(netdev); 1780 1781 if (!pci_channel_offline(adapter->pdev)) 1782 ixgbevf_reset(adapter); 1783 1784 ixgbevf_clean_all_tx_rings(adapter); 1785 ixgbevf_clean_all_rx_rings(adapter); 1786} 1787 1788void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1789{ 1790 WARN_ON(in_interrupt()); 1791 1792 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1793 msleep(1); 1794 1795 ixgbevf_down(adapter); 1796 ixgbevf_up(adapter); 1797 1798 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1799} 1800 1801void ixgbevf_reset(struct ixgbevf_adapter *adapter) 1802{ 1803 struct ixgbe_hw *hw = &adapter->hw; 1804 struct net_device *netdev = adapter->netdev; 1805 1806 if (hw->mac.ops.reset_hw(hw)) { 1807 hw_dbg(hw, "PF still resetting\n"); 1808 } else { 1809 hw->mac.ops.init_hw(hw); 1810 ixgbevf_negotiate_api(adapter); 1811 } 
1812 1813 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1814 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1815 netdev->addr_len); 1816 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1817 netdev->addr_len); 1818 } 1819} 1820 1821static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1822 int vectors) 1823{ 1824 int err = 0; 1825 int vector_threshold; 1826 1827 /* We'll want at least 2 (vector_threshold): 1828 * 1) TxQ[0] + RxQ[0] handler 1829 * 2) Other (Link Status Change, etc.) 1830 */ 1831 vector_threshold = MIN_MSIX_COUNT; 1832 1833 /* The more we get, the more we will assign to Tx/Rx Cleanup 1834 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1835 * Right now, we simply care about how many we'll get; we'll 1836 * set them up later while requesting irq's. 1837 */ 1838 while (vectors >= vector_threshold) { 1839 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1840 vectors); 1841 if (!err || err < 0) /* Success or a nasty failure. */ 1842 break; 1843 else /* err == number of vectors we should try again with */ 1844 vectors = err; 1845 } 1846 1847 if (vectors < vector_threshold) 1848 err = -ENOMEM; 1849 1850 if (err) { 1851 dev_err(&adapter->pdev->dev, 1852 "Unable to allocate MSI-X interrupts\n"); 1853 kfree(adapter->msix_entries); 1854 adapter->msix_entries = NULL; 1855 } else { 1856 /* 1857 * Adjust for only the vectors we'll use, which is minimum 1858 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 1859 * vectors we were allocated. 1860 */ 1861 adapter->num_msix_vectors = vectors; 1862 } 1863 1864 return err; 1865} 1866 1867/** 1868 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent 1869 * @adapter: board private structure to initialize 1870 * 1871 * This is the top level queue allocation routine. The order here is very 1872 * important, starting with the "most" number of features turned on at once, 1873 * and ending with the smallest set of features. This way large combinations 1874 * can be allocated if they're turned on, and smaller combinations are the 1875 * fallthrough conditions. 1876 * 1877 **/ 1878static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) 1879{ 1880 struct ixgbe_hw *hw = &adapter->hw; 1881 unsigned int def_q = 0; 1882 unsigned int num_tcs = 0; 1883 int err; 1884 1885 /* Start with base case */ 1886 adapter->num_rx_queues = 1; 1887 adapter->num_tx_queues = 1; 1888 1889 spin_lock_bh(&adapter->mbx_lock); 1890 1891 /* fetch queue configuration from the PF */ 1892 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 1893 1894 spin_unlock_bh(&adapter->mbx_lock); 1895 1896 if (err) 1897 return; 1898 1899 /* we need as many queues as traffic classes */ 1900 if (num_tcs > 1) 1901 adapter->num_rx_queues = num_tcs; 1902} 1903 1904/** 1905 * ixgbevf_alloc_queues - Allocate memory for all rings 1906 * @adapter: board private structure to initialize 1907 * 1908 * We allocate one ring per queue at run-time since we don't know the 1909 * number of queues at compile-time. The polling_netdev array is 1910 * intended for Multiqueue, but should work fine with a single queue. 
1911 **/ 1912static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) 1913{ 1914 struct ixgbevf_ring *ring; 1915 int rx = 0, tx = 0; 1916 1917 for (; tx < adapter->num_tx_queues; tx++) { 1918 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1919 if (!ring) 1920 goto err_allocation; 1921 1922 ring->dev = &adapter->pdev->dev; 1923 ring->netdev = adapter->netdev; 1924 ring->count = adapter->tx_ring_count; 1925 ring->queue_index = tx; 1926 ring->reg_idx = tx; 1927 1928 adapter->tx_ring[tx] = ring; 1929 } 1930 1931 for (; rx < adapter->num_rx_queues; rx++) { 1932 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1933 if (!ring) 1934 goto err_allocation; 1935 1936 ring->dev = &adapter->pdev->dev; 1937 ring->netdev = adapter->netdev; 1938 1939 ring->count = adapter->rx_ring_count; 1940 ring->queue_index = rx; 1941 ring->reg_idx = rx; 1942 1943 adapter->rx_ring[rx] = ring; 1944 } 1945 1946 return 0; 1947 1948err_allocation: 1949 while (tx) { 1950 kfree(adapter->tx_ring[--tx]); 1951 adapter->tx_ring[tx] = NULL; 1952 } 1953 1954 while (rx) { 1955 kfree(adapter->rx_ring[--rx]); 1956 adapter->rx_ring[rx] = NULL; 1957 } 1958 return -ENOMEM; 1959} 1960 1961/** 1962 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported 1963 * @adapter: board private structure to initialize 1964 * 1965 * Attempt to configure the interrupts using the best available 1966 * capabilities of the hardware and the kernel. 1967 **/ 1968static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 1969{ 1970 struct net_device *netdev = adapter->netdev; 1971 int err = 0; 1972 int vector, v_budget; 1973 1974 /* 1975 * It's easy to be greedy for MSI-X vectors, but it really 1976 * doesn't do us much good if we have a lot more vectors 1977 * than CPU's. So let's be conservative and only ask for 1978 * (roughly) the same number of vectors as there are CPU's. 1979 * The default is to use pairs of vectors. 1980 */ 1981 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); 1982 v_budget = min_t(int, v_budget, num_online_cpus()); 1983 v_budget += NON_Q_VECTORS; 1984 1985 /* A failure in MSI-X entry allocation isn't fatal, but it does 1986 * mean we disable MSI-X capabilities of the adapter. */ 1987 adapter->msix_entries = kcalloc(v_budget, 1988 sizeof(struct msix_entry), GFP_KERNEL); 1989 if (!adapter->msix_entries) { 1990 err = -ENOMEM; 1991 goto out; 1992 } 1993 1994 for (vector = 0; vector < v_budget; vector++) 1995 adapter->msix_entries[vector].entry = vector; 1996 1997 err = ixgbevf_acquire_msix_vectors(adapter, v_budget); 1998 if (err) 1999 goto out; 2000 2001 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 2002 if (err) 2003 goto out; 2004 2005 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 2006 2007out: 2008 return err; 2009} 2010 2011/** 2012 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 2013 * @adapter: board private structure to initialize 2014 * 2015 * We allocate one q_vector per queue interrupt. If allocation fails we 2016 * return -ENOMEM. 
2017 **/ 2018static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 2019{ 2020 int q_idx, num_q_vectors; 2021 struct ixgbevf_q_vector *q_vector; 2022 2023 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2024 2025 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2026 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 2027 if (!q_vector) 2028 goto err_out; 2029 q_vector->adapter = adapter; 2030 q_vector->v_idx = q_idx; 2031 netif_napi_add(adapter->netdev, &q_vector->napi, 2032 ixgbevf_poll, 64); 2033#ifdef CONFIG_NET_RX_BUSY_POLL 2034 napi_hash_add(&q_vector->napi); 2035#endif 2036 adapter->q_vector[q_idx] = q_vector; 2037 } 2038 2039 return 0; 2040 2041err_out: 2042 while (q_idx) { 2043 q_idx--; 2044 q_vector = adapter->q_vector[q_idx]; 2045#ifdef CONFIG_NET_RX_BUSY_POLL 2046 napi_hash_del(&q_vector->napi); 2047#endif 2048 netif_napi_del(&q_vector->napi); 2049 kfree(q_vector); 2050 adapter->q_vector[q_idx] = NULL; 2051 } 2052 return -ENOMEM; 2053} 2054 2055/** 2056 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 2057 * @adapter: board private structure to initialize 2058 * 2059 * This function frees the memory allocated to the q_vectors. In addition if 2060 * NAPI is enabled it will delete any references to the NAPI struct prior 2061 * to freeing the q_vector. 2062 **/ 2063static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 2064{ 2065 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2066 2067 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2068 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 2069 2070 adapter->q_vector[q_idx] = NULL; 2071#ifdef CONFIG_NET_RX_BUSY_POLL 2072 napi_hash_del(&q_vector->napi); 2073#endif 2074 netif_napi_del(&q_vector->napi); 2075 kfree(q_vector); 2076 } 2077} 2078 2079/** 2080 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 2081 * @adapter: board private structure 2082 * 2083 **/ 2084static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 2085{ 2086 pci_disable_msix(adapter->pdev); 2087 kfree(adapter->msix_entries); 2088 adapter->msix_entries = NULL; 2089} 2090 2091/** 2092 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 2093 * @adapter: board private structure to initialize 2094 * 2095 **/ 2096static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 2097{ 2098 int err; 2099 2100 /* Number of supported queues */ 2101 ixgbevf_set_num_queues(adapter); 2102 2103 err = ixgbevf_set_interrupt_capability(adapter); 2104 if (err) { 2105 hw_dbg(&adapter->hw, 2106 "Unable to setup interrupt capabilities\n"); 2107 goto err_set_interrupt; 2108 } 2109 2110 err = ixgbevf_alloc_q_vectors(adapter); 2111 if (err) { 2112 hw_dbg(&adapter->hw, "Unable to allocate memory for queue " 2113 "vectors\n"); 2114 goto err_alloc_q_vectors; 2115 } 2116 2117 err = ixgbevf_alloc_queues(adapter); 2118 if (err) { 2119 pr_err("Unable to allocate memory for queues\n"); 2120 goto err_alloc_queues; 2121 } 2122 2123 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " 2124 "Tx Queue count = %u\n", 2125 (adapter->num_rx_queues > 1) ? 
"Enabled" : 2126 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2127 2128 set_bit(__IXGBEVF_DOWN, &adapter->state); 2129 2130 return 0; 2131err_alloc_queues: 2132 ixgbevf_free_q_vectors(adapter); 2133err_alloc_q_vectors: 2134 ixgbevf_reset_interrupt_capability(adapter); 2135err_set_interrupt: 2136 return err; 2137} 2138 2139/** 2140 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings 2141 * @adapter: board private structure to clear interrupt scheme on 2142 * 2143 * We go through and clear interrupt specific resources and reset the structure 2144 * to pre-load conditions 2145 **/ 2146static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) 2147{ 2148 int i; 2149 2150 for (i = 0; i < adapter->num_tx_queues; i++) { 2151 kfree(adapter->tx_ring[i]); 2152 adapter->tx_ring[i] = NULL; 2153 } 2154 for (i = 0; i < adapter->num_rx_queues; i++) { 2155 kfree(adapter->rx_ring[i]); 2156 adapter->rx_ring[i] = NULL; 2157 } 2158 2159 adapter->num_tx_queues = 0; 2160 adapter->num_rx_queues = 0; 2161 2162 ixgbevf_free_q_vectors(adapter); 2163 ixgbevf_reset_interrupt_capability(adapter); 2164} 2165 2166/** 2167 * ixgbevf_sw_init - Initialize general software structures 2168 * (struct ixgbevf_adapter) 2169 * @adapter: board private structure to initialize 2170 * 2171 * ixgbevf_sw_init initializes the Adapter private data structure. 2172 * Fields are initialized based on PCI device information and 2173 * OS network device settings (MTU size). 2174 **/ 2175static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2176{ 2177 struct ixgbe_hw *hw = &adapter->hw; 2178 struct pci_dev *pdev = adapter->pdev; 2179 struct net_device *netdev = adapter->netdev; 2180 int err; 2181 2182 /* PCI config space info */ 2183 2184 hw->vendor_id = pdev->vendor; 2185 hw->device_id = pdev->device; 2186 hw->revision_id = pdev->revision; 2187 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2188 hw->subsystem_device_id = pdev->subsystem_device; 2189 2190 hw->mbx.ops.init_params(hw); 2191 2192 /* assume legacy case in which PF would only give VF 2 queues */ 2193 hw->mac.max_tx_queues = 2; 2194 hw->mac.max_rx_queues = 2; 2195 2196 /* lock to protect mailbox accesses */ 2197 spin_lock_init(&adapter->mbx_lock); 2198 2199 err = hw->mac.ops.reset_hw(hw); 2200 if (err) { 2201 dev_info(&pdev->dev, 2202 "PF still in reset state. 
Is the PF interface up?\n"); 2203 } else { 2204 err = hw->mac.ops.init_hw(hw); 2205 if (err) { 2206 pr_err("init_shared_code failed: %d\n", err); 2207 goto out; 2208 } 2209 ixgbevf_negotiate_api(adapter); 2210 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 2211 if (err) 2212 dev_info(&pdev->dev, "Error reading MAC address\n"); 2213 else if (is_zero_ether_addr(adapter->hw.mac.addr)) 2214 dev_info(&pdev->dev, 2215 "MAC address not assigned by administrator.\n"); 2216 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); 2217 } 2218 2219 if (!is_valid_ether_addr(netdev->dev_addr)) { 2220 dev_info(&pdev->dev, "Assigning random MAC address\n"); 2221 eth_hw_addr_random(netdev); 2222 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); 2223 } 2224 2225 /* Enable dynamic interrupt throttling rates */ 2226 adapter->rx_itr_setting = 1; 2227 adapter->tx_itr_setting = 1; 2228 2229 /* set default ring sizes */ 2230 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 2231 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 2232 2233 set_bit(__IXGBEVF_DOWN, &adapter->state); 2234 return 0; 2235 2236out: 2237 return err; 2238} 2239 2240#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2241 { \ 2242 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2243 if (current_counter < last_counter) \ 2244 counter += 0x100000000LL; \ 2245 last_counter = current_counter; \ 2246 counter &= 0xFFFFFFFF00000000LL; \ 2247 counter |= current_counter; \ 2248 } 2249 2250#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2251 { \ 2252 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 2253 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 2254 u64 current_counter = (current_counter_msb << 32) | \ 2255 current_counter_lsb; \ 2256 if (current_counter < last_counter) \ 2257 counter += 0x1000000000LL; \ 2258 last_counter = current_counter; \ 2259 counter &= 0xFFFFFFF000000000LL; \ 2260 counter |= current_counter; \ 2261 } 2262/** 2263 * ixgbevf_update_stats - Update the board statistics counters. 
2264 * @adapter: board private structure 2265 **/ 2266void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2267{ 2268 struct ixgbe_hw *hw = &adapter->hw; 2269 int i; 2270 2271 if (!adapter->link_up) 2272 return; 2273 2274 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2275 adapter->stats.vfgprc); 2276 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2277 adapter->stats.vfgptc); 2278 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2279 adapter->stats.last_vfgorc, 2280 adapter->stats.vfgorc); 2281 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2282 adapter->stats.last_vfgotc, 2283 adapter->stats.vfgotc); 2284 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2285 adapter->stats.vfmprc); 2286 2287 for (i = 0; i < adapter->num_rx_queues; i++) { 2288 adapter->hw_csum_rx_error += 2289 adapter->rx_ring[i]->hw_csum_rx_error; 2290 adapter->rx_ring[i]->hw_csum_rx_error = 0; 2291 } 2292} 2293 2294/** 2295 * ixgbevf_watchdog - Timer Call-back 2296 * @data: pointer to adapter cast into an unsigned long 2297 **/ 2298static void ixgbevf_watchdog(unsigned long data) 2299{ 2300 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2301 struct ixgbe_hw *hw = &adapter->hw; 2302 u32 eics = 0; 2303 int i; 2304 2305 /* 2306 * Do the watchdog outside of interrupt context due to the lovely 2307 * delays that some of the newer hardware requires 2308 */ 2309 2310 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2311 goto watchdog_short_circuit; 2312 2313 /* get one bit for every active tx/rx interrupt vector */ 2314 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2315 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2316 if (qv->rx.ring || qv->tx.ring) 2317 eics |= 1 << i; 2318 } 2319 2320 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2321 2322watchdog_short_circuit: 2323 schedule_work(&adapter->watchdog_task); 2324} 2325 2326/** 2327 * ixgbevf_tx_timeout - Respond to a Tx Hang 2328 * @netdev: network interface device structure 2329 **/ 2330static void ixgbevf_tx_timeout(struct net_device *netdev) 2331{ 2332 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2333 2334 /* Do the reset outside of interrupt context */ 2335 schedule_work(&adapter->reset_task); 2336} 2337 2338static void ixgbevf_reset_task(struct work_struct *work) 2339{ 2340 struct ixgbevf_adapter *adapter; 2341 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2342 2343 /* If we're already down or resetting, just bail */ 2344 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2345 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2346 return; 2347 2348 adapter->tx_timeout_count++; 2349 2350 ixgbevf_reinit_locked(adapter); 2351} 2352 2353/** 2354 * ixgbevf_watchdog_task - worker thread to bring link up 2355 * @work: pointer to work_struct containing our data 2356 **/ 2357static void ixgbevf_watchdog_task(struct work_struct *work) 2358{ 2359 struct ixgbevf_adapter *adapter = container_of(work, 2360 struct ixgbevf_adapter, 2361 watchdog_task); 2362 struct net_device *netdev = adapter->netdev; 2363 struct ixgbe_hw *hw = &adapter->hw; 2364 u32 link_speed = adapter->link_speed; 2365 bool link_up = adapter->link_up; 2366 s32 need_reset; 2367 2368 ixgbevf_queue_reset_subtask(adapter); 2369 2370 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2371 2372 /* 2373 * Always check the link on the watchdog because we have 2374 * no LSC interrupt 2375 */ 2376 spin_lock_bh(&adapter->mbx_lock); 2377 2378 need_reset = hw->mac.ops.check_link(hw, 
						      &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	if (need_reset) {
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		schedule_work(&adapter->reset_task);
		goto pf_has_reset;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			char *link_speed_string;
			switch (link_speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				link_speed_string = "10 Gbps";
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				link_speed_string = "1 Gbps";
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				link_speed_string = "100 Mbps";
				break;
			default:
				link_speed_string = "unknown speed";
				break;
			}
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up, %s\n", link_speed_string);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(tx_ring->dev, "Unable to allocate memory for the Tx descriptor ring\n");
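	/* the caller is responsible for cleaning up any rings that did
	 * allocate successfully
	 */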
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
2574 * 2575 * Return 0 on success, negative on failure 2576 **/ 2577static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2578{ 2579 int i, err = 0; 2580 2581 for (i = 0; i < adapter->num_rx_queues; i++) { 2582 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); 2583 if (!err) 2584 continue; 2585 hw_dbg(&adapter->hw, 2586 "Allocation for Rx Queue %u failed\n", i); 2587 break; 2588 } 2589 return err; 2590} 2591 2592/** 2593 * ixgbevf_free_rx_resources - Free Rx Resources 2594 * @rx_ring: ring to clean the resources from 2595 * 2596 * Free all receive software resources 2597 **/ 2598void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) 2599{ 2600 ixgbevf_clean_rx_ring(rx_ring); 2601 2602 vfree(rx_ring->rx_buffer_info); 2603 rx_ring->rx_buffer_info = NULL; 2604 2605 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, 2606 rx_ring->dma); 2607 2608 rx_ring->desc = NULL; 2609} 2610 2611/** 2612 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2613 * @adapter: board private structure 2614 * 2615 * Free all receive software resources 2616 **/ 2617static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2618{ 2619 int i; 2620 2621 for (i = 0; i < adapter->num_rx_queues; i++) 2622 if (adapter->rx_ring[i]->desc) 2623 ixgbevf_free_rx_resources(adapter->rx_ring[i]); 2624} 2625 2626/** 2627 * ixgbevf_open - Called when a network interface is made active 2628 * @netdev: network interface device structure 2629 * 2630 * Returns 0 on success, negative value on failure 2631 * 2632 * The open entry point is called when a network interface is made 2633 * active by the system (IFF_UP). At this point all resources needed 2634 * for transmit and receive operations are allocated, the interrupt 2635 * handler is registered with the OS, the watchdog timer is started, 2636 * and the stack is notified that the interface is ready. 2637 **/ 2638static int ixgbevf_open(struct net_device *netdev) 2639{ 2640 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2641 struct ixgbe_hw *hw = &adapter->hw; 2642 int err; 2643 2644 /* A previous failure to open the device because of a lack of 2645 * available MSIX vector resources may have reset the number 2646 * of msix vectors variable to zero. The only way to recover 2647 * is to unload/reload the driver and hope that the system has 2648 * been able to recover some MSIX vector resources. 2649 */ 2650 if (!adapter->num_msix_vectors) 2651 return -ENOMEM; 2652 2653 /* disallow open during test */ 2654 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2655 return -EBUSY; 2656 2657 if (hw->adapter_stopped) { 2658 ixgbevf_reset(adapter); 2659 /* if adapter is still stopped then PF isn't up and 2660 * the vf can't start. */ 2661 if (hw->adapter_stopped) { 2662 err = IXGBE_ERR_MBX; 2663 pr_err("Unable to start - perhaps the PF Driver isn't " 2664 "up yet\n"); 2665 goto err_setup_reset; 2666 } 2667 } 2668 2669 /* allocate transmit descriptors */ 2670 err = ixgbevf_setup_all_tx_resources(adapter); 2671 if (err) 2672 goto err_setup_tx; 2673 2674 /* allocate receive descriptors */ 2675 err = ixgbevf_setup_all_rx_resources(adapter); 2676 if (err) 2677 goto err_setup_rx; 2678 2679 ixgbevf_configure(adapter); 2680 2681 /* 2682 * Map the Tx/Rx rings to the vectors we were allotted. 
2683 * if request_irq will be called in this function map_rings 2684 * must be called *before* up_complete 2685 */ 2686 ixgbevf_map_rings_to_vectors(adapter); 2687 2688 ixgbevf_up_complete(adapter); 2689 2690 /* clear any pending interrupts, may auto mask */ 2691 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2692 err = ixgbevf_request_irq(adapter); 2693 if (err) 2694 goto err_req_irq; 2695 2696 ixgbevf_irq_enable(adapter); 2697 2698 return 0; 2699 2700err_req_irq: 2701 ixgbevf_down(adapter); 2702err_setup_rx: 2703 ixgbevf_free_all_rx_resources(adapter); 2704err_setup_tx: 2705 ixgbevf_free_all_tx_resources(adapter); 2706 ixgbevf_reset(adapter); 2707 2708err_setup_reset: 2709 2710 return err; 2711} 2712 2713/** 2714 * ixgbevf_close - Disables a network interface 2715 * @netdev: network interface device structure 2716 * 2717 * Returns 0, this is not allowed to fail 2718 * 2719 * The close entry point is called when an interface is de-activated 2720 * by the OS. The hardware is still under the drivers control, but 2721 * needs to be disabled. A global MAC reset is issued to stop the 2722 * hardware, and all transmit and receive resources are freed. 2723 **/ 2724static int ixgbevf_close(struct net_device *netdev) 2725{ 2726 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2727 2728 ixgbevf_down(adapter); 2729 ixgbevf_free_irq(adapter); 2730 2731 ixgbevf_free_all_tx_resources(adapter); 2732 ixgbevf_free_all_rx_resources(adapter); 2733 2734 return 0; 2735} 2736 2737static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) 2738{ 2739 struct net_device *dev = adapter->netdev; 2740 2741 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED)) 2742 return; 2743 2744 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; 2745 2746 /* if interface is down do nothing */ 2747 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2748 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2749 return; 2750 2751 /* Hardware has to reinitialize queues and interrupts to 2752 * match packet buffer alignment. Unfortunately, the 2753 * hardware is not flexible enough to do this dynamically. 2754 */ 2755 if (netif_running(dev)) 2756 ixgbevf_close(dev); 2757 2758 ixgbevf_clear_interrupt_scheme(adapter); 2759 ixgbevf_init_interrupt_scheme(adapter); 2760 2761 if (netif_running(dev)) 2762 ixgbevf_open(dev); 2763} 2764 2765static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, 2766 u32 vlan_macip_lens, u32 type_tucmd, 2767 u32 mss_l4len_idx) 2768{ 2769 struct ixgbe_adv_tx_context_desc *context_desc; 2770 u16 i = tx_ring->next_to_use; 2771 2772 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 2773 2774 i++; 2775 tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; 2776 2777 /* set bits to identify this as an advanced context descriptor */ 2778 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 2779 2780 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2781 context_desc->seqnum_seed = 0; 2782 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 2783 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2784} 2785 2786static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, 2787 struct ixgbevf_tx_buffer *first, 2788 u8 *hdr_len) 2789{ 2790 struct sk_buff *skb = first->skb; 2791 u32 vlan_macip_lens, type_tucmd; 2792 u32 mss_l4len_idx, l4len; 2793 2794 if (!skb_is_gso(skb)) 2795 return 0; 2796 2797 if (skb_header_cloned(skb)) { 2798 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2799 if (err) 2800 return err; 2801 } 2802 2803 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2804 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 2805 2806 if (skb->protocol == htons(ETH_P_IP)) { 2807 struct iphdr *iph = ip_hdr(skb); 2808 iph->tot_len = 0; 2809 iph->check = 0; 2810 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2811 iph->daddr, 0, 2812 IPPROTO_TCP, 2813 0); 2814 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2815 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 2816 IXGBE_TX_FLAGS_CSUM | 2817 IXGBE_TX_FLAGS_IPV4; 2818 } else if (skb_is_gso_v6(skb)) { 2819 ipv6_hdr(skb)->payload_len = 0; 2820 tcp_hdr(skb)->check = 2821 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2822 &ipv6_hdr(skb)->daddr, 2823 0, IPPROTO_TCP, 0); 2824 first->tx_flags |= IXGBE_TX_FLAGS_TSO | 2825 IXGBE_TX_FLAGS_CSUM; 2826 } 2827 2828 /* compute header lengths */ 2829 l4len = tcp_hdrlen(skb); 2830 *hdr_len += l4len; 2831 *hdr_len = skb_transport_offset(skb) + l4len; 2832 2833 /* update gso size and bytecount with header size */ 2834 first->gso_segs = skb_shinfo(skb)->gso_segs; 2835 first->bytecount += (first->gso_segs - 1) * *hdr_len; 2836 2837 /* mss_l4len_id: use 1 as index for TSO */ 2838 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; 2839 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 2840 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; 2841 2842 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 2843 vlan_macip_lens = skb_network_header_len(skb); 2844 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2845 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2846 2847 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2848 type_tucmd, mss_l4len_idx); 2849 2850 return 1; 2851} 2852 2853static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, 2854 struct ixgbevf_tx_buffer *first) 2855{ 2856 struct sk_buff *skb = first->skb; 2857 u32 vlan_macip_lens = 0; 2858 u32 mss_l4len_idx = 0; 2859 u32 type_tucmd = 0; 2860 2861 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2862 u8 l4_hdr = 0; 2863 switch (skb->protocol) { 2864 case __constant_htons(ETH_P_IP): 2865 vlan_macip_lens |= skb_network_header_len(skb); 2866 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2867 l4_hdr = ip_hdr(skb)->protocol; 2868 break; 2869 case __constant_htons(ETH_P_IPV6): 2870 vlan_macip_lens |= skb_network_header_len(skb); 2871 l4_hdr = ipv6_hdr(skb)->nexthdr; 2872 break; 2873 default: 2874 if (unlikely(net_ratelimit())) { 2875 dev_warn(tx_ring->dev, 2876 "partial checksum but proto=%x!\n", 2877 first->protocol); 2878 } 2879 break; 2880 } 2881 2882 switch (l4_hdr) { 2883 case IPPROTO_TCP: 2884 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 2885 mss_l4len_idx = tcp_hdrlen(skb) << 2886 IXGBE_ADVTXD_L4LEN_SHIFT; 2887 break; 2888 case IPPROTO_SCTP: 2889 type_tucmd |= 
IXGBE_ADVTXD_TUCMD_L4T_SCTP; 2890 mss_l4len_idx = sizeof(struct sctphdr) << 2891 IXGBE_ADVTXD_L4LEN_SHIFT; 2892 break; 2893 case IPPROTO_UDP: 2894 mss_l4len_idx = sizeof(struct udphdr) << 2895 IXGBE_ADVTXD_L4LEN_SHIFT; 2896 break; 2897 default: 2898 if (unlikely(net_ratelimit())) { 2899 dev_warn(tx_ring->dev, 2900 "partial checksum but l4 proto=%x!\n", 2901 l4_hdr); 2902 } 2903 break; 2904 } 2905 2906 /* update TX checksum flag */ 2907 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; 2908 } 2909 2910 /* vlan_macip_lens: MACLEN, VLAN tag */ 2911 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2912 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2913 2914 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2915 type_tucmd, mss_l4len_idx); 2916} 2917 2918static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2919 struct ixgbevf_tx_buffer *first) 2920{ 2921 dma_addr_t dma; 2922 struct sk_buff *skb = first->skb; 2923 struct ixgbevf_tx_buffer *tx_buffer_info; 2924 unsigned int len; 2925 unsigned int total = skb->len; 2926 unsigned int offset = 0, size; 2927 int count = 0; 2928 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2929 unsigned int f; 2930 int i; 2931 2932 i = tx_ring->next_to_use; 2933 2934 len = min(skb_headlen(skb), total); 2935 while (len) { 2936 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2937 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2938 2939 tx_buffer_info->tx_flags = first->tx_flags; 2940 dma = dma_map_single(tx_ring->dev, skb->data + offset, 2941 size, DMA_TO_DEVICE); 2942 if (dma_mapping_error(tx_ring->dev, dma)) 2943 goto dma_error; 2944 2945 /* record length, and DMA address */ 2946 dma_unmap_len_set(tx_buffer_info, len, size); 2947 dma_unmap_addr_set(tx_buffer_info, dma, dma); 2948 2949 len -= size; 2950 total -= size; 2951 offset += size; 2952 count++; 2953 i++; 2954 if (i == tx_ring->count) 2955 i = 0; 2956 } 2957 2958 for (f = 0; f < nr_frags; f++) { 2959 const struct skb_frag_struct *frag; 2960 2961 frag = &skb_shinfo(skb)->frags[f]; 2962 len = min((unsigned int)skb_frag_size(frag), total); 2963 offset = 0; 2964 2965 while (len) { 2966 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2967 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2968 2969 dma = skb_frag_dma_map(tx_ring->dev, frag, 2970 offset, size, DMA_TO_DEVICE); 2971 if (dma_mapping_error(tx_ring->dev, dma)) 2972 goto dma_error; 2973 2974 /* record length, and DMA address */ 2975 dma_unmap_len_set(tx_buffer_info, len, size); 2976 dma_unmap_addr_set(tx_buffer_info, dma, dma); 2977 2978 len -= size; 2979 total -= size; 2980 offset += size; 2981 count++; 2982 i++; 2983 if (i == tx_ring->count) 2984 i = 0; 2985 } 2986 if (total == 0) 2987 break; 2988 } 2989 2990 if (i == 0) 2991 i = tx_ring->count - 1; 2992 else 2993 i = i - 1; 2994 2995 first->next_to_watch = IXGBEVF_TX_DESC(tx_ring, i); 2996 first->time_stamp = jiffies; 2997 2998 return count; 2999 3000dma_error: 3001 dev_err(tx_ring->dev, "TX DMA map failed\n"); 3002 3003 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 3004 tx_buffer_info->dma = 0; 3005 count--; 3006 3007 /* clear timestamp and dma mappings for remaining portion of packet */ 3008 while (count >= 0) { 3009 count--; 3010 i--; 3011 if (i < 0) 3012 i += tx_ring->count; 3013 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3014 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 3015 } 3016 3017 return count; 3018} 3019 3020static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, 3021 struct ixgbevf_tx_buffer *first, 3022 
int count, u8 hdr_len) 3023{ 3024 union ixgbe_adv_tx_desc *tx_desc = NULL; 3025 struct sk_buff *skb = first->skb; 3026 struct ixgbevf_tx_buffer *tx_buffer_info; 3027 u32 olinfo_status = 0, cmd_type_len = 0; 3028 u32 tx_flags = first->tx_flags; 3029 unsigned int i; 3030 3031 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 3032 3033 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 3034 3035 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 3036 3037 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 3038 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 3039 3040 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 3041 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM; 3042 3043 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 3044 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 3045 3046 /* use index 1 context for tso */ 3047 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 3048 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 3049 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM; 3050 } 3051 3052 /* 3053 * Check Context must be set if Tx switch is enabled, which it 3054 * always is for case where virtual functions are running 3055 */ 3056 olinfo_status |= IXGBE_ADVTXD_CC; 3057 3058 olinfo_status |= ((skb->len - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 3059 3060 i = tx_ring->next_to_use; 3061 while (count--) { 3062 dma_addr_t dma; 3063 unsigned int len; 3064 3065 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3066 dma = dma_unmap_addr(tx_buffer_info, dma); 3067 len = dma_unmap_len(tx_buffer_info, len); 3068 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 3069 tx_desc->read.buffer_addr = cpu_to_le64(dma); 3070 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len | len); 3071 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3072 i++; 3073 if (i == tx_ring->count) 3074 i = 0; 3075 } 3076 3077 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 3078 3079 tx_ring->next_to_use = i; 3080} 3081 3082static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 3083{ 3084 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 3085 /* Herbert's original patch had: 3086 * smp_mb__after_netif_stop_queue(); 3087 * but since that doesn't exist yet, just open code it. */ 3088 smp_mb(); 3089 3090 /* We need to check again in a case another CPU has just 3091 * made room available. */ 3092 if (likely(ixgbevf_desc_unused(tx_ring) < size)) 3093 return -EBUSY; 3094 3095 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 3096 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 3097 ++tx_ring->tx_stats.restart_queue; 3098 3099 return 0; 3100} 3101 3102static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 3103{ 3104 if (likely(ixgbevf_desc_unused(tx_ring) >= size)) 3105 return 0; 3106 return __ixgbevf_maybe_stop_tx(tx_ring, size); 3107} 3108 3109static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3110{ 3111 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3112 struct ixgbevf_tx_buffer *first; 3113 struct ixgbevf_ring *tx_ring; 3114 int tso; 3115 u32 tx_flags = 0; 3116 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 3117#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 3118 unsigned short f; 3119#endif 3120 u8 hdr_len = 0; 3121 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); 3122 3123 if (!dst_mac || is_link_local_ether_addr(dst_mac)) { 3124 dev_kfree_skb(skb); 3125 return NETDEV_TX_OK; 3126 } 3127 3128 tx_ring = adapter->tx_ring[skb->queue_mapping]; 3129 3130 /* 3131 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 3132 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 3133 * + 2 desc gap to keep tail from touching head, 3134 * + 1 desc for context descriptor, 3135 * otherwise try next time 3136 */ 3137#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 3138 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 3139 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3140#else 3141 count += skb_shinfo(skb)->nr_frags; 3142#endif 3143 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 3144 tx_ring->tx_stats.tx_busy++; 3145 return NETDEV_TX_BUSY; 3146 } 3147 3148 /* record the location of the first descriptor for this packet */ 3149 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; 3150 first->skb = skb; 3151 first->bytecount = skb->len; 3152 first->gso_segs = 1; 3153 3154 if (vlan_tx_tag_present(skb)) { 3155 tx_flags |= vlan_tx_tag_get(skb); 3156 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3157 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3158 } 3159 3160 /* record initial flags and protocol */ 3161 first->tx_flags = tx_flags; 3162 first->protocol = vlan_get_protocol(skb); 3163 3164 tso = ixgbevf_tso(tx_ring, first, &hdr_len); 3165 if (tso < 0) 3166 goto out_drop; 3167 else 3168 ixgbevf_tx_csum(tx_ring, first); 3169 3170 ixgbevf_tx_queue(tx_ring, first, 3171 ixgbevf_tx_map(tx_ring, first), hdr_len); 3172 3173 /* Force memory writes to complete before letting h/w 3174 * know there are new descriptors to fetch. (Only 3175 * applicable for weak-ordered memory model archs, 3176 * such as IA-64). 
3177 */ 3178 wmb(); 3179 3180 writel(tx_ring->next_to_use, tx_ring->tail); 3181 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); 3182 3183 return NETDEV_TX_OK; 3184 3185out_drop: 3186 dev_kfree_skb_any(first->skb); 3187 first->skb = NULL; 3188 3189 return NETDEV_TX_OK; 3190} 3191 3192/** 3193 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 3194 * @netdev: network interface device structure 3195 * @p: pointer to an address structure 3196 * 3197 * Returns 0 on success, negative on failure 3198 **/ 3199static int ixgbevf_set_mac(struct net_device *netdev, void *p) 3200{ 3201 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3202 struct ixgbe_hw *hw = &adapter->hw; 3203 struct sockaddr *addr = p; 3204 3205 if (!is_valid_ether_addr(addr->sa_data)) 3206 return -EADDRNOTAVAIL; 3207 3208 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3209 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 3210 3211 spin_lock_bh(&adapter->mbx_lock); 3212 3213 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3214 3215 spin_unlock_bh(&adapter->mbx_lock); 3216 3217 return 0; 3218} 3219 3220/** 3221 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 3222 * @netdev: network interface device structure 3223 * @new_mtu: new value for maximum frame size 3224 * 3225 * Returns 0 on success, negative on failure 3226 **/ 3227static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3228{ 3229 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3230 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3231 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 3232 3233 switch (adapter->hw.api_version) { 3234 case ixgbe_mbox_api_11: 3235 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3236 break; 3237 default: 3238 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 3239 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3240 break; 3241 } 3242 3243 /* MTU < 68 is an error and causes problems on some kernels */ 3244 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 3245 return -EINVAL; 3246 3247 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 3248 netdev->mtu, new_mtu); 3249 /* must set new MTU before calling down or up */ 3250 netdev->mtu = new_mtu; 3251 3252 if (netif_running(netdev)) 3253 ixgbevf_reinit_locked(adapter); 3254 3255 return 0; 3256} 3257 3258static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) 3259{ 3260 struct net_device *netdev = pci_get_drvdata(pdev); 3261 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3262#ifdef CONFIG_PM 3263 int retval = 0; 3264#endif 3265 3266 netif_device_detach(netdev); 3267 3268 if (netif_running(netdev)) { 3269 rtnl_lock(); 3270 ixgbevf_down(adapter); 3271 ixgbevf_free_irq(adapter); 3272 ixgbevf_free_all_tx_resources(adapter); 3273 ixgbevf_free_all_rx_resources(adapter); 3274 rtnl_unlock(); 3275 } 3276 3277 ixgbevf_clear_interrupt_scheme(adapter); 3278 3279#ifdef CONFIG_PM 3280 retval = pci_save_state(pdev); 3281 if (retval) 3282 return retval; 3283 3284#endif 3285 pci_disable_device(pdev); 3286 3287 return 0; 3288} 3289 3290#ifdef CONFIG_PM 3291static int ixgbevf_resume(struct pci_dev *pdev) 3292{ 3293 struct net_device *netdev = pci_get_drvdata(pdev); 3294 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3295 u32 err; 3296 3297 pci_set_power_state(pdev, PCI_D0); 3298 pci_restore_state(pdev); 3299 /* 3300 * pci_restore_state clears dev->state_saved so call 3301 * pci_save_state to restore it. 
3302 */ 3303 pci_save_state(pdev); 3304 3305 err = pci_enable_device_mem(pdev); 3306 if (err) { 3307 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3308 return err; 3309 } 3310 pci_set_master(pdev); 3311 3312 ixgbevf_reset(adapter); 3313 3314 rtnl_lock(); 3315 err = ixgbevf_init_interrupt_scheme(adapter); 3316 rtnl_unlock(); 3317 if (err) { 3318 dev_err(&pdev->dev, "Cannot initialize interrupts\n"); 3319 return err; 3320 } 3321 3322 if (netif_running(netdev)) { 3323 err = ixgbevf_open(netdev); 3324 if (err) 3325 return err; 3326 } 3327 3328 netif_device_attach(netdev); 3329 3330 return err; 3331} 3332 3333#endif /* CONFIG_PM */ 3334static void ixgbevf_shutdown(struct pci_dev *pdev) 3335{ 3336 ixgbevf_suspend(pdev, PMSG_SUSPEND); 3337} 3338 3339static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3340 struct rtnl_link_stats64 *stats) 3341{ 3342 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3343 unsigned int start; 3344 u64 bytes, packets; 3345 const struct ixgbevf_ring *ring; 3346 int i; 3347 3348 ixgbevf_update_stats(adapter); 3349 3350 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3351 3352 for (i = 0; i < adapter->num_rx_queues; i++) { 3353 ring = adapter->rx_ring[i]; 3354 do { 3355 start = u64_stats_fetch_begin_bh(&ring->syncp); 3356 bytes = ring->stats.bytes; 3357 packets = ring->stats.packets; 3358 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3359 stats->rx_bytes += bytes; 3360 stats->rx_packets += packets; 3361 } 3362 3363 for (i = 0; i < adapter->num_tx_queues; i++) { 3364 ring = adapter->tx_ring[i]; 3365 do { 3366 start = u64_stats_fetch_begin_bh(&ring->syncp); 3367 bytes = ring->stats.bytes; 3368 packets = ring->stats.packets; 3369 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3370 stats->tx_bytes += bytes; 3371 stats->tx_packets += packets; 3372 } 3373 3374 return stats; 3375} 3376 3377static const struct net_device_ops ixgbevf_netdev_ops = { 3378 .ndo_open = ixgbevf_open, 3379 .ndo_stop = ixgbevf_close, 3380 .ndo_start_xmit = ixgbevf_xmit_frame, 3381 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3382 .ndo_get_stats64 = ixgbevf_get_stats, 3383 .ndo_validate_addr = eth_validate_addr, 3384 .ndo_set_mac_address = ixgbevf_set_mac, 3385 .ndo_change_mtu = ixgbevf_change_mtu, 3386 .ndo_tx_timeout = ixgbevf_tx_timeout, 3387 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3388 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3389#ifdef CONFIG_NET_RX_BUSY_POLL 3390 .ndo_busy_poll = ixgbevf_busy_poll_recv, 3391#endif 3392}; 3393 3394static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3395{ 3396 dev->netdev_ops = &ixgbevf_netdev_ops; 3397 ixgbevf_set_ethtool_ops(dev); 3398 dev->watchdog_timeo = 5 * HZ; 3399} 3400 3401/** 3402 * ixgbevf_probe - Device Initialization Routine 3403 * @pdev: PCI device information struct 3404 * @ent: entry in ixgbevf_pci_tbl 3405 * 3406 * Returns 0 on success, negative on failure 3407 * 3408 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3409 * The OS initialization, configuring of the adapter private structure, 3410 * and a hardware reset occur. 
3411 **/ 3412static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3413{ 3414 struct net_device *netdev; 3415 struct ixgbevf_adapter *adapter = NULL; 3416 struct ixgbe_hw *hw = NULL; 3417 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3418 static int cards_found; 3419 int err, pci_using_dac; 3420 3421 err = pci_enable_device(pdev); 3422 if (err) 3423 return err; 3424 3425 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { 3426 pci_using_dac = 1; 3427 } else { 3428 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3429 if (err) { 3430 dev_err(&pdev->dev, "No usable DMA " 3431 "configuration, aborting\n"); 3432 goto err_dma; 3433 } 3434 pci_using_dac = 0; 3435 } 3436 3437 err = pci_request_regions(pdev, ixgbevf_driver_name); 3438 if (err) { 3439 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3440 goto err_pci_reg; 3441 } 3442 3443 pci_set_master(pdev); 3444 3445 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3446 MAX_TX_QUEUES); 3447 if (!netdev) { 3448 err = -ENOMEM; 3449 goto err_alloc_etherdev; 3450 } 3451 3452 SET_NETDEV_DEV(netdev, &pdev->dev); 3453 3454 pci_set_drvdata(pdev, netdev); 3455 adapter = netdev_priv(netdev); 3456 3457 adapter->netdev = netdev; 3458 adapter->pdev = pdev; 3459 hw = &adapter->hw; 3460 hw->back = adapter; 3461 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3462 3463 /* 3464 * call save state here in standalone driver because it relies on 3465 * adapter struct to exist, and needs to call netdev_priv 3466 */ 3467 pci_save_state(pdev); 3468 3469 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3470 pci_resource_len(pdev, 0)); 3471 if (!hw->hw_addr) { 3472 err = -EIO; 3473 goto err_ioremap; 3474 } 3475 3476 ixgbevf_assign_netdev_ops(netdev); 3477 3478 adapter->bd_number = cards_found; 3479 3480 /* Setup hw api */ 3481 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3482 hw->mac.type = ii->mac; 3483 3484 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3485 sizeof(struct ixgbe_mbx_operations)); 3486 3487 /* setup the private structure */ 3488 err = ixgbevf_sw_init(adapter); 3489 if (err) 3490 goto err_sw_init; 3491 3492 /* The HW MAC address was set and/or determined in sw_init */ 3493 if (!is_valid_ether_addr(netdev->dev_addr)) { 3494 pr_err("invalid MAC address\n"); 3495 err = -EIO; 3496 goto err_sw_init; 3497 } 3498 3499 netdev->hw_features = NETIF_F_SG | 3500 NETIF_F_IP_CSUM | 3501 NETIF_F_IPV6_CSUM | 3502 NETIF_F_TSO | 3503 NETIF_F_TSO6 | 3504 NETIF_F_RXCSUM; 3505 3506 netdev->features = netdev->hw_features | 3507 NETIF_F_HW_VLAN_CTAG_TX | 3508 NETIF_F_HW_VLAN_CTAG_RX | 3509 NETIF_F_HW_VLAN_CTAG_FILTER; 3510 3511 netdev->vlan_features |= NETIF_F_TSO; 3512 netdev->vlan_features |= NETIF_F_TSO6; 3513 netdev->vlan_features |= NETIF_F_IP_CSUM; 3514 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3515 netdev->vlan_features |= NETIF_F_SG; 3516 3517 if (pci_using_dac) 3518 netdev->features |= NETIF_F_HIGHDMA; 3519 3520 netdev->priv_flags |= IFF_UNICAST_FLT; 3521 3522 init_timer(&adapter->watchdog_timer); 3523 adapter->watchdog_timer.function = ixgbevf_watchdog; 3524 adapter->watchdog_timer.data = (unsigned long)adapter; 3525 3526 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3527 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3528 3529 err = ixgbevf_init_interrupt_scheme(adapter); 3530 if (err) 3531 goto err_sw_init; 3532 3533 strcpy(netdev->name, "eth%d"); 3534 3535 err = register_netdev(netdev); 3536 if (err) 3537 goto err_register; 
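
	/* carrier off reporting is important to ethtool even before open */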
	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 */
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
3659 * @pdev: Pointer to PCI device 3660 * 3661 * This callback is called when the error recovery driver tells us that 3662 * its OK to resume normal operation. Implementation resembles the 3663 * second-half of the ixgbevf_resume routine. 3664 */ 3665static void ixgbevf_io_resume(struct pci_dev *pdev) 3666{ 3667 struct net_device *netdev = pci_get_drvdata(pdev); 3668 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3669 3670 if (netif_running(netdev)) 3671 ixgbevf_up(adapter); 3672 3673 netif_device_attach(netdev); 3674} 3675 3676/* PCI Error Recovery (ERS) */ 3677static const struct pci_error_handlers ixgbevf_err_handler = { 3678 .error_detected = ixgbevf_io_error_detected, 3679 .slot_reset = ixgbevf_io_slot_reset, 3680 .resume = ixgbevf_io_resume, 3681}; 3682 3683static struct pci_driver ixgbevf_driver = { 3684 .name = ixgbevf_driver_name, 3685 .id_table = ixgbevf_pci_tbl, 3686 .probe = ixgbevf_probe, 3687 .remove = ixgbevf_remove, 3688#ifdef CONFIG_PM 3689 /* Power Management Hooks */ 3690 .suspend = ixgbevf_suspend, 3691 .resume = ixgbevf_resume, 3692#endif 3693 .shutdown = ixgbevf_shutdown, 3694 .err_handler = &ixgbevf_err_handler 3695}; 3696 3697/** 3698 * ixgbevf_init_module - Driver Registration Routine 3699 * 3700 * ixgbevf_init_module is the first routine called when the driver is 3701 * loaded. All it does is register with the PCI subsystem. 3702 **/ 3703static int __init ixgbevf_init_module(void) 3704{ 3705 int ret; 3706 pr_info("%s - version %s\n", ixgbevf_driver_string, 3707 ixgbevf_driver_version); 3708 3709 pr_info("%s\n", ixgbevf_copyright); 3710 3711 ret = pci_register_driver(&ixgbevf_driver); 3712 return ret; 3713} 3714 3715module_init(ixgbevf_init_module); 3716 3717/** 3718 * ixgbevf_exit_module - Driver Exit Cleanup Routine 3719 * 3720 * ixgbevf_exit_module is called just before the driver is removed 3721 * from memory. 3722 **/ 3723static void __exit ixgbevf_exit_module(void) 3724{ 3725 pci_unregister_driver(&ixgbevf_driver); 3726} 3727 3728#ifdef DEBUG 3729/** 3730 * ixgbevf_get_hw_dev_name - return device name string 3731 * used by hardware layer to print debugging information 3732 **/ 3733char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3734{ 3735 struct ixgbevf_adapter *adapter = hw->back; 3736 return adapter->netdev->name; 3737} 3738 3739#endif 3740module_exit(ixgbevf_exit_module); 3741 3742/* ixgbevf_main.c */ 3743