ixgbevf_main.c revision 4b2cd27f834e526f933aa63ce91964b7581271f4
/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.6.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	 board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
	 board_X540_vf},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
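/*
 * Added note (not in the original source): illustrative sketch of the
 * VTIVAR layout assumed by ixgbevf_set_ivar() below.  Each 32-bit IVAR
 * register holds four 8-bit entries covering a pair of queues:
 *
 *   bits  7:0  - Rx cause, even queue    bits 23:16 - Rx cause, odd queue
 *   bits 15:8  - Tx cause, even queue    bits 31:24 - Tx cause, odd queue
 *
 * Worked example: queue 3, direction 0 (Rx) selects VTIVAR(3 >> 1) ==
 * VTIVAR(1) and index = 16 * (3 & 1) + 8 * 0 == 16, so byte 2 of that
 * register receives the vector number (with IXGBE_IVAR_ALLOC_VAL set).
 */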
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);
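/*
 * Added note (not in the original source): worked example of the
 * worst-case descriptor math above.  A single data descriptor carries at
 * most IXGBE_MAX_DATA_PER_TXD == 16384 bytes, so a 60000-byte buffer
 * costs TXD_USE_COUNT(60000) == DIV_ROUND_UP(60000, 16384) == 4
 * descriptors.  DESC_NEEDED budgets one descriptor per possible frag
 * plus four spare slots (e.g. head data and a context descriptor); it is
 * also the margin behind the TX_WAKE_THRESHOLD check further down.
 */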
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);

	return count < tx_ring->count;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) the packet came from
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				struct ixgbevf_ring *ring,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @ring: ring on which the packet was received
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}
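/*
 * Added note (not in the original source): summary of the checksum
 * decisions made by ixgbevf_rx_checksum() above.
 *
 *   IPCS set and IPE set  -> count hw_csum_rx_error, leave CHECKSUM_NONE
 *   L4CS clear            -> no L4 offload result, leave CHECKSUM_NONE
 *   L4CS set and TCPE set -> count hw_csum_rx_error, leave CHECKSUM_NONE
 *   L4CS set, no errors   -> CHECKSUM_UNNECESSARY, count hw_csum_rx_good
 */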
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			skb->next->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	return !!budget;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
						       per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
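/*
 * Added note (not in the original source): the itr values used below and
 * in ixgbevf_update_itr() are kept in EITR register layout, where
 * (itr >> 2) approximates the interrupt interval in usecs (see the
 * timepassed_us computation further down).  Assuming the usual ixgbevf.h
 * constants, IXGBE_20K_ITR (200) >> 2 == 50 usecs, i.e. roughly 20000
 * interrupts/sec, and IXGBE_10K_ITR gives ~100 usecs.
 */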
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
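	/*
	 * Added note (not in the original source): bytes_perint below is
	 * bytes per usec, so one unit is roughly 1 MB/s.  Example: a
	 * 50 usec timeslice that carried 1000 bytes gives
	 * bytes_perint == 1000 / 50 == 20, i.e. ~20 MB/s, right at the
	 * lowest/low boundary in the table above.
	 */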
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}
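/*
 * Added note (not in the original source): worked example of the vector
 * grouping math in ixgbevf_map_rings_to_vectors() below.  With 3 Rx
 * rings and 2 q_vectors, the first pass assigns
 * DIV_ROUND_UP(3, 2) == 2 rings to vector 0, then DIV_ROUND_UP(1, 1)
 * == 1 ring to vector 1, so the remainder is absorbed evenly.
 */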
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}
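/*
 * Added note (not in the original source): with the default single
 * combined q_vector this ends up with two IRQs, e.g. "eth0-TxRx-0" for
 * the queue vector and one named after the netdev itself for the
 * mailbox/link-status ("other") cause, per the snprintf() formats above.
 */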
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Make best use of allocation by using all but 1K of a
	 * power of 2 allocation that will be used for skb->head.
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_3K)
		rx_buf_len = IXGBEVF_RXBUFFER_3K;
	else if (max_frame <= IXGBEVF_RXBUFFER_7K)
		rx_buf_len = IXGBEVF_RXBUFFER_7K;
	else if (max_frame <= IXGBEVF_RXBUFFER_15K)
		rx_buf_len = IXGBEVF_RXBUFFER_15K;
	else
		rx_buf_len = IXGBEVF_MAX_RXBUFFER;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}
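/*
 * Added note (not in the original source): worked example of the buffer
 * sizing above, assuming the usual constant values.  With MTU 1500,
 * max_frame = 1500 + ETH_HLEN(14) + ETH_FCS_LEN(4) = 1518 and, after the
 * VLAN_HLEN allowance, 1522, so an X540 VF picks
 * MAXIMUM_ETHERNET_VLAN_SIZE.  With MTU 4000, max_frame = 4022 exceeds
 * the 3K bucket and rx_buf_len becomes IXGBEVF_RXBUFFER_7K.
 */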
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 rdlen;

	/* PSRTYPE must be initialized in 82599 */
	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);

		ixgbevf_configure_srrctl(adapter, j);
	}
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock(&adapter->mbx_lock);

	/* add VID to filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock(&adapter->mbx_lock);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock(&adapter->mbx_lock);

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The ixgbevf_set_rx_mode entry point is called whenever the multicast
 * address list or the network interface flags are updated.  This routine
 * is responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock(&adapter->mbx_lock);

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
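/*
 * Added note (not in the original source): the VF octet counters
 * (VFGORC/VFGOTC) are 36 bits wide, split across an LSB register (lower
 * 32 bits) and an MSB register (upper 4 bits), which is why
 * ixgbevf_init_last_counter_stats() below and the
 * UPDATE_VF_COUNTER_36bit() macro stitch the two halves together.
 */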
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock(&adapter->mbx_lock);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	spin_unlock(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_negotiate_api(adapter);

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	spin_lock(&adapter->mbx_lock);

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	spin_unlock(&adapter->mbx_lock);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
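/*
 * Added note (not in the original source): the retry loop in
 * ixgbevf_acquire_msix_vectors() below relies on the old
 * pci_enable_msix() contract: 0 means all requested vectors were
 * granted, a positive return is the number of vectors that could be
 * granted (so the request is retried with that count), and a negative
 * value is a hard error.
 */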
static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		adapter->tx_ring[i].reg_idx = i;
		adapter->tx_ring[i].dev = &adapter->pdev->dev;
		adapter->tx_ring[i].netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
		adapter->rx_ring[i].dev = &adapter->pdev->dev;
		adapter->rx_ring[i].netdev = adapter->netdev;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
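/*
 * Added note (not in the original source): worked example of the vector
 * budget computed in ixgbevf_set_interrupt_capability() below.  With the
 * default single Rx and single Tx queue, v_budget =
 * min(max(1, 1), num_online_cpus()) + NON_Q_VECTORS; assuming
 * NON_Q_VECTORS == 1 that is two vectors on any SMP box, i.e. one queue
 * vector plus the mailbox/link ("other") vector.
 */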
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter.
	 */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	ixgbevf_acquire_msix_vectors(adapter, v_budget);

out:
	return err;
}

/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
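/*
 * Added note (not in the original source): the weight of 64 passed to
 * netif_napi_add() above is the conventional NAPI poll weight (the
 * customary upper bound on packets processed per poll); it is not
 * specific to this device.
 */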
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	napi_vectors = adapter->num_rx_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}

/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}

/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
**/ 1908static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 1909{ 1910 struct ixgbe_hw *hw = &adapter->hw; 1911 struct pci_dev *pdev = adapter->pdev; 1912 int err; 1913 1914 /* PCI config space info */ 1915 1916 hw->vendor_id = pdev->vendor; 1917 hw->device_id = pdev->device; 1918 hw->revision_id = pdev->revision; 1919 hw->subsystem_vendor_id = pdev->subsystem_vendor; 1920 hw->subsystem_device_id = pdev->subsystem_device; 1921 1922 hw->mbx.ops.init_params(hw); 1923 hw->mac.max_tx_queues = MAX_TX_QUEUES; 1924 hw->mac.max_rx_queues = MAX_RX_QUEUES; 1925 err = hw->mac.ops.reset_hw(hw); 1926 if (err) { 1927 dev_info(&pdev->dev, 1928 "PF still in reset state, assigning new address\n"); 1929 eth_hw_addr_random(adapter->netdev); 1930 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr, 1931 adapter->netdev->addr_len); 1932 } else { 1933 err = hw->mac.ops.init_hw(hw); 1934 if (err) { 1935 pr_err("init_hw failed: %d\n", err); 1936 goto out; 1937 } 1938 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, 1939 adapter->netdev->addr_len); 1940 } 1941 1942 /* lock to protect mailbox accesses */ 1943 spin_lock_init(&adapter->mbx_lock); 1944 1945 /* Enable dynamic interrupt throttling rates */ 1946 adapter->rx_itr_setting = 1; 1947 adapter->tx_itr_setting = 1; 1948 1949 /* set default ring sizes */ 1950 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 1951 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 1952 1953 set_bit(__IXGBEVF_DOWN, &adapter->state); 1954 return 0; 1955 1956out: 1957 return err; 1958} 1959 1960#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 1961 { \ 1962 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 1963 if (current_counter < last_counter) \ 1964 counter += 0x100000000LL; \ 1965 last_counter = current_counter; \ 1966 counter &= 0xFFFFFFFF00000000LL; \ 1967 counter |= current_counter; \ 1968 } 1969 1970#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 1971 { \ 1972 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 1973 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 1974 u64 current_counter = (current_counter_msb << 32) | \ 1975 current_counter_lsb; \ 1976 if (current_counter < last_counter) \ 1977 counter += 0x1000000000LL; \ 1978 last_counter = current_counter; \ 1979 counter &= 0xFFFFFFF000000000LL; \ 1980 counter |= current_counter; \ 1981 } 1982/** 1983 * ixgbevf_update_stats - Update the board statistics counters.
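 *
 * The UPDATE_VF_COUNTER_* macros above widen the read-only 32-bit (or
 * 36-bit) hardware counters into 64-bit software counters: a current
 * read that is smaller than the previous sample means the register
 * wrapped, so 2^32 (or 2^36) is added before the low bits are spliced
 * back in.  For example, last_counter = 0xFFFFFFF0 followed by a read
 * of 0x00000010 bumps the accumulated value by 0x100000000 and then
 * ORs in 0x10.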
1984 * @adapter: board private structure 1985 **/ 1986void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 1987{ 1988 struct ixgbe_hw *hw = &adapter->hw; 1989 1990 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 1991 adapter->stats.vfgprc); 1992 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 1993 adapter->stats.vfgptc); 1994 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 1995 adapter->stats.last_vfgorc, 1996 adapter->stats.vfgorc); 1997 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 1998 adapter->stats.last_vfgotc, 1999 adapter->stats.vfgotc); 2000 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2001 adapter->stats.vfmprc); 2002} 2003 2004/** 2005 * ixgbevf_watchdog - Timer Call-back 2006 * @data: pointer to adapter cast into an unsigned long 2007 **/ 2008static void ixgbevf_watchdog(unsigned long data) 2009{ 2010 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2011 struct ixgbe_hw *hw = &adapter->hw; 2012 u32 eics = 0; 2013 int i; 2014 2015 /* 2016 * Do the watchdog outside of interrupt context due to the lovely 2017 * delays that some of the newer hardware requires 2018 */ 2019 2020 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2021 goto watchdog_short_circuit; 2022 2023 /* get one bit for every active tx/rx interrupt vector */ 2024 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2025 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2026 if (qv->rx.ring || qv->tx.ring) 2027 eics |= 1 << i; 2028 } 2029 2030 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2031 2032watchdog_short_circuit: 2033 schedule_work(&adapter->watchdog_task); 2034} 2035 2036/** 2037 * ixgbevf_tx_timeout - Respond to a Tx Hang 2038 * @netdev: network interface device structure 2039 **/ 2040static void ixgbevf_tx_timeout(struct net_device *netdev) 2041{ 2042 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2043 2044 /* Do the reset outside of interrupt context */ 2045 schedule_work(&adapter->reset_task); 2046} 2047 2048static void ixgbevf_reset_task(struct work_struct *work) 2049{ 2050 struct ixgbevf_adapter *adapter; 2051 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2052 2053 /* If we're already down or resetting, just bail */ 2054 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2055 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2056 return; 2057 2058 adapter->tx_timeout_count++; 2059 2060 ixgbevf_reinit_locked(adapter); 2061} 2062 2063/** 2064 * ixgbevf_watchdog_task - worker thread to bring link up 2065 * @work: pointer to work_struct containing our data 2066 **/ 2067static void ixgbevf_watchdog_task(struct work_struct *work) 2068{ 2069 struct ixgbevf_adapter *adapter = container_of(work, 2070 struct ixgbevf_adapter, 2071 watchdog_task); 2072 struct net_device *netdev = adapter->netdev; 2073 struct ixgbe_hw *hw = &adapter->hw; 2074 u32 link_speed = adapter->link_speed; 2075 bool link_up = adapter->link_up; 2076 2077 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2078 2079 /* 2080 * Always check the link on the watchdog because we have 2081 * no LSC interrupt 2082 */ 2083 if (hw->mac.ops.check_link) { 2084 s32 need_reset; 2085 2086 spin_lock(&adapter->mbx_lock); 2087 2088 need_reset = hw->mac.ops.check_link(hw, &link_speed, 2089 &link_up, false); 2090 2091 spin_unlock(&adapter->mbx_lock); 2092 2093 if (need_reset) { 2094 adapter->link_up = link_up; 2095 adapter->link_speed = link_speed; 2096 netif_carrier_off(netdev); 2097 netif_tx_stop_all_queues(netdev); 2098 
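			/*
			 * A check_link failure here generally means the
			 * PF has reset and the mailbox state is stale;
			 * drop the carrier first so the stack stops
			 * queueing, then let reset_task rebuild the VF
			 * outside this work item.
			 */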
schedule_work(&adapter->reset_task); 2099 goto pf_has_reset; 2100 } 2101 } else { 2102 /* always assume link is up, if no check link 2103 * function */ 2104 link_speed = IXGBE_LINK_SPEED_10GB_FULL; 2105 link_up = true; 2106 } 2107 adapter->link_up = link_up; 2108 adapter->link_speed = link_speed; 2109 2110 if (link_up) { 2111 if (!netif_carrier_ok(netdev)) { 2112 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n", 2113 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2114 10 : 1); 2115 netif_carrier_on(netdev); 2116 netif_tx_wake_all_queues(netdev); 2117 } 2118 } else { 2119 adapter->link_up = false; 2120 adapter->link_speed = 0; 2121 if (netif_carrier_ok(netdev)) { 2122 hw_dbg(&adapter->hw, "NIC Link is Down\n"); 2123 netif_carrier_off(netdev); 2124 netif_tx_stop_all_queues(netdev); 2125 } 2126 } 2127 2128 ixgbevf_update_stats(adapter); 2129 2130pf_has_reset: 2131 /* Reset the timer */ 2132 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2133 mod_timer(&adapter->watchdog_timer, 2134 round_jiffies(jiffies + (2 * HZ))); 2135 2136 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2137} 2138 2139/** 2140 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2141 * @adapter: board private structure 2142 * @tx_ring: Tx descriptor ring for a specific queue 2143 * 2144 * Free all transmit software resources 2145 **/ 2146void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, 2147 struct ixgbevf_ring *tx_ring) 2148{ 2149 struct pci_dev *pdev = adapter->pdev; 2150 2151 ixgbevf_clean_tx_ring(adapter, tx_ring); 2152 2153 vfree(tx_ring->tx_buffer_info); 2154 tx_ring->tx_buffer_info = NULL; 2155 2156 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2157 tx_ring->dma); 2158 2159 tx_ring->desc = NULL; 2160} 2161 2162/** 2163 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2164 * @adapter: board private structure 2165 * 2166 * Free all transmit software resources 2167 **/ 2168static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2169{ 2170 int i; 2171 2172 for (i = 0; i < adapter->num_tx_queues; i++) 2173 if (adapter->tx_ring[i].desc) 2174 ixgbevf_free_tx_resources(adapter, 2175 &adapter->tx_ring[i]); 2176 2177} 2178 2179/** 2180 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2181 * @adapter: board private structure 2182 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2183 * 2184 * Return 0 on success, negative on failure 2185 **/ 2186int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, 2187 struct ixgbevf_ring *tx_ring) 2188{ 2189 struct pci_dev *pdev = adapter->pdev; 2190 int size; 2191 2192 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2193 tx_ring->tx_buffer_info = vzalloc(size); 2194 if (!tx_ring->tx_buffer_info) 2195 goto err; 2196 2197 /* round up to nearest 4K */ 2198 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2199 tx_ring->size = ALIGN(tx_ring->size, 4096); 2200 2201 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 2202 &tx_ring->dma, GFP_KERNEL); 2203 if (!tx_ring->desc) 2204 goto err; 2205 2206 tx_ring->next_to_use = 0; 2207 tx_ring->next_to_clean = 0; 2208 return 0; 2209 2210err: 2211 vfree(tx_ring->tx_buffer_info); 2212 tx_ring->tx_buffer_info = NULL; 2213 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " 2214 "descriptor ring\n"); 2215 return -ENOMEM; 2216} 2217 2218/** 2219 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources 2220 * @adapter: board private structure 2221 * 2222 * If this function returns with an 
error, then it's possible one or 2223 * more of the rings is populated (while the rest are not). It is the 2224 * caller's duty to clean those orphaned rings. 2225 * 2226 * Return 0 on success, negative on failure 2227 **/ 2228static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) 2229{ 2230 int i, err = 0; 2231 2232 for (i = 0; i < adapter->num_tx_queues; i++) { 2233 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2234 if (!err) 2235 continue; 2236 hw_dbg(&adapter->hw, 2237 "Allocation for Tx Queue %u failed\n", i); 2238 break; 2239 } 2240 2241 return err; 2242} 2243 2244/** 2245 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) 2246 * @adapter: board private structure 2247 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2248 * 2249 * Returns 0 on success, negative on failure 2250 **/ 2251int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, 2252 struct ixgbevf_ring *rx_ring) 2253{ 2254 struct pci_dev *pdev = adapter->pdev; 2255 int size; 2256 2257 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2258 rx_ring->rx_buffer_info = vzalloc(size); 2259 if (!rx_ring->rx_buffer_info) 2260 goto alloc_failed; 2261 2262 /* Round up to nearest 4K */ 2263 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2264 rx_ring->size = ALIGN(rx_ring->size, 4096); 2265 2266 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 2267 &rx_ring->dma, GFP_KERNEL); 2268 2269 if (!rx_ring->desc) { 2270 hw_dbg(&adapter->hw, 2271 "Unable to allocate memory for " 2272 "the receive descriptor ring\n"); 2273 vfree(rx_ring->rx_buffer_info); 2274 rx_ring->rx_buffer_info = NULL; 2275 goto alloc_failed; 2276 } 2277 2278 rx_ring->next_to_clean = 0; 2279 rx_ring->next_to_use = 0; 2280 2281 return 0; 2282alloc_failed: 2283 return -ENOMEM; 2284} 2285 2286/** 2287 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources 2288 * @adapter: board private structure 2289 * 2290 * If this function returns with an error, then it's possible one or 2291 * more of the rings is populated (while the rest are not). It is the 2292 * caller's duty to clean those orphaned rings.
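 *
 * Each ring needs two allocations (see ixgbevf_setup_rx_resources
 * above): a vzalloc'd rx_buffer_info array for software state, and a
 * DMA-coherent descriptor block whose size is rounded up to a 4K page,
 * roughly:
 *
 *	rx_ring->size = ALIGN(count * sizeof(union ixgbe_adv_rx_desc), 4096);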
2293 * 2294 * Return 0 on success, negative on failure 2295 **/ 2296static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2297{ 2298 int i, err = 0; 2299 2300 for (i = 0; i < adapter->num_rx_queues; i++) { 2301 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2302 if (!err) 2303 continue; 2304 hw_dbg(&adapter->hw, 2305 "Allocation for Rx Queue %u failed\n", i); 2306 break; 2307 } 2308 return err; 2309} 2310 2311/** 2312 * ixgbevf_free_rx_resources - Free Rx Resources 2313 * @adapter: board private structure 2314 * @rx_ring: ring to clean the resources from 2315 * 2316 * Free all receive software resources 2317 **/ 2318void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, 2319 struct ixgbevf_ring *rx_ring) 2320{ 2321 struct pci_dev *pdev = adapter->pdev; 2322 2323 ixgbevf_clean_rx_ring(adapter, rx_ring); 2324 2325 vfree(rx_ring->rx_buffer_info); 2326 rx_ring->rx_buffer_info = NULL; 2327 2328 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2329 rx_ring->dma); 2330 2331 rx_ring->desc = NULL; 2332} 2333 2334/** 2335 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2336 * @adapter: board private structure 2337 * 2338 * Free all receive software resources 2339 **/ 2340static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2341{ 2342 int i; 2343 2344 for (i = 0; i < adapter->num_rx_queues; i++) 2345 if (adapter->rx_ring[i].desc) 2346 ixgbevf_free_rx_resources(adapter, 2347 &adapter->rx_ring[i]); 2348} 2349 2350/** 2351 * ixgbevf_open - Called when a network interface is made active 2352 * @netdev: network interface device structure 2353 * 2354 * Returns 0 on success, negative value on failure 2355 * 2356 * The open entry point is called when a network interface is made 2357 * active by the system (IFF_UP). At this point all resources needed 2358 * for transmit and receive operations are allocated, the interrupt 2359 * handler is registered with the OS, the watchdog timer is started, 2360 * and the stack is notified that the interface is ready. 2361 **/ 2362static int ixgbevf_open(struct net_device *netdev) 2363{ 2364 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2365 struct ixgbe_hw *hw = &adapter->hw; 2366 int err; 2367 2368 /* disallow open during test */ 2369 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2370 return -EBUSY; 2371 2372 if (hw->adapter_stopped) { 2373 ixgbevf_reset(adapter); 2374 /* if adapter is still stopped then PF isn't up and 2375 * the vf can't start. */ 2376 if (hw->adapter_stopped) { 2377 err = IXGBE_ERR_MBX; 2378 pr_err("Unable to start - perhaps the PF Driver isn't " 2379 "up yet\n"); 2380 goto err_setup_reset; 2381 } 2382 } 2383 2384 ixgbevf_negotiate_api(adapter); 2385 2386 /* allocate transmit descriptors */ 2387 err = ixgbevf_setup_all_tx_resources(adapter); 2388 if (err) 2389 goto err_setup_tx; 2390 2391 /* allocate receive descriptors */ 2392 err = ixgbevf_setup_all_rx_resources(adapter); 2393 if (err) 2394 goto err_setup_rx; 2395 2396 ixgbevf_configure(adapter); 2397 2398 /* 2399 * Map the Tx/Rx rings to the vectors we were allotted. 
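	 * Each q_vector is given the set of rings it polls; with the
	 * single Tx/Rx queue pair this driver uses by default, vector 0
	 * ends up owning both tx_ring[0] and rx_ring[0].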
* Since request_irq is called later in this function, the mapping 2401 * must be done *before* up_complete 2402 */ 2403 ixgbevf_map_rings_to_vectors(adapter); 2404 2405 ixgbevf_up_complete(adapter); 2406 2407 /* clear any pending interrupts, may auto mask */ 2408 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2409 err = ixgbevf_request_irq(adapter); 2410 if (err) 2411 goto err_req_irq; 2412 2413 ixgbevf_irq_enable(adapter); 2414 2415 return 0; 2416 2417err_req_irq: 2418 ixgbevf_down(adapter); 2419 ixgbevf_free_irq(adapter); 2420err_setup_rx: 2421 ixgbevf_free_all_rx_resources(adapter); 2422err_setup_tx: 2423 ixgbevf_free_all_tx_resources(adapter); 2424 ixgbevf_reset(adapter); 2425 2426err_setup_reset: 2427 2428 return err; 2429} 2430 2431/** 2432 * ixgbevf_close - Disables a network interface 2433 * @netdev: network interface device structure 2434 * 2435 * Returns 0, this is not allowed to fail 2436 * 2437 * The close entry point is called when an interface is de-activated 2438 * by the OS. The hardware is still under the driver's control, but 2439 * needs to be disabled. A global MAC reset is issued to stop the 2440 * hardware, and all transmit and receive resources are freed. 2441 **/ 2442static int ixgbevf_close(struct net_device *netdev) 2443{ 2444 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2445 2446 ixgbevf_down(adapter); 2447 ixgbevf_free_irq(adapter); 2448 2449 ixgbevf_free_all_tx_resources(adapter); 2450 ixgbevf_free_all_rx_resources(adapter); 2451 2452 return 0; 2453} 2454 2455static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, 2456 u32 vlan_macip_lens, u32 type_tucmd, 2457 u32 mss_l4len_idx) 2458{ 2459 struct ixgbe_adv_tx_context_desc *context_desc; 2460 u16 i = tx_ring->next_to_use; 2461 2462 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 2463 2464 i++; 2465 tx_ring->next_to_use = (i < tx_ring->count) ?
i : 0; 2466 2467 /* set bits to identify this as an advanced context descriptor */ 2468 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 2469 2470 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2471 context_desc->seqnum_seed = 0; 2472 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 2473 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2474} 2475 2476static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, 2477 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 2478{ 2479 u32 vlan_macip_lens, type_tucmd; 2480 u32 mss_l4len_idx, l4len; 2481 2482 if (!skb_is_gso(skb)) 2483 return 0; 2484 2485 if (skb_header_cloned(skb)) { 2486 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2487 if (err) 2488 return err; 2489 } 2490 2491 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2492 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 2493 2494 if (skb->protocol == htons(ETH_P_IP)) { 2495 struct iphdr *iph = ip_hdr(skb); 2496 iph->tot_len = 0; 2497 iph->check = 0; 2498 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2499 iph->daddr, 0, 2500 IPPROTO_TCP, 2501 0); 2502 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2503 } else if (skb_is_gso_v6(skb)) { 2504 ipv6_hdr(skb)->payload_len = 0; 2505 tcp_hdr(skb)->check = 2506 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2507 &ipv6_hdr(skb)->daddr, 2508 0, IPPROTO_TCP, 0); 2509 } 2510 2511 /* compute header lengths */ 2512 l4len = tcp_hdrlen(skb); 2514 *hdr_len = skb_transport_offset(skb) + l4len; 2515 2516 /* mss_l4len_idx: use 1 as index for TSO */ 2517 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; 2518 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 2519 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; 2520 2521 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 2522 vlan_macip_lens = skb_network_header_len(skb); 2523 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2524 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2525 2526 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2527 type_tucmd, mss_l4len_idx); 2528 2529 return 1; 2530} 2531 2532static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, 2533 struct sk_buff *skb, u32 tx_flags) 2534{ 2538 u32 vlan_macip_lens = 0; 2539 u32 mss_l4len_idx = 0; 2540 u32 type_tucmd = 0; 2541 2542 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2543 u8 l4_hdr = 0; 2544 switch (skb->protocol) { 2545 case __constant_htons(ETH_P_IP): 2546 vlan_macip_lens |= skb_network_header_len(skb); 2547 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2548 l4_hdr = ip_hdr(skb)->protocol; 2549 break; 2550 case __constant_htons(ETH_P_IPV6): 2551 vlan_macip_lens |= skb_network_header_len(skb); 2552 l4_hdr = ipv6_hdr(skb)->nexthdr; 2553 break; 2554 default: 2555 if (unlikely(net_ratelimit())) { 2556 dev_warn(tx_ring->dev, 2557 "partial checksum but proto=%x!\n", 2558 skb->protocol); 2559 } 2560 break; 2561 } 2562 2563 switch (l4_hdr) { 2564 case IPPROTO_TCP: 2565 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 2566 mss_l4len_idx = tcp_hdrlen(skb) << 2567 IXGBE_ADVTXD_L4LEN_SHIFT; 2568 break; 2569 case IPPROTO_SCTP: 2570 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; 2571 mss_l4len_idx = sizeof(struct sctphdr) << 2572 IXGBE_ADVTXD_L4LEN_SHIFT; 2573 break; 2574 case IPPROTO_UDP: 2575 mss_l4len_idx = sizeof(struct udphdr) << 2576 IXGBE_ADVTXD_L4LEN_SHIFT; 2577 break; 2578 default: 2579 if (unlikely(net_ratelimit())) { 2580 dev_warn(tx_ring->dev, 2581 "partial checksum but l4 proto=%x!\n", 2582 l4_hdr); 2583 } 2584 break; 2585 } 2586 } 2587 2588 /* vlan_macip_lens:
MACLEN, VLAN tag */ 2589 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2590 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2591 2592 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2593 type_tucmd, mss_l4len_idx); 2594 2595 return (skb->ip_summed == CHECKSUM_PARTIAL); 2596} 2597 2598static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2599 struct sk_buff *skb, u32 tx_flags, 2600 unsigned int first) 2601{ 2602 struct ixgbevf_tx_buffer *tx_buffer_info; 2603 unsigned int len; 2604 unsigned int total = skb->len; 2605 unsigned int offset = 0, size; 2606 int count = 0; 2607 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2608 unsigned int f; 2609 int i; 2610 2611 i = tx_ring->next_to_use; 2612 2613 len = min(skb_headlen(skb), total); 2614 while (len) { 2615 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2616 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2617 2618 tx_buffer_info->length = size; 2619 tx_buffer_info->mapped_as_page = false; 2620 tx_buffer_info->dma = dma_map_single(tx_ring->dev, 2621 skb->data + offset, 2622 size, DMA_TO_DEVICE); 2623 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) 2624 goto dma_error; 2625 tx_buffer_info->next_to_watch = i; 2626 2627 len -= size; 2628 total -= size; 2629 offset += size; 2630 count++; 2631 i++; 2632 if (i == tx_ring->count) 2633 i = 0; 2634 } 2635 2636 for (f = 0; f < nr_frags; f++) { 2637 const struct skb_frag_struct *frag; 2638 2639 frag = &skb_shinfo(skb)->frags[f]; 2640 len = min((unsigned int)skb_frag_size(frag), total); 2641 offset = 0; 2642 2643 while (len) { 2644 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2645 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2646 2647 tx_buffer_info->length = size; 2648 tx_buffer_info->dma = 2649 skb_frag_dma_map(tx_ring->dev, frag, 2650 offset, size, DMA_TO_DEVICE); 2651 tx_buffer_info->mapped_as_page = true; 2652 if (dma_mapping_error(tx_ring->dev, 2653 tx_buffer_info->dma)) 2654 goto dma_error; 2655 tx_buffer_info->next_to_watch = i; 2656 2657 len -= size; 2658 total -= size; 2659 offset += size; 2660 count++; 2661 i++; 2662 if (i == tx_ring->count) 2663 i = 0; 2664 } 2665 if (total == 0) 2666 break; 2667 } 2668 2669 if (i == 0) 2670 i = tx_ring->count - 1; 2671 else 2672 i = i - 1; 2673 tx_ring->tx_buffer_info[i].skb = skb; 2674 tx_ring->tx_buffer_info[first].next_to_watch = i; 2675 tx_ring->tx_buffer_info[first].time_stamp = jiffies; 2676 2677 return count; 2678 2679dma_error: 2680 dev_err(tx_ring->dev, "TX DMA map failed\n"); 2681 2682 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2683 tx_buffer_info->dma = 0; 2684 tx_buffer_info->next_to_watch = 0; 2685 count--; 2686 2687 /* clear timestamp and dma mappings for remaining portion of packet */ 2688 while (count >= 0) { 2689 count--; 2690 i--; 2691 if (i < 0) 2692 i += tx_ring->count; 2693 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2694 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 2695 } 2696 2697 return count; 2698} 2699 2700static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, 2701 int count, u32 paylen, u8 hdr_len) 2702{ 2703 union ixgbe_adv_tx_desc *tx_desc = NULL; 2704 struct ixgbevf_tx_buffer *tx_buffer_info; 2705 u32 olinfo_status = 0, cmd_type_len = 0; 2706 unsigned int i; 2707 2708 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 2709 2710 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 2711 2712 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 2713 2714 if (tx_flags & 
IXGBE_TX_FLAGS_VLAN) 2715 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 2716 2717 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 2718 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM; 2719 2720 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 2721 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 2722 2723 /* use index 1 context for tso */ 2724 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2725 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 2726 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM; 2727 2728 } 2729 2730 /* 2731 * Check Context must be set if Tx switch is enabled, which it 2732 * always is when virtual functions are running 2733 */ 2734 olinfo_status |= IXGBE_ADVTXD_CC; 2735 2736 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 2737 2738 i = tx_ring->next_to_use; 2739 while (count--) { 2740 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2741 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 2742 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 2743 tx_desc->read.cmd_type_len = 2744 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 2745 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2746 i++; 2747 if (i == tx_ring->count) 2748 i = 0; 2749 } 2750 2751 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 2752 2753 tx_ring->next_to_use = i; 2754} 2755 2756static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2757{ 2758 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); 2759 2760 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 2761 /* Herbert's original patch had: 2762 * smp_mb__after_netif_stop_queue(); 2763 * but since that doesn't exist yet, just open code it. */ 2764 smp_mb(); 2765 2766 /* We need to check again in case another CPU has just 2767 * made room available. */ 2768 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) 2769 return -EBUSY; 2770 2771 /* A reprieve!
- use start_queue because it doesn't call schedule */ 2772 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 2773 ++adapter->restart_queue; 2774 return 0; 2775} 2776 2777static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2778{ 2779 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 2780 return 0; 2781 return __ixgbevf_maybe_stop_tx(tx_ring, size); 2782} 2783 2784static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2785{ 2786 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2787 struct ixgbevf_ring *tx_ring; 2788 unsigned int first; 2789 unsigned int tx_flags = 0; 2790 u8 hdr_len = 0; 2791 int r_idx = 0, tso; 2792 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 2793#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2794 unsigned short f; 2795#endif 2796 2797 tx_ring = &adapter->tx_ring[r_idx]; 2798 2799 /* 2800 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 2801 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 2802 * + 2 desc gap to keep tail from touching head, 2803 * + 1 desc for context descriptor, 2804 * otherwise try next time 2805 */ 2806#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2807 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 2808 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 2809#else 2810 count += skb_shinfo(skb)->nr_frags; 2811#endif 2812 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 2813 adapter->tx_busy++; 2814 return NETDEV_TX_BUSY; 2815 } 2816 2817 if (vlan_tx_tag_present(skb)) { 2818 tx_flags |= vlan_tx_tag_get(skb); 2819 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 2820 tx_flags |= IXGBE_TX_FLAGS_VLAN; 2821 } 2822 2823 first = tx_ring->next_to_use; 2824 2825 if (skb->protocol == htons(ETH_P_IP)) 2826 tx_flags |= IXGBE_TX_FLAGS_IPV4; 2827 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len); 2828 if (tso < 0) { 2829 dev_kfree_skb_any(skb); 2830 return NETDEV_TX_OK; 2831 } 2832 2833 if (tso) 2834 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; 2835 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags)) 2836 tx_flags |= IXGBE_TX_FLAGS_CSUM; 2837 2838 ixgbevf_tx_queue(tx_ring, tx_flags, 2839 ixgbevf_tx_map(tx_ring, skb, tx_flags, first), 2840 skb->len, hdr_len); 2841 /* 2842 * Force memory writes to complete before letting h/w 2843 * know there are new descriptors to fetch. (Only 2844 * applicable for weak-ordered memory model archs, 2845 * such as IA-64). 
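	 *
	 * The tail register write below is the doorbell: once it is
	 * posted, hardware may start fetching descriptors, so every
	 * descriptor write above must be globally visible first.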
2846 */ 2847 wmb(); 2848 2849 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); 2850 2851 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); 2852 2853 return NETDEV_TX_OK; 2854} 2855 2856/** 2857 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 2858 * @netdev: network interface device structure 2859 * @p: pointer to an address structure 2860 * 2861 * Returns 0 on success, negative on failure 2862 **/ 2863static int ixgbevf_set_mac(struct net_device *netdev, void *p) 2864{ 2865 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2866 struct ixgbe_hw *hw = &adapter->hw; 2867 struct sockaddr *addr = p; 2868 2869 if (!is_valid_ether_addr(addr->sa_data)) 2870 return -EADDRNOTAVAIL; 2871 2872 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2873 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 2874 2875 spin_lock(&adapter->mbx_lock); 2876 2877 if (hw->mac.ops.set_rar) 2878 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 2879 2880 spin_unlock(&adapter->mbx_lock); 2881 2882 return 0; 2883} 2884 2885/** 2886 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 2887 * @netdev: network interface device structure 2888 * @new_mtu: new value for maximum frame size 2889 * 2890 * Returns 0 on success, negative on failure 2891 **/ 2892static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 2893{ 2894 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2895 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2896 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 2897 2898 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 2899 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 2900 2901 /* MTU < 68 is an error and causes problems on some kernels */ 2902 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 2903 return -EINVAL; 2904 2905 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 2906 netdev->mtu, new_mtu); 2907 /* must set new MTU before calling down or up */ 2908 netdev->mtu = new_mtu; 2909 2910 if (netif_running(netdev)) 2911 ixgbevf_reinit_locked(adapter); 2912 2913 return 0; 2914} 2915 2916static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) 2917{ 2918 struct net_device *netdev = pci_get_drvdata(pdev); 2919 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2920#ifdef CONFIG_PM 2921 int retval = 0; 2922#endif 2923 2924 netif_device_detach(netdev); 2925 2926 if (netif_running(netdev)) { 2927 rtnl_lock(); 2928 ixgbevf_down(adapter); 2929 ixgbevf_free_irq(adapter); 2930 ixgbevf_free_all_tx_resources(adapter); 2931 ixgbevf_free_all_rx_resources(adapter); 2932 rtnl_unlock(); 2933 } 2934 2935 ixgbevf_clear_interrupt_scheme(adapter); 2936 2937#ifdef CONFIG_PM 2938 retval = pci_save_state(pdev); 2939 if (retval) 2940 return retval; 2941 2942#endif 2943 pci_disable_device(pdev); 2944 2945 return 0; 2946} 2947 2948#ifdef CONFIG_PM 2949static int ixgbevf_resume(struct pci_dev *pdev) 2950{ 2951 struct net_device *netdev = pci_get_drvdata(pdev); 2952 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2953 int err; 2954 2955 pci_set_power_state(pdev, PCI_D0); 2956 pci_restore_state(pdev); 2957 /* 2958 * pci_restore_state clears dev->state_saved so call 2959 * pci_save_state to restore it.
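	 * Without this, a later pci_restore_state() would see
	 * state_saved == false and restore nothing.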
2960 */ 2961 pci_save_state(pdev); 2962 2963 err = pci_enable_device_mem(pdev); 2964 if (err) { 2965 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 2966 return err; 2967 } 2968 pci_set_master(pdev); 2969 2970 rtnl_lock(); 2971 err = ixgbevf_init_interrupt_scheme(adapter); 2972 rtnl_unlock(); 2973 if (err) { 2974 dev_err(&pdev->dev, "Cannot initialize interrupts\n"); 2975 return err; 2976 } 2977 2978 ixgbevf_reset(adapter); 2979 2980 if (netif_running(netdev)) { 2981 err = ixgbevf_open(netdev); 2982 if (err) 2983 return err; 2984 } 2985 2986 netif_device_attach(netdev); 2987 2988 return err; 2989} 2990 2991#endif /* CONFIG_PM */ 2992static void ixgbevf_shutdown(struct pci_dev *pdev) 2993{ 2994 ixgbevf_suspend(pdev, PMSG_SUSPEND); 2995} 2996 2997static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 2998 struct rtnl_link_stats64 *stats) 2999{ 3000 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3001 unsigned int start; 3002 u64 bytes, packets; 3003 const struct ixgbevf_ring *ring; 3004 int i; 3005 3006 ixgbevf_update_stats(adapter); 3007 3008 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3009 3010 for (i = 0; i < adapter->num_rx_queues; i++) { 3011 ring = &adapter->rx_ring[i]; 3012 do { 3013 start = u64_stats_fetch_begin_bh(&ring->syncp); 3014 bytes = ring->total_bytes; 3015 packets = ring->total_packets; 3016 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3017 stats->rx_bytes += bytes; 3018 stats->rx_packets += packets; 3019 } 3020 3021 for (i = 0; i < adapter->num_tx_queues; i++) { 3022 ring = &adapter->tx_ring[i]; 3023 do { 3024 start = u64_stats_fetch_begin_bh(&ring->syncp); 3025 bytes = ring->total_bytes; 3026 packets = ring->total_packets; 3027 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3028 stats->tx_bytes += bytes; 3029 stats->tx_packets += packets; 3030 } 3031 3032 return stats; 3033} 3034 3035static const struct net_device_ops ixgbevf_netdev_ops = { 3036 .ndo_open = ixgbevf_open, 3037 .ndo_stop = ixgbevf_close, 3038 .ndo_start_xmit = ixgbevf_xmit_frame, 3039 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3040 .ndo_get_stats64 = ixgbevf_get_stats, 3041 .ndo_validate_addr = eth_validate_addr, 3042 .ndo_set_mac_address = ixgbevf_set_mac, 3043 .ndo_change_mtu = ixgbevf_change_mtu, 3044 .ndo_tx_timeout = ixgbevf_tx_timeout, 3045 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3046 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3047}; 3048 3049static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3050{ 3051 dev->netdev_ops = &ixgbevf_netdev_ops; 3052 ixgbevf_set_ethtool_ops(dev); 3053 dev->watchdog_timeo = 5 * HZ; 3054} 3055 3056/** 3057 * ixgbevf_probe - Device Initialization Routine 3058 * @pdev: PCI device information struct 3059 * @ent: entry in ixgbevf_pci_tbl 3060 * 3061 * Returns 0 on success, negative on failure 3062 * 3063 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3064 * The OS initialization, configuring of the adapter private structure, 3065 * and a hardware reset occur. 
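 *
 * The error labels at the bottom of the function mirror the setup
 * order, so each failure path unwinds exactly what was acquired before
 * it (PCI regions, netdev, ioremap, interrupt scheme).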
3066 **/ 3067static int __devinit ixgbevf_probe(struct pci_dev *pdev, 3068 const struct pci_device_id *ent) 3069{ 3070 struct net_device *netdev; 3071 struct ixgbevf_adapter *adapter = NULL; 3072 struct ixgbe_hw *hw = NULL; 3073 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3074 static int cards_found; 3075 int err, pci_using_dac; 3076 3077 err = pci_enable_device(pdev); 3078 if (err) 3079 return err; 3080 3081 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3082 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 3083 pci_using_dac = 1; 3084 } else { 3085 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3086 if (err) { 3087 err = dma_set_coherent_mask(&pdev->dev, 3088 DMA_BIT_MASK(32)); 3089 if (err) { 3090 dev_err(&pdev->dev, "No usable DMA " 3091 "configuration, aborting\n"); 3092 goto err_dma; 3093 } 3094 } 3095 pci_using_dac = 0; 3096 } 3097 3098 err = pci_request_regions(pdev, ixgbevf_driver_name); 3099 if (err) { 3100 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3101 goto err_pci_reg; 3102 } 3103 3104 pci_set_master(pdev); 3105 3106 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3107 MAX_TX_QUEUES); 3108 if (!netdev) { 3109 err = -ENOMEM; 3110 goto err_alloc_etherdev; 3111 } 3112 3113 SET_NETDEV_DEV(netdev, &pdev->dev); 3114 3115 pci_set_drvdata(pdev, netdev); 3116 adapter = netdev_priv(netdev); 3117 3118 adapter->netdev = netdev; 3119 adapter->pdev = pdev; 3120 hw = &adapter->hw; 3121 hw->back = adapter; 3122 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3123 3124 /* 3125 * call save state here in standalone driver because it relies on 3126 * adapter struct to exist, and needs to call netdev_priv 3127 */ 3128 pci_save_state(pdev); 3129 3130 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3131 pci_resource_len(pdev, 0)); 3132 if (!hw->hw_addr) { 3133 err = -EIO; 3134 goto err_ioremap; 3135 } 3136 3137 ixgbevf_assign_netdev_ops(netdev); 3138 3139 adapter->bd_number = cards_found; 3140 3141 /* Setup hw api */ 3142 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3143 hw->mac.type = ii->mac; 3144 3145 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3146 sizeof(struct ixgbe_mbx_operations)); 3147 3148 /* setup the private structure */ 3149 err = ixgbevf_sw_init(adapter); 3150 if (err) 3151 goto err_sw_init; 3152 3153 /* The HW MAC address was set and/or determined in sw_init */ 3154 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 3155 3156 if (!is_valid_ether_addr(netdev->dev_addr)) { 3157 pr_err("invalid MAC address\n"); 3158 err = -EIO; 3159 goto err_sw_init; 3160 } 3161 3162 netdev->hw_features = NETIF_F_SG | 3163 NETIF_F_IP_CSUM | 3164 NETIF_F_IPV6_CSUM | 3165 NETIF_F_TSO | 3166 NETIF_F_TSO6 | 3167 NETIF_F_RXCSUM; 3168 3169 netdev->features = netdev->hw_features | 3170 NETIF_F_HW_VLAN_TX | 3171 NETIF_F_HW_VLAN_RX | 3172 NETIF_F_HW_VLAN_FILTER; 3173 3174 netdev->vlan_features |= NETIF_F_TSO; 3175 netdev->vlan_features |= NETIF_F_TSO6; 3176 netdev->vlan_features |= NETIF_F_IP_CSUM; 3177 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3178 netdev->vlan_features |= NETIF_F_SG; 3179 3180 if (pci_using_dac) 3181 netdev->features |= NETIF_F_HIGHDMA; 3182 3183 netdev->priv_flags |= IFF_UNICAST_FLT; 3184 3185 init_timer(&adapter->watchdog_timer); 3186 adapter->watchdog_timer.function = ixgbevf_watchdog; 3187 adapter->watchdog_timer.data = (unsigned long)adapter; 3188 3189 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3190 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3191 3192 
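	/*
	 * Sets the queue counts, grabs MSI-X vectors and allocates the
	 * q_vectors and rings; everything the request_irq path in
	 * ixgbevf_open relies on later.
	 */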
err = ixgbevf_init_interrupt_scheme(adapter); 3193 if (err) 3194 goto err_sw_init; 3195 3196 /* pick up the PCI bus settings for reporting later */ 3197 if (hw->mac.ops.get_bus_info) 3198 hw->mac.ops.get_bus_info(hw); 3199 3200 strcpy(netdev->name, "eth%d"); 3201 3202 err = register_netdev(netdev); 3203 if (err) 3204 goto err_register; 3205 3206 netif_carrier_off(netdev); 3207 3208 ixgbevf_init_last_counter_stats(adapter); 3209 3210 /* print the MAC address */ 3211 hw_dbg(hw, "%pM\n", netdev->dev_addr); 3212 3213 hw_dbg(hw, "MAC: %d\n", hw->mac.type); 3214 3215 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); 3216 cards_found++; 3217 return 0; 3218 3219err_register: 3220 ixgbevf_clear_interrupt_scheme(adapter); 3221err_sw_init: 3222 ixgbevf_reset_interrupt_capability(adapter); 3223 iounmap(hw->hw_addr); 3224err_ioremap: 3225 free_netdev(netdev); 3226err_alloc_etherdev: 3227 pci_release_regions(pdev); 3228err_pci_reg: 3229err_dma: 3230 pci_disable_device(pdev); 3231 return err; 3232} 3233 3234/** 3235 * ixgbevf_remove - Device Removal Routine 3236 * @pdev: PCI device information struct 3237 * 3238 * ixgbevf_remove is called by the PCI subsystem to alert the driver 3239 * that it should release a PCI device. This could be caused by a 3240 * Hot-Plug event, or because the driver is going to be removed from 3241 * memory. 3242 **/ 3243static void __devexit ixgbevf_remove(struct pci_dev *pdev) 3244{ 3245 struct net_device *netdev = pci_get_drvdata(pdev); 3246 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3247 3248 set_bit(__IXGBEVF_DOWN, &adapter->state); 3249 3250 del_timer_sync(&adapter->watchdog_timer); 3251 3252 cancel_work_sync(&adapter->reset_task); 3253 cancel_work_sync(&adapter->watchdog_task); 3254 3255 if (netdev->reg_state == NETREG_REGISTERED) 3256 unregister_netdev(netdev); 3257 3258 ixgbevf_clear_interrupt_scheme(adapter); 3259 ixgbevf_reset_interrupt_capability(adapter); 3260 3261 iounmap(adapter->hw.hw_addr); 3262 pci_release_regions(pdev); 3263 3264 hw_dbg(&adapter->hw, "Remove complete\n"); 3265 3266 kfree(adapter->tx_ring); 3267 kfree(adapter->rx_ring); 3268 3269 free_netdev(netdev); 3270 3271 pci_disable_device(pdev); 3272} 3273 3274/** 3275 * ixgbevf_io_error_detected - called when PCI error is detected 3276 * @pdev: Pointer to PCI device 3277 * @state: The current pci connection state 3278 * 3279 * This function is called after a PCI bus error affecting 3280 * this device has been detected. 3281 */ 3282static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, 3283 pci_channel_state_t state) 3284{ 3285 struct net_device *netdev = pci_get_drvdata(pdev); 3286 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3287 3288 netif_device_detach(netdev); 3289 3290 if (state == pci_channel_io_perm_failure) 3291 return PCI_ERS_RESULT_DISCONNECT; 3292 3293 if (netif_running(netdev)) 3294 ixgbevf_down(adapter); 3295 3296 pci_disable_device(pdev); 3297 3298 /* Request a slot reset. */ 3299 return PCI_ERS_RESULT_NEED_RESET; 3300} 3301 3302/** 3303 * ixgbevf_io_slot_reset - called after the pci bus has been reset. 3304 * @pdev: Pointer to PCI device 3305 * 3306 * Restart the card from scratch, as if from a cold-boot. Implementation 3307 * resembles the first-half of the ixgbevf_resume routine.
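 *
 * In the AER flow this runs after ixgbevf_io_error_detected() has
 * returned PCI_ERS_RESULT_NEED_RESET and the PCI core has reset the
 * link; ixgbevf_io_resume() below then restarts traffic.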
*/ 3309static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) 3310{ 3311 struct net_device *netdev = pci_get_drvdata(pdev); 3312 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3313 3314 if (pci_enable_device_mem(pdev)) { 3315 dev_err(&pdev->dev, 3316 "Cannot re-enable PCI device after reset.\n"); 3317 return PCI_ERS_RESULT_DISCONNECT; 3318 } 3319 3320 pci_set_master(pdev); 3321 3322 ixgbevf_reset(adapter); 3323 3324 return PCI_ERS_RESULT_RECOVERED; 3325} 3326 3327/** 3328 * ixgbevf_io_resume - called when traffic can start flowing again. 3329 * @pdev: Pointer to PCI device 3330 * 3331 * This callback is called when the error recovery driver tells us that 3332 * it's OK to resume normal operation. Implementation resembles the 3333 * second-half of the ixgbevf_resume routine. 3334 */ 3335static void ixgbevf_io_resume(struct pci_dev *pdev) 3336{ 3337 struct net_device *netdev = pci_get_drvdata(pdev); 3338 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3339 3340 if (netif_running(netdev)) 3341 ixgbevf_up(adapter); 3342 3343 netif_device_attach(netdev); 3344} 3345 3346/* PCI Error Recovery (ERS) */ 3347static struct pci_error_handlers ixgbevf_err_handler = { 3348 .error_detected = ixgbevf_io_error_detected, 3349 .slot_reset = ixgbevf_io_slot_reset, 3350 .resume = ixgbevf_io_resume, 3351}; 3352 3353static struct pci_driver ixgbevf_driver = { 3354 .name = ixgbevf_driver_name, 3355 .id_table = ixgbevf_pci_tbl, 3356 .probe = ixgbevf_probe, 3357 .remove = __devexit_p(ixgbevf_remove), 3358#ifdef CONFIG_PM 3359 /* Power Management Hooks */ 3360 .suspend = ixgbevf_suspend, 3361 .resume = ixgbevf_resume, 3362#endif 3363 .shutdown = ixgbevf_shutdown, 3364 .err_handler = &ixgbevf_err_handler 3365}; 3366 3367/** 3368 * ixgbevf_init_module - Driver Registration Routine 3369 * 3370 * ixgbevf_init_module is the first routine called when the driver is 3371 * loaded. All it does is register with the PCI subsystem. 3372 **/ 3373static int __init ixgbevf_init_module(void) 3374{ 3375 int ret; 3376 pr_info("%s - version %s\n", ixgbevf_driver_string, 3377 ixgbevf_driver_version); 3378 3379 pr_info("%s\n", ixgbevf_copyright); 3380 3381 ret = pci_register_driver(&ixgbevf_driver); 3382 return ret; 3383} 3384 3385module_init(ixgbevf_init_module); 3386 3387/** 3388 * ixgbevf_exit_module - Driver Exit Cleanup Routine 3389 * 3390 * ixgbevf_exit_module is called just before the driver is removed 3391 * from memory. 3392 **/ 3393static void __exit ixgbevf_exit_module(void) 3394{ 3395 pci_unregister_driver(&ixgbevf_driver); 3396} 3397 3398#ifdef DEBUG 3399/** 3400 * ixgbevf_get_hw_dev_name - return device name string 3401 * used by hardware layer to print debugging information 3402 **/ 3403char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3404{ 3405 struct ixgbevf_adapter *adapter = hw->back; 3406 return adapter->netdev->name; 3407} 3408 3409#endif 3410module_exit(ixgbevf_exit_module); 3411 3412/* ixgbevf_main.c */ 3413
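
/*
 * The init/exit pair above is the standard pci_register_driver() /
 * pci_unregister_driver() boilerplate; on kernels providing the
 * helper, the equivalent shorthand (minus the banner pr_info calls)
 * would be:
 *
 *	module_pci_driver(ixgbevf_driver);
 */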