ixgbevf_main.c revision fb40195cc975b14c5d4e44863ea996f999ba5aee
1/******************************************************************************* 2 3 Intel 82599 Virtual Function driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26*******************************************************************************/ 27 28 29/****************************************************************************** 30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code 31******************************************************************************/ 32 33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34 35#include <linux/types.h> 36#include <linux/bitops.h> 37#include <linux/module.h> 38#include <linux/pci.h> 39#include <linux/netdevice.h> 40#include <linux/vmalloc.h> 41#include <linux/string.h> 42#include <linux/in.h> 43#include <linux/ip.h> 44#include <linux/tcp.h> 45#include <linux/ipv6.h> 46#include <linux/slab.h> 47#include <net/checksum.h> 48#include <net/ip6_checksum.h> 49#include <linux/ethtool.h> 50#include <linux/if.h> 51#include <linux/if_vlan.h> 52#include <linux/prefetch.h> 53 54#include "ixgbevf.h" 55 56const char ixgbevf_driver_name[] = "ixgbevf"; 57static const char ixgbevf_driver_string[] = 58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 59 60#define DRV_VERSION "2.6.0-k" 61const char ixgbevf_driver_version[] = DRV_VERSION; 62static char ixgbevf_copyright[] = 63 "Copyright (c) 2009 - 2012 Intel Corporation."; 64 65static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 66 [board_82599_vf] = &ixgbevf_82599_vf_info, 67 [board_X540_vf] = &ixgbevf_X540_vf_info, 68}; 69 70/* ixgbevf_pci_tbl - PCI Device ID Table 71 * 72 * Wildcard entries (PCI_ANY_ID) should come last 73 * Last entry must be all 0s 74 * 75 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 76 * Class, Class Mask, private data (not used) } 77 */ 78static struct pci_device_id ixgbevf_pci_tbl[] = { 79 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), 80 board_82599_vf}, 81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), 82 board_X540_vf}, 83 84 /* required last entry */ 85 {0, } 86}; 87MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 88 89MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 90MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); 91MODULE_LICENSE("GPL"); 92MODULE_VERSION(DRV_VERSION); 93 94#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 95static int debug = -1; 96module_param(debug, int, 0); 97MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 98 99/* forward decls */ 100static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 101 102static inline void ixgbevf_release_rx_desc(struct 
ixgbe_hw *hw, 103 struct ixgbevf_ring *rx_ring, 104 u32 val) 105{ 106 /* 107 * Force memory writes to complete before letting h/w 108 * know there are new descriptors to fetch. (Only 109 * applicable for weak-ordered memory model archs, 110 * such as IA-64). 111 */ 112 wmb(); 113 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); 114} 115 116/** 117 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 118 * @adapter: pointer to adapter struct 119 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 120 * @queue: queue to map the corresponding interrupt to 121 * @msix_vector: the vector to map to the corresponding queue 122 * 123 */ 124static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, 125 u8 queue, u8 msix_vector) 126{ 127 u32 ivar, index; 128 struct ixgbe_hw *hw = &adapter->hw; 129 if (direction == -1) { 130 /* other causes */ 131 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 132 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 133 ivar &= ~0xFF; 134 ivar |= msix_vector; 135 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 136 } else { 137 /* tx or rx causes */ 138 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 139 index = ((16 * (queue & 1)) + (8 * direction)); 140 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 141 ivar &= ~(0xFF << index); 142 ivar |= (msix_vector << index); 143 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); 144 } 145} 146 147static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter, 148 struct ixgbevf_tx_buffer 149 *tx_buffer_info) 150{ 151 if (tx_buffer_info->dma) { 152 if (tx_buffer_info->mapped_as_page) 153 dma_unmap_page(&adapter->pdev->dev, 154 tx_buffer_info->dma, 155 tx_buffer_info->length, 156 DMA_TO_DEVICE); 157 else 158 dma_unmap_single(&adapter->pdev->dev, 159 tx_buffer_info->dma, 160 tx_buffer_info->length, 161 DMA_TO_DEVICE); 162 tx_buffer_info->dma = 0; 163 } 164 if (tx_buffer_info->skb) { 165 dev_kfree_skb_any(tx_buffer_info->skb); 166 tx_buffer_info->skb = NULL; 167 } 168 tx_buffer_info->time_stamp = 0; 169 /* tx_buffer_info must be completely set up in the transmit path */ 170} 171 172#define IXGBE_MAX_TXD_PWR 14 173#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 174 175/* Tx Descriptors needed, worst case */ 176#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) 177#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 178 179static void ixgbevf_tx_timeout(struct net_device *netdev); 180 181/** 182 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 183 * @q_vector: board private structure 184 * @tx_ring: tx ring to clean 185 **/ 186static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, 187 struct ixgbevf_ring *tx_ring) 188{ 189 struct ixgbevf_adapter *adapter = q_vector->adapter; 190 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 191 struct ixgbevf_tx_buffer *tx_buffer_info; 192 unsigned int i, eop, count = 0; 193 unsigned int total_bytes = 0, total_packets = 0; 194 195 i = tx_ring->next_to_clean; 196 eop = tx_ring->tx_buffer_info[i].next_to_watch; 197 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); 198 199 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 200 (count < tx_ring->count)) { 201 bool cleaned = false; 202 rmb(); /* read buffer_info after eop_desc */ 203 /* eop could change between read and DD-check */ 204 if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch)) 205 goto cont_loop; 206 for ( ; !cleaned; count++) { 207 struct sk_buff *skb; 208 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 209 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 
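			/* only the slot that reaches EOP carries the skb;
			 * that is where the byte/packet totals below are
			 * accounted, while the earlier slots for the frame
			 * are simply unmapped and recycled */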
210 cleaned = (i == eop); 211 skb = tx_buffer_info->skb; 212 213 if (cleaned && skb) { 214 unsigned int segs, bytecount; 215 216 /* gso_segs is currently only valid for tcp */ 217 segs = skb_shinfo(skb)->gso_segs ?: 1; 218 /* multiply data chunks by size of headers */ 219 bytecount = ((segs - 1) * skb_headlen(skb)) + 220 skb->len; 221 total_packets += segs; 222 total_bytes += bytecount; 223 } 224 225 ixgbevf_unmap_and_free_tx_resource(adapter, 226 tx_buffer_info); 227 228 tx_desc->wb.status = 0; 229 230 i++; 231 if (i == tx_ring->count) 232 i = 0; 233 } 234 235cont_loop: 236 eop = tx_ring->tx_buffer_info[i].next_to_watch; 237 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); 238 } 239 240 tx_ring->next_to_clean = i; 241 242#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 243 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && 244 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 245 /* Make sure that anybody stopping the queue after this 246 * sees the new next_to_clean. 247 */ 248 smp_mb(); 249 if (__netif_subqueue_stopped(tx_ring->netdev, 250 tx_ring->queue_index) && 251 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 252 netif_wake_subqueue(tx_ring->netdev, 253 tx_ring->queue_index); 254 ++adapter->restart_queue; 255 } 256 } 257 258 u64_stats_update_begin(&tx_ring->syncp); 259 tx_ring->total_bytes += total_bytes; 260 tx_ring->total_packets += total_packets; 261 u64_stats_update_end(&tx_ring->syncp); 262 263 return count < tx_ring->count; 264} 265 266/** 267 * ixgbevf_receive_skb - Send a completed packet up the stack 268 * @q_vector: structure containing interrupt and ring information 269 * @skb: packet to send up 270 * @status: hardware indication of status of receive 271 * @rx_ring: rx descriptor ring (for a specific queue) to setup 272 * @rx_desc: rx descriptor 273 **/ 274static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, 275 struct sk_buff *skb, u8 status, 276 struct ixgbevf_ring *ring, 277 union ixgbe_adv_rx_desc *rx_desc) 278{ 279 struct ixgbevf_adapter *adapter = q_vector->adapter; 280 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 281 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 282 283 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) 284 __vlan_hwaccel_put_tag(skb, tag); 285 286 napi_gro_receive(&q_vector->napi, skb); 287} 288 289/** 290 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 291 * @adapter: address of board private structure 292 * @status_err: hardware indication of status of receive 293 * @skb: skb currently being received and modified 294 **/ 295static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, 296 struct ixgbevf_ring *ring, 297 u32 status_err, struct sk_buff *skb) 298{ 299 skb_checksum_none_assert(skb); 300 301 /* Rx csum disabled */ 302 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 303 return; 304 305 /* if IP and error */ 306 if ((status_err & IXGBE_RXD_STAT_IPCS) && 307 (status_err & IXGBE_RXDADV_ERR_IPE)) { 308 adapter->hw_csum_rx_error++; 309 return; 310 } 311 312 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 313 return; 314 315 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 316 adapter->hw_csum_rx_error++; 317 return; 318 } 319 320 /* It must be a TCP or UDP packet with a valid checksum */ 321 skb->ip_summed = CHECKSUM_UNNECESSARY; 322 adapter->hw_csum_rx_good++; 323} 324 325/** 326 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 327 * @adapter: address of board private structure 328 **/ 329static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, 330 struct 
ixgbevf_ring *rx_ring, 331 int cleaned_count) 332{ 333 struct pci_dev *pdev = adapter->pdev; 334 union ixgbe_adv_rx_desc *rx_desc; 335 struct ixgbevf_rx_buffer *bi; 336 struct sk_buff *skb; 337 unsigned int i = rx_ring->next_to_use; 338 339 bi = &rx_ring->rx_buffer_info[i]; 340 341 while (cleaned_count--) { 342 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 343 skb = bi->skb; 344 if (!skb) { 345 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 346 rx_ring->rx_buf_len); 347 if (!skb) { 348 adapter->alloc_rx_buff_failed++; 349 goto no_buffers; 350 } 351 352 bi->skb = skb; 353 } 354 if (!bi->dma) { 355 bi->dma = dma_map_single(&pdev->dev, skb->data, 356 rx_ring->rx_buf_len, 357 DMA_FROM_DEVICE); 358 } 359 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 360 361 i++; 362 if (i == rx_ring->count) 363 i = 0; 364 bi = &rx_ring->rx_buffer_info[i]; 365 } 366 367no_buffers: 368 if (rx_ring->next_to_use != i) { 369 rx_ring->next_to_use = i; 370 371 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); 372 } 373} 374 375static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 376 u32 qmask) 377{ 378 struct ixgbe_hw *hw = &adapter->hw; 379 380 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); 381} 382 383static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 384 struct ixgbevf_ring *rx_ring, 385 int budget) 386{ 387 struct ixgbevf_adapter *adapter = q_vector->adapter; 388 struct pci_dev *pdev = adapter->pdev; 389 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 390 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; 391 struct sk_buff *skb; 392 unsigned int i; 393 u32 len, staterr; 394 int cleaned_count = 0; 395 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 396 397 i = rx_ring->next_to_clean; 398 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 399 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 400 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 401 402 while (staterr & IXGBE_RXD_STAT_DD) { 403 if (!budget) 404 break; 405 budget--; 406 407 rmb(); /* read descriptor and rx_buffer_info after status DD */ 408 len = le16_to_cpu(rx_desc->wb.upper.length); 409 skb = rx_buffer_info->skb; 410 prefetch(skb->data - NET_IP_ALIGN); 411 rx_buffer_info->skb = NULL; 412 413 if (rx_buffer_info->dma) { 414 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 415 rx_ring->rx_buf_len, 416 DMA_FROM_DEVICE); 417 rx_buffer_info->dma = 0; 418 skb_put(skb, len); 419 } 420 421 i++; 422 if (i == rx_ring->count) 423 i = 0; 424 425 next_rxd = IXGBEVF_RX_DESC(rx_ring, i); 426 prefetch(next_rxd); 427 cleaned_count++; 428 429 next_buffer = &rx_ring->rx_buffer_info[i]; 430 431 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 432 skb->next = next_buffer->skb; 433 skb->next->prev = skb; 434 adapter->non_eop_descs++; 435 goto next_desc; 436 } 437 438 /* ERR_MASK will only have valid bits if EOP set */ 439 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 440 dev_kfree_skb_irq(skb); 441 goto next_desc; 442 } 443 444 ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb); 445 446 /* probably a little skewed due to removing CRC */ 447 total_rx_bytes += skb->len; 448 total_rx_packets++; 449 450 /* 451 * Work around issue of some types of VM to VM loop back 452 * packets not getting split correctly 453 */ 454 if (staterr & IXGBE_RXD_STAT_LB) { 455 u32 header_fixup_len = skb_headlen(skb); 456 if (header_fixup_len < 14) 457 skb_push(skb, header_fixup_len); 458 } 459 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 460 461 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 462 463next_desc: 464 
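		/* whether the frame was passed up or dropped, clear the
		 * write-back status so this descriptor is not processed
		 * again on a later pass */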
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	return !!budget;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
						       per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
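	 * A Tx-only vector defaults to the 10K interrupts/sec setting and
	 * any vector that handles Rx defaults to 20K interrupts/sec; a
	 * {tx,rx}_itr_setting other than 1 (the "dynamic" default) is
	 * written out as a fixed ITR value instead.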
558 */ 559 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 560 struct ixgbevf_ring *ring; 561 q_vector = adapter->q_vector[v_idx]; 562 563 ixgbevf_for_each_ring(ring, q_vector->rx) 564 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); 565 566 ixgbevf_for_each_ring(ring, q_vector->tx) 567 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); 568 569 if (q_vector->tx.ring && !q_vector->rx.ring) { 570 /* tx only vector */ 571 if (adapter->tx_itr_setting == 1) 572 q_vector->itr = IXGBE_10K_ITR; 573 else 574 q_vector->itr = adapter->tx_itr_setting; 575 } else { 576 /* rx or rx/tx vector */ 577 if (adapter->rx_itr_setting == 1) 578 q_vector->itr = IXGBE_20K_ITR; 579 else 580 q_vector->itr = adapter->rx_itr_setting; 581 } 582 583 /* add q_vector eims value to global eims_enable_mask */ 584 adapter->eims_enable_mask |= 1 << v_idx; 585 586 ixgbevf_write_eitr(q_vector); 587 } 588 589 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 590 /* setup eims_other and add value to global eims_enable_mask */ 591 adapter->eims_other = 1 << v_idx; 592 adapter->eims_enable_mask |= adapter->eims_other; 593} 594 595enum latency_range { 596 lowest_latency = 0, 597 low_latency = 1, 598 bulk_latency = 2, 599 latency_invalid = 255 600}; 601 602/** 603 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 604 * @q_vector: structure containing interrupt and ring information 605 * @ring_container: structure containing ring performance data 606 * 607 * Stores a new ITR value based on packets and byte 608 * counts during the last interrupt. The advantage of per interrupt 609 * computation is faster updates and more accurate ITR for the current 610 * traffic pattern. Constants in this function were computed 611 * based on theoretical maximum wire speed and thresholds were set based 612 * on testing data as well as attempting to minimize response time 613 * while increasing bulk throughput. 614 **/ 615static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, 616 struct ixgbevf_ring_container *ring_container) 617{ 618 int bytes = ring_container->total_bytes; 619 int packets = ring_container->total_packets; 620 u32 timepassed_us; 621 u64 bytes_perint; 622 u8 itr_setting = ring_container->itr; 623 624 if (packets == 0) 625 return; 626 627 /* simple throttlerate management 628 * 0-20MB/s lowest (100000 ints/s) 629 * 20-100MB/s low (20000 ints/s) 630 * 100-1249MB/s bulk (8000 ints/s) 631 */ 632 /* what was last interrupt timeslice? */ 633 timepassed_us = q_vector->itr >> 2; 634 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 635 636 switch (itr_setting) { 637 case lowest_latency: 638 if (bytes_perint > 10) 639 itr_setting = low_latency; 640 break; 641 case low_latency: 642 if (bytes_perint > 20) 643 itr_setting = bulk_latency; 644 else if (bytes_perint <= 10) 645 itr_setting = lowest_latency; 646 break; 647 case bulk_latency: 648 if (bytes_perint <= 20) 649 itr_setting = low_latency; 650 break; 651 } 652 653 /* clear work counters since we have the values we need */ 654 ring_container->total_bytes = 0; 655 ring_container->total_packets = 0; 656 657 /* write updated itr to ring container */ 658 ring_container->itr = itr_setting; 659} 660 661/** 662 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 663 * @q_vector: structure containing interrupt and ring information 664 * 665 * This function is made to be called by ethtool and by the driver 666 * when it needs to update VTEITR registers at runtime. Hardware 667 * specific quirks/differences are taken care of here. 
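 * Note that q_vector->itr already uses the register's encoding: shifting
 * it right by two (as ixgbevf_update_itr() does) yields the interval in
 * microseconds, so, for example, the 20K interrupts/sec setting works out
 * to a 50 usec interval.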
668 */ 669void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) 670{ 671 struct ixgbevf_adapter *adapter = q_vector->adapter; 672 struct ixgbe_hw *hw = &adapter->hw; 673 int v_idx = q_vector->v_idx; 674 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 675 676 /* 677 * set the WDIS bit to not clear the timer bits and cause an 678 * immediate assertion of the interrupt 679 */ 680 itr_reg |= IXGBE_EITR_CNT_WDIS; 681 682 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 683} 684 685static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) 686{ 687 u32 new_itr = q_vector->itr; 688 u8 current_itr; 689 690 ixgbevf_update_itr(q_vector, &q_vector->tx); 691 ixgbevf_update_itr(q_vector, &q_vector->rx); 692 693 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 694 695 switch (current_itr) { 696 /* counts and packets in update_itr are dependent on these numbers */ 697 case lowest_latency: 698 new_itr = IXGBE_100K_ITR; 699 break; 700 case low_latency: 701 new_itr = IXGBE_20K_ITR; 702 break; 703 case bulk_latency: 704 default: 705 new_itr = IXGBE_8K_ITR; 706 break; 707 } 708 709 if (new_itr != q_vector->itr) { 710 /* do an exponential smoothing */ 711 new_itr = (10 * new_itr * q_vector->itr) / 712 ((9 * new_itr) + q_vector->itr); 713 714 /* save the algorithm value here */ 715 q_vector->itr = new_itr; 716 717 ixgbevf_write_eitr(q_vector); 718 } 719} 720 721static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) 722{ 723 struct ixgbevf_adapter *adapter = data; 724 struct ixgbe_hw *hw = &adapter->hw; 725 u32 msg; 726 bool got_ack = false; 727 728 if (!hw->mbx.ops.check_for_ack(hw)) 729 got_ack = true; 730 731 if (!hw->mbx.ops.check_for_msg(hw)) { 732 hw->mbx.ops.read(hw, &msg, 1); 733 734 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) 735 mod_timer(&adapter->watchdog_timer, 736 round_jiffies(jiffies + 1)); 737 738 if (msg & IXGBE_VT_MSGTYPE_NACK) 739 pr_warn("Last Request of type %2.2x to PF Nacked\n", 740 msg & 0xFF); 741 /* 742 * Restore the PFSTS bit in case someone is polling for a 743 * return message from the PF 744 */ 745 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS; 746 } 747 748 /* 749 * checking for the ack clears the PFACK bit. 
Place 750 * it back in the v2p_mailbox cache so that anyone 751 * polling for an ack will not miss it 752 */ 753 if (got_ack) 754 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; 755 756 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 757 758 return IRQ_HANDLED; 759} 760 761 762/** 763 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) 764 * @irq: unused 765 * @data: pointer to our q_vector struct for this interrupt vector 766 **/ 767static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) 768{ 769 struct ixgbevf_q_vector *q_vector = data; 770 771 /* EIAM disabled interrupts (on this vector) for us */ 772 if (q_vector->rx.ring || q_vector->tx.ring) 773 napi_schedule(&q_vector->napi); 774 775 return IRQ_HANDLED; 776} 777 778static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, 779 int r_idx) 780{ 781 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 782 783 a->rx_ring[r_idx].next = q_vector->rx.ring; 784 q_vector->rx.ring = &a->rx_ring[r_idx]; 785 q_vector->rx.count++; 786} 787 788static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, 789 int t_idx) 790{ 791 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 792 793 a->tx_ring[t_idx].next = q_vector->tx.ring; 794 q_vector->tx.ring = &a->tx_ring[t_idx]; 795 q_vector->tx.count++; 796} 797 798/** 799 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors 800 * @adapter: board private structure to initialize 801 * 802 * This function maps descriptor rings to the queue-specific vectors 803 * we were allotted through the MSI-X enabling code. Ideally, we'd have 804 * one vector per ring/queue, but on a constrained vector budget, we 805 * group the rings as "efficiently" as possible. You would add new 806 * mapping configurations in here. 807 **/ 808static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) 809{ 810 int q_vectors; 811 int v_start = 0; 812 int rxr_idx = 0, txr_idx = 0; 813 int rxr_remaining = adapter->num_rx_queues; 814 int txr_remaining = adapter->num_tx_queues; 815 int i, j; 816 int rqpv, tqpv; 817 int err = 0; 818 819 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 820 821 /* 822 * The ideal configuration... 823 * We have enough vectors to map one per queue. 824 */ 825 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 826 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 827 map_vector_to_rxq(adapter, v_start, rxr_idx); 828 829 for (; txr_idx < txr_remaining; v_start++, txr_idx++) 830 map_vector_to_txq(adapter, v_start, txr_idx); 831 goto out; 832 } 833 834 /* 835 * If we don't have enough vectors for a 1-to-1 836 * mapping, we'll have to group them so there are 837 * multiple queues per vector. 838 */ 839 /* Re-adjusting *qpv takes care of the remainder. */ 840 for (i = v_start; i < q_vectors; i++) { 841 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); 842 for (j = 0; j < rqpv; j++) { 843 map_vector_to_rxq(adapter, i, rxr_idx); 844 rxr_idx++; 845 rxr_remaining--; 846 } 847 } 848 for (i = v_start; i < q_vectors; i++) { 849 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); 850 for (j = 0; j < tqpv; j++) { 851 map_vector_to_txq(adapter, i, txr_idx); 852 txr_idx++; 853 txr_remaining--; 854 } 855 } 856 857out: 858 return err; 859} 860 861/** 862 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts 863 * @adapter: board private structure 864 * 865 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests 866 * interrupts from the kernel. 
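 * One IRQ is requested per queue vector, named after the netdev and the
 * ring type(s) it services, plus one final vector for mailbox and other
 * causes, which is serviced by ixgbevf_msix_mbx().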
867 **/ 868static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 869{ 870 struct net_device *netdev = adapter->netdev; 871 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 872 int vector, err; 873 int ri = 0, ti = 0; 874 875 for (vector = 0; vector < q_vectors; vector++) { 876 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; 877 struct msix_entry *entry = &adapter->msix_entries[vector]; 878 879 if (q_vector->tx.ring && q_vector->rx.ring) { 880 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 881 "%s-%s-%d", netdev->name, "TxRx", ri++); 882 ti++; 883 } else if (q_vector->rx.ring) { 884 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 885 "%s-%s-%d", netdev->name, "rx", ri++); 886 } else if (q_vector->tx.ring) { 887 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 888 "%s-%s-%d", netdev->name, "tx", ti++); 889 } else { 890 /* skip this unused q_vector */ 891 continue; 892 } 893 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, 894 q_vector->name, q_vector); 895 if (err) { 896 hw_dbg(&adapter->hw, 897 "request_irq failed for MSIX interrupt " 898 "Error: %d\n", err); 899 goto free_queue_irqs; 900 } 901 } 902 903 err = request_irq(adapter->msix_entries[vector].vector, 904 &ixgbevf_msix_mbx, 0, netdev->name, adapter); 905 if (err) { 906 hw_dbg(&adapter->hw, 907 "request_irq for msix_mbx failed: %d\n", err); 908 goto free_queue_irqs; 909 } 910 911 return 0; 912 913free_queue_irqs: 914 while (vector) { 915 vector--; 916 free_irq(adapter->msix_entries[vector].vector, 917 adapter->q_vector[vector]); 918 } 919 pci_disable_msix(adapter->pdev); 920 kfree(adapter->msix_entries); 921 adapter->msix_entries = NULL; 922 return err; 923} 924 925static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) 926{ 927 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 928 929 for (i = 0; i < q_vectors; i++) { 930 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; 931 q_vector->rx.ring = NULL; 932 q_vector->tx.ring = NULL; 933 q_vector->rx.count = 0; 934 q_vector->tx.count = 0; 935 } 936} 937 938/** 939 * ixgbevf_request_irq - initialize interrupts 940 * @adapter: board private structure 941 * 942 * Attempts to configure interrupts using the best available 943 * capabilities of the hardware and kernel. 
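 * Only MSI-X is attempted here; the VF driver has no MSI or legacy
 * interrupt fallback, so any failure is simply propagated to the caller.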
944 **/ 945static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) 946{ 947 int err = 0; 948 949 err = ixgbevf_request_msix_irqs(adapter); 950 951 if (err) 952 hw_dbg(&adapter->hw, 953 "request_irq failed, Error %d\n", err); 954 955 return err; 956} 957 958static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 959{ 960 int i, q_vectors; 961 962 q_vectors = adapter->num_msix_vectors; 963 i = q_vectors - 1; 964 965 free_irq(adapter->msix_entries[i].vector, adapter); 966 i--; 967 968 for (; i >= 0; i--) { 969 /* free only the irqs that were actually requested */ 970 if (!adapter->q_vector[i]->rx.ring && 971 !adapter->q_vector[i]->tx.ring) 972 continue; 973 974 free_irq(adapter->msix_entries[i].vector, 975 adapter->q_vector[i]); 976 } 977 978 ixgbevf_reset_q_vectors(adapter); 979} 980 981/** 982 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC 983 * @adapter: board private structure 984 **/ 985static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 986{ 987 struct ixgbe_hw *hw = &adapter->hw; 988 int i; 989 990 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); 991 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 992 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); 993 994 IXGBE_WRITE_FLUSH(hw); 995 996 for (i = 0; i < adapter->num_msix_vectors; i++) 997 synchronize_irq(adapter->msix_entries[i].vector); 998} 999 1000/** 1001 * ixgbevf_irq_enable - Enable default interrupt generation settings 1002 * @adapter: board private structure 1003 **/ 1004static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) 1005{ 1006 struct ixgbe_hw *hw = &adapter->hw; 1007 1008 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); 1009 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); 1010 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); 1011} 1012 1013/** 1014 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset 1015 * @adapter: board private structure 1016 * 1017 * Configure the Tx unit of the MAC after a reset. 1018 **/ 1019static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) 1020{ 1021 u64 tdba; 1022 struct ixgbe_hw *hw = &adapter->hw; 1023 u32 i, j, tdlen, txctrl; 1024 1025 /* Setup the HW Tx Head and Tail descriptor pointers */ 1026 for (i = 0; i < adapter->num_tx_queues; i++) { 1027 struct ixgbevf_ring *ring = &adapter->tx_ring[i]; 1028 j = ring->reg_idx; 1029 tdba = ring->dma; 1030 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 1031 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), 1032 (tdba & DMA_BIT_MASK(32))); 1033 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); 1034 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen); 1035 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0); 1036 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0); 1037 adapter->tx_ring[i].head = IXGBE_VFTDH(j); 1038 adapter->tx_ring[i].tail = IXGBE_VFTDT(j); 1039 /* Disable Tx Head Writeback RO bit, since this hoses 1040 * bookkeeping if things aren't delivered in order. 
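		 * (RO here is PCIe relaxed ordering on the write-back,
		 * which is what makes out-of-order delivery possible in
		 * the first place)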
1041 */ 1042 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); 1043 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1044 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); 1045 } 1046} 1047 1048#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1049 1050static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) 1051{ 1052 struct ixgbevf_ring *rx_ring; 1053 struct ixgbe_hw *hw = &adapter->hw; 1054 u32 srrctl; 1055 1056 rx_ring = &adapter->rx_ring[index]; 1057 1058 srrctl = IXGBE_SRRCTL_DROP_EN; 1059 1060 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1061 1062 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) 1063 srrctl |= IXGBEVF_RXBUFFER_2048 >> 1064 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1065 else 1066 srrctl |= rx_ring->rx_buf_len >> 1067 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1068 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1069} 1070 1071/** 1072 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1073 * @adapter: board private structure 1074 * 1075 * Configure the Rx unit of the MAC after a reset. 1076 **/ 1077static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) 1078{ 1079 u64 rdba; 1080 struct ixgbe_hw *hw = &adapter->hw; 1081 struct net_device *netdev = adapter->netdev; 1082 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1083 int i, j; 1084 u32 rdlen; 1085 int rx_buf_len; 1086 1087 /* PSRTYPE must be initialized in 82599 */ 1088 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); 1089 if (netdev->mtu <= ETH_DATA_LEN) 1090 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1091 else 1092 rx_buf_len = ALIGN(max_frame, 1024); 1093 1094 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1095 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1096 * the Base and Length of the Rx Descriptor Ring */ 1097 for (i = 0; i < adapter->num_rx_queues; i++) { 1098 rdba = adapter->rx_ring[i].dma; 1099 j = adapter->rx_ring[i].reg_idx; 1100 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1101 (rdba & DMA_BIT_MASK(32))); 1102 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1103 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen); 1104 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0); 1105 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); 1106 adapter->rx_ring[i].head = IXGBE_VFRDH(j); 1107 adapter->rx_ring[i].tail = IXGBE_VFRDT(j); 1108 adapter->rx_ring[i].rx_buf_len = rx_buf_len; 1109 1110 ixgbevf_configure_srrctl(adapter, j); 1111 } 1112} 1113 1114static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1115{ 1116 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1117 struct ixgbe_hw *hw = &adapter->hw; 1118 1119 /* add VID to filter table */ 1120 if (hw->mac.ops.set_vfta) 1121 hw->mac.ops.set_vfta(hw, vid, 0, true); 1122 set_bit(vid, adapter->active_vlans); 1123 1124 return 0; 1125} 1126 1127static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1128{ 1129 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1130 struct ixgbe_hw *hw = &adapter->hw; 1131 1132 /* remove VID from filter table */ 1133 if (hw->mac.ops.set_vfta) 1134 hw->mac.ops.set_vfta(hw, vid, 0, false); 1135 clear_bit(vid, adapter->active_vlans); 1136 1137 return 0; 1138} 1139 1140static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 1141{ 1142 u16 vid; 1143 1144 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1145 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); 1146} 1147 1148static int ixgbevf_write_uc_addr_list(struct net_device *netdev) 1149{ 1150 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1151 struct ixgbe_hw *hw = &adapter->hw; 1152 int count = 0; 
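	/* Only a handful of additional unicast addresses can be pushed to
	 * the PF (presumably bounded by the RAR entries it sets aside per
	 * VF), so anything beyond 10 is rejected rather than silently
	 * truncated. */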
	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and unicast filtering.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc +=
adapter->stats.vfgorc - 1270 adapter->stats.base_vfgorc; 1271 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - 1272 adapter->stats.base_vfgotc; 1273 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - 1274 adapter->stats.base_vfmprc; 1275 } 1276} 1277 1278static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) 1279{ 1280 struct ixgbe_hw *hw = &adapter->hw; 1281 1282 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 1283 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 1284 adapter->stats.last_vfgorc |= 1285 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 1286 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 1287 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 1288 adapter->stats.last_vfgotc |= 1289 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 1290 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 1291 1292 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; 1293 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; 1294 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; 1295 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; 1296 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1297} 1298 1299static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1300{ 1301 struct net_device *netdev = adapter->netdev; 1302 struct ixgbe_hw *hw = &adapter->hw; 1303 int i, j = 0; 1304 int num_rx_rings = adapter->num_rx_queues; 1305 u32 txdctl, rxdctl; 1306 u32 msg[2]; 1307 1308 for (i = 0; i < adapter->num_tx_queues; i++) { 1309 j = adapter->tx_ring[i].reg_idx; 1310 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1311 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 1312 txdctl |= (8 << 16); 1313 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1314 } 1315 1316 for (i = 0; i < adapter->num_tx_queues; i++) { 1317 j = adapter->tx_ring[i].reg_idx; 1318 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1319 txdctl |= IXGBE_TXDCTL_ENABLE; 1320 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1321 } 1322 1323 for (i = 0; i < num_rx_rings; i++) { 1324 j = adapter->rx_ring[i].reg_idx; 1325 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1326 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1327 if (hw->mac.type == ixgbe_mac_X540_vf) { 1328 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; 1329 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) | 1330 IXGBE_RXDCTL_RLPML_EN); 1331 } 1332 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1333 ixgbevf_rx_desc_queue_enable(adapter, i); 1334 } 1335 1336 ixgbevf_configure_msix(adapter); 1337 1338 if (hw->mac.ops.set_rar) { 1339 if (is_valid_ether_addr(hw->mac.addr)) 1340 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 1341 else 1342 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1343 } 1344 1345 msg[0] = IXGBE_VF_SET_LPE; 1346 msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1347 hw->mbx.ops.write_posted(hw, msg, 2); 1348 1349 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1350 ixgbevf_napi_enable_all(adapter); 1351 1352 /* enable transmits */ 1353 netif_tx_start_all_queues(netdev); 1354 1355 ixgbevf_save_reset_stats(adapter); 1356 ixgbevf_init_last_counter_stats(adapter); 1357 1358 mod_timer(&adapter->watchdog_timer, jiffies); 1359} 1360 1361void ixgbevf_up(struct ixgbevf_adapter *adapter) 1362{ 1363 struct ixgbe_hw *hw = &adapter->hw; 1364 1365 ixgbevf_configure(adapter); 1366 1367 ixgbevf_up_complete(adapter); 1368 1369 /* clear any pending interrupts, may auto mask */ 1370 IXGBE_READ_REG(hw, IXGBE_VTEICR); 
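	/* reading VTEICR clears it, so stale causes are discarded before
	 * the interrupt sources are unmasked below */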
1371 1372 ixgbevf_irq_enable(adapter); 1373} 1374 1375/** 1376 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 1377 * @adapter: board private structure 1378 * @rx_ring: ring to free buffers from 1379 **/ 1380static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, 1381 struct ixgbevf_ring *rx_ring) 1382{ 1383 struct pci_dev *pdev = adapter->pdev; 1384 unsigned long size; 1385 unsigned int i; 1386 1387 if (!rx_ring->rx_buffer_info) 1388 return; 1389 1390 /* Free all the Rx ring sk_buffs */ 1391 for (i = 0; i < rx_ring->count; i++) { 1392 struct ixgbevf_rx_buffer *rx_buffer_info; 1393 1394 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1395 if (rx_buffer_info->dma) { 1396 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 1397 rx_ring->rx_buf_len, 1398 DMA_FROM_DEVICE); 1399 rx_buffer_info->dma = 0; 1400 } 1401 if (rx_buffer_info->skb) { 1402 struct sk_buff *skb = rx_buffer_info->skb; 1403 rx_buffer_info->skb = NULL; 1404 do { 1405 struct sk_buff *this = skb; 1406 skb = skb->prev; 1407 dev_kfree_skb(this); 1408 } while (skb); 1409 } 1410 } 1411 1412 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 1413 memset(rx_ring->rx_buffer_info, 0, size); 1414 1415 /* Zero out the descriptor ring */ 1416 memset(rx_ring->desc, 0, rx_ring->size); 1417 1418 rx_ring->next_to_clean = 0; 1419 rx_ring->next_to_use = 0; 1420 1421 if (rx_ring->head) 1422 writel(0, adapter->hw.hw_addr + rx_ring->head); 1423 if (rx_ring->tail) 1424 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1425} 1426 1427/** 1428 * ixgbevf_clean_tx_ring - Free Tx Buffers 1429 * @adapter: board private structure 1430 * @tx_ring: ring to be cleaned 1431 **/ 1432static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter, 1433 struct ixgbevf_ring *tx_ring) 1434{ 1435 struct ixgbevf_tx_buffer *tx_buffer_info; 1436 unsigned long size; 1437 unsigned int i; 1438 1439 if (!tx_ring->tx_buffer_info) 1440 return; 1441 1442 /* Free all the Tx ring sk_buffs */ 1443 1444 for (i = 0; i < tx_ring->count; i++) { 1445 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1446 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); 1447 } 1448 1449 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 1450 memset(tx_ring->tx_buffer_info, 0, size); 1451 1452 memset(tx_ring->desc, 0, tx_ring->size); 1453 1454 tx_ring->next_to_use = 0; 1455 tx_ring->next_to_clean = 0; 1456 1457 if (tx_ring->head) 1458 writel(0, adapter->hw.hw_addr + tx_ring->head); 1459 if (tx_ring->tail) 1460 writel(0, adapter->hw.hw_addr + tx_ring->tail); 1461} 1462 1463/** 1464 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues 1465 * @adapter: board private structure 1466 **/ 1467static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) 1468{ 1469 int i; 1470 1471 for (i = 0; i < adapter->num_rx_queues; i++) 1472 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]); 1473} 1474 1475/** 1476 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues 1477 * @adapter: board private structure 1478 **/ 1479static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) 1480{ 1481 int i; 1482 1483 for (i = 0; i < adapter->num_tx_queues; i++) 1484 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1485} 1486 1487void ixgbevf_down(struct ixgbevf_adapter *adapter) 1488{ 1489 struct net_device *netdev = adapter->netdev; 1490 struct ixgbe_hw *hw = &adapter->hw; 1491 u32 txdctl; 1492 int i, j; 1493 1494 /* signal that we are down to the interrupt handler */ 1495 set_bit(__IXGBEVF_DOWN, &adapter->state); 1496 /* disable receives */ 1497 
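	/* (nothing is written to the Rx queues at this point; they are only
	 * quiesced by the function reset issued further down) */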
1498 netif_tx_disable(netdev); 1499 1500 msleep(10); 1501 1502 netif_tx_stop_all_queues(netdev); 1503 1504 ixgbevf_irq_disable(adapter); 1505 1506 ixgbevf_napi_disable_all(adapter); 1507 1508 del_timer_sync(&adapter->watchdog_timer); 1509 /* can't call flush scheduled work here because it can deadlock 1510 * if linkwatch_event tries to acquire the rtnl_lock which we are 1511 * holding */ 1512 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) 1513 msleep(1); 1514 1515 /* disable transmits in the hardware now that interrupts are off */ 1516 for (i = 0; i < adapter->num_tx_queues; i++) { 1517 j = adapter->tx_ring[i].reg_idx; 1518 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1519 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), 1520 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 1521 } 1522 1523 netif_carrier_off(netdev); 1524 1525 if (!pci_channel_offline(adapter->pdev)) 1526 ixgbevf_reset(adapter); 1527 1528 ixgbevf_clean_all_tx_rings(adapter); 1529 ixgbevf_clean_all_rx_rings(adapter); 1530} 1531 1532void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1533{ 1534 struct ixgbe_hw *hw = &adapter->hw; 1535 1536 WARN_ON(in_interrupt()); 1537 1538 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1539 msleep(1); 1540 1541 /* 1542 * Check if PF is up before re-init. If not then skip until 1543 * later when the PF is up and ready to service requests from 1544 * the VF via mailbox. If the VF is up and running then the 1545 * watchdog task will continue to schedule reset tasks until 1546 * the PF is up and running. 1547 */ 1548 if (!hw->mac.ops.reset_hw(hw)) { 1549 ixgbevf_down(adapter); 1550 ixgbevf_up(adapter); 1551 } 1552 1553 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1554} 1555 1556void ixgbevf_reset(struct ixgbevf_adapter *adapter) 1557{ 1558 struct ixgbe_hw *hw = &adapter->hw; 1559 struct net_device *netdev = adapter->netdev; 1560 1561 if (hw->mac.ops.reset_hw(hw)) 1562 hw_dbg(hw, "PF still resetting\n"); 1563 else 1564 hw->mac.ops.init_hw(hw); 1565 1566 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1567 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1568 netdev->addr_len); 1569 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1570 netdev->addr_len); 1571 } 1572} 1573 1574static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1575 int vectors) 1576{ 1577 int err, vector_threshold; 1578 1579 /* We'll want at least 2 (vector_threshold): 1580 * 1) TxQ[0] + RxQ[0] handler 1581 * 2) Other (Link Status Change, etc.) 1582 */ 1583 vector_threshold = MIN_MSIX_COUNT; 1584 1585 /* The more we get, the more we will assign to Tx/Rx Cleanup 1586 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1587 * Right now, we simply care about how many we'll get; we'll 1588 * set them up later while requesting irq's. 1589 */ 1590 while (vectors >= vector_threshold) { 1591 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1592 vectors); 1593 if (!err) /* Success in acquiring all requested vectors. */ 1594 break; 1595 else if (err < 0) 1596 vectors = 0; /* Nasty failure, quit now */ 1597 else /* err == number of vectors we should try again with */ 1598 vectors = err; 1599 } 1600 1601 if (vectors < vector_threshold) { 1602 /* Can't allocate enough MSI-X interrupts? Oh well. 1603 * This just means we'll go with either a single MSI 1604 * vector or fall back to legacy interrupts. 
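		 * (In practice the VF driver only ever uses MSI-X, so
		 * failing here leaves the device without a usable
		 * interrupt.)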
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		adapter->tx_ring[i].reg_idx = i;
		adapter->tx_ring[i].dev = &adapter->pdev->dev;
		adapter->tx_ring[i].netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
		adapter->rx_ring[i].dev = &adapter->pdev->dev;
		adapter->rx_ring[i].netdev = adapter->netdev;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
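	 * With the single Rx and single Tx queue this driver currently
	 * uses, that works out to one queue vector plus the NON_Q_VECTORS
	 * (mailbox/other causes) vector.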
1702 */ 1703 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); 1704 v_budget = min_t(int, v_budget, num_online_cpus()); 1705 v_budget += NON_Q_VECTORS; 1706 1707 /* A failure in MSI-X entry allocation isn't fatal, but it does 1708 * mean we disable MSI-X capabilities of the adapter. */ 1709 adapter->msix_entries = kcalloc(v_budget, 1710 sizeof(struct msix_entry), GFP_KERNEL); 1711 if (!adapter->msix_entries) { 1712 err = -ENOMEM; 1713 goto out; 1714 } 1715 1716 for (vector = 0; vector < v_budget; vector++) 1717 adapter->msix_entries[vector].entry = vector; 1718 1719 ixgbevf_acquire_msix_vectors(adapter, v_budget); 1720 1721out: 1722 return err; 1723} 1724 1725/** 1726 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 1727 * @adapter: board private structure to initialize 1728 * 1729 * We allocate one q_vector per queue interrupt. If allocation fails we 1730 * return -ENOMEM. 1731 **/ 1732static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 1733{ 1734 int q_idx, num_q_vectors; 1735 struct ixgbevf_q_vector *q_vector; 1736 1737 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1738 1739 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1740 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 1741 if (!q_vector) 1742 goto err_out; 1743 q_vector->adapter = adapter; 1744 q_vector->v_idx = q_idx; 1745 netif_napi_add(adapter->netdev, &q_vector->napi, 1746 ixgbevf_poll, 64); 1747 adapter->q_vector[q_idx] = q_vector; 1748 } 1749 1750 return 0; 1751 1752err_out: 1753 while (q_idx) { 1754 q_idx--; 1755 q_vector = adapter->q_vector[q_idx]; 1756 netif_napi_del(&q_vector->napi); 1757 kfree(q_vector); 1758 adapter->q_vector[q_idx] = NULL; 1759 } 1760 return -ENOMEM; 1761} 1762 1763/** 1764 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 1765 * @adapter: board private structure to initialize 1766 * 1767 * This function frees the memory allocated to the q_vectors. In addition if 1768 * NAPI is enabled it will delete any references to the NAPI struct prior 1769 * to freeing the q_vector. 
1770 **/ 1771static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 1772{ 1773 int q_idx, num_q_vectors; 1774 int napi_vectors; 1775 1776 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1777 napi_vectors = adapter->num_rx_queues; 1778 1779 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1780 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 1781 1782 adapter->q_vector[q_idx] = NULL; 1783 if (q_idx < napi_vectors) 1784 netif_napi_del(&q_vector->napi); 1785 kfree(q_vector); 1786 } 1787} 1788 1789/** 1790 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 1791 * @adapter: board private structure 1792 * 1793 **/ 1794static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 1795{ 1796 pci_disable_msix(adapter->pdev); 1797 kfree(adapter->msix_entries); 1798 adapter->msix_entries = NULL; 1799} 1800 1801/** 1802 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 1803 * @adapter: board private structure to initialize 1804 * 1805 **/ 1806static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 1807{ 1808 int err; 1809 1810 /* Number of supported queues */ 1811 ixgbevf_set_num_queues(adapter); 1812 1813 err = ixgbevf_set_interrupt_capability(adapter); 1814 if (err) { 1815 hw_dbg(&adapter->hw, 1816 "Unable to setup interrupt capabilities\n"); 1817 goto err_set_interrupt; 1818 } 1819 1820 err = ixgbevf_alloc_q_vectors(adapter); 1821 if (err) { 1822 hw_dbg(&adapter->hw, "Unable to allocate memory for queue " 1823 "vectors\n"); 1824 goto err_alloc_q_vectors; 1825 } 1826 1827 err = ixgbevf_alloc_queues(adapter); 1828 if (err) { 1829 pr_err("Unable to allocate memory for queues\n"); 1830 goto err_alloc_queues; 1831 } 1832 1833 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " 1834 "Tx Queue count = %u\n", 1835 (adapter->num_rx_queues > 1) ? "Enabled" : 1836 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 1837 1838 set_bit(__IXGBEVF_DOWN, &adapter->state); 1839 1840 return 0; 1841err_alloc_queues: 1842 ixgbevf_free_q_vectors(adapter); 1843err_alloc_q_vectors: 1844 ixgbevf_reset_interrupt_capability(adapter); 1845err_set_interrupt: 1846 return err; 1847} 1848 1849/** 1850 * ixgbevf_sw_init - Initialize general software structures 1851 * (struct ixgbevf_adapter) 1852 * @adapter: board private structure to initialize 1853 * 1854 * ixgbevf_sw_init initializes the Adapter private data structure. 1855 * Fields are initialized based on PCI device information and 1856 * OS network device settings (MTU size). 
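 * It also performs the initial reset/mailbox handshake with the PF in
 * order to pick up the MAC address assigned to this VF, falling back to
 * a random locally administered address if the PF is not ready yet.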
1857 **/ 1858static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 1859{ 1860 struct ixgbe_hw *hw = &adapter->hw; 1861 struct pci_dev *pdev = adapter->pdev; 1862 int err; 1863 1864 /* PCI config space info */ 1865 1866 hw->vendor_id = pdev->vendor; 1867 hw->device_id = pdev->device; 1868 hw->revision_id = pdev->revision; 1869 hw->subsystem_vendor_id = pdev->subsystem_vendor; 1870 hw->subsystem_device_id = pdev->subsystem_device; 1871 1872 hw->mbx.ops.init_params(hw); 1873 hw->mac.max_tx_queues = MAX_TX_QUEUES; 1874 hw->mac.max_rx_queues = MAX_RX_QUEUES; 1875 err = hw->mac.ops.reset_hw(hw); 1876 if (err) { 1877 dev_info(&pdev->dev, 1878 "PF still in reset state, assigning new address\n"); 1879 eth_hw_addr_random(adapter->netdev); 1880 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr, 1881 adapter->netdev->addr_len); 1882 } else { 1883 err = hw->mac.ops.init_hw(hw); 1884 if (err) { 1885 pr_err("init_shared_code failed: %d\n", err); 1886 goto out; 1887 } 1888 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, 1889 adapter->netdev->addr_len); 1890 } 1891 1892 /* Enable dynamic interrupt throttling rates */ 1893 adapter->rx_itr_setting = 1; 1894 adapter->tx_itr_setting = 1; 1895 1896 /* set default ring sizes */ 1897 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 1898 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 1899 1900 set_bit(__IXGBEVF_DOWN, &adapter->state); 1901 return 0; 1902 1903out: 1904 return err; 1905} 1906 1907#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 1908 { \ 1909 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 1910 if (current_counter < last_counter) \ 1911 counter += 0x100000000LL; \ 1912 last_counter = current_counter; \ 1913 counter &= 0xFFFFFFFF00000000LL; \ 1914 counter |= current_counter; \ 1915 } 1916 1917#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 1918 { \ 1919 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 1920 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 1921 u64 current_counter = (current_counter_msb << 32) | \ 1922 current_counter_lsb; \ 1923 if (current_counter < last_counter) \ 1924 counter += 0x1000000000LL; \ 1925 last_counter = current_counter; \ 1926 counter &= 0xFFFFFFF000000000LL; \ 1927 counter |= current_counter; \ 1928 } 1929/** 1930 * ixgbevf_update_stats - Update the board statistics counters. 
1931 * @adapter: board private structure 1932 **/ 1933void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 1934{ 1935 struct ixgbe_hw *hw = &adapter->hw; 1936 1937 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 1938 adapter->stats.vfgprc); 1939 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 1940 adapter->stats.vfgptc); 1941 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 1942 adapter->stats.last_vfgorc, 1943 adapter->stats.vfgorc); 1944 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 1945 adapter->stats.last_vfgotc, 1946 adapter->stats.vfgotc); 1947 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 1948 adapter->stats.vfmprc); 1949} 1950 1951/** 1952 * ixgbevf_watchdog - Timer Call-back 1953 * @data: pointer to adapter cast into an unsigned long 1954 **/ 1955static void ixgbevf_watchdog(unsigned long data) 1956{ 1957 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 1958 struct ixgbe_hw *hw = &adapter->hw; 1959 u32 eics = 0; 1960 int i; 1961 1962 /* 1963 * Do the watchdog outside of interrupt context due to the lovely 1964 * delays that some of the newer hardware requires 1965 */ 1966 1967 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 1968 goto watchdog_short_circuit; 1969 1970 /* get one bit for every active tx/rx interrupt vector */ 1971 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 1972 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 1973 if (qv->rx.ring || qv->tx.ring) 1974 eics |= 1 << i; 1975 } 1976 1977 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 1978 1979watchdog_short_circuit: 1980 schedule_work(&adapter->watchdog_task); 1981} 1982 1983/** 1984 * ixgbevf_tx_timeout - Respond to a Tx Hang 1985 * @netdev: network interface device structure 1986 **/ 1987static void ixgbevf_tx_timeout(struct net_device *netdev) 1988{ 1989 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1990 1991 /* Do the reset outside of interrupt context */ 1992 schedule_work(&adapter->reset_task); 1993} 1994 1995static void ixgbevf_reset_task(struct work_struct *work) 1996{ 1997 struct ixgbevf_adapter *adapter; 1998 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 1999 2000 /* If we're already down or resetting, just bail */ 2001 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2002 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2003 return; 2004 2005 adapter->tx_timeout_count++; 2006 2007 ixgbevf_reinit_locked(adapter); 2008} 2009 2010/** 2011 * ixgbevf_watchdog_task - worker thread to bring link up 2012 * @work: pointer to work_struct containing our data 2013 **/ 2014static void ixgbevf_watchdog_task(struct work_struct *work) 2015{ 2016 struct ixgbevf_adapter *adapter = container_of(work, 2017 struct ixgbevf_adapter, 2018 watchdog_task); 2019 struct net_device *netdev = adapter->netdev; 2020 struct ixgbe_hw *hw = &adapter->hw; 2021 u32 link_speed = adapter->link_speed; 2022 bool link_up = adapter->link_up; 2023 2024 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2025 2026 /* 2027 * Always check the link on the watchdog because we have 2028 * no LSC interrupt 2029 */ 2030 if (hw->mac.ops.check_link) { 2031 if ((hw->mac.ops.check_link(hw, &link_speed, 2032 &link_up, false)) != 0) { 2033 adapter->link_up = link_up; 2034 adapter->link_speed = link_speed; 2035 netif_carrier_off(netdev); 2036 netif_tx_stop_all_queues(netdev); 2037 schedule_work(&adapter->reset_task); 2038 goto pf_has_reset; 2039 } 2040 } else { 2041 /* always assume link is up, if no check link 2042 * 
function */ 2043 link_speed = IXGBE_LINK_SPEED_10GB_FULL; 2044 link_up = true; 2045 } 2046 adapter->link_up = link_up; 2047 adapter->link_speed = link_speed; 2048 2049 if (link_up) { 2050 if (!netif_carrier_ok(netdev)) { 2051 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n", 2052 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2053 10 : 1); 2054 netif_carrier_on(netdev); 2055 netif_tx_wake_all_queues(netdev); 2056 } 2057 } else { 2058 adapter->link_up = false; 2059 adapter->link_speed = 0; 2060 if (netif_carrier_ok(netdev)) { 2061 hw_dbg(&adapter->hw, "NIC Link is Down\n"); 2062 netif_carrier_off(netdev); 2063 netif_tx_stop_all_queues(netdev); 2064 } 2065 } 2066 2067 ixgbevf_update_stats(adapter); 2068 2069pf_has_reset: 2070 /* Reset the timer */ 2071 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2072 mod_timer(&adapter->watchdog_timer, 2073 round_jiffies(jiffies + (2 * HZ))); 2074 2075 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2076} 2077 2078/** 2079 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2080 * @adapter: board private structure 2081 * @tx_ring: Tx descriptor ring for a specific queue 2082 * 2083 * Free all transmit software resources 2084 **/ 2085void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, 2086 struct ixgbevf_ring *tx_ring) 2087{ 2088 struct pci_dev *pdev = adapter->pdev; 2089 2090 ixgbevf_clean_tx_ring(adapter, tx_ring); 2091 2092 vfree(tx_ring->tx_buffer_info); 2093 tx_ring->tx_buffer_info = NULL; 2094 2095 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2096 tx_ring->dma); 2097 2098 tx_ring->desc = NULL; 2099} 2100 2101/** 2102 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2103 * @adapter: board private structure 2104 * 2105 * Free all transmit software resources 2106 **/ 2107static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2108{ 2109 int i; 2110 2111 for (i = 0; i < adapter->num_tx_queues; i++) 2112 if (adapter->tx_ring[i].desc) 2113 ixgbevf_free_tx_resources(adapter, 2114 &adapter->tx_ring[i]); 2115 2116} 2117 2118/** 2119 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2120 * @adapter: board private structure 2121 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2122 * 2123 * Return 0 on success, negative on failure 2124 **/ 2125int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, 2126 struct ixgbevf_ring *tx_ring) 2127{ 2128 struct pci_dev *pdev = adapter->pdev; 2129 int size; 2130 2131 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2132 tx_ring->tx_buffer_info = vzalloc(size); 2133 if (!tx_ring->tx_buffer_info) 2134 goto err; 2135 2136 /* round up to nearest 4K */ 2137 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2138 tx_ring->size = ALIGN(tx_ring->size, 4096); 2139 2140 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 2141 &tx_ring->dma, GFP_KERNEL); 2142 if (!tx_ring->desc) 2143 goto err; 2144 2145 tx_ring->next_to_use = 0; 2146 tx_ring->next_to_clean = 0; 2147 return 0; 2148 2149err: 2150 vfree(tx_ring->tx_buffer_info); 2151 tx_ring->tx_buffer_info = NULL; 2152 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " 2153 "descriptor ring\n"); 2154 return -ENOMEM; 2155} 2156 2157/** 2158 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources 2159 * @adapter: board private structure 2160 * 2161 * If this function returns with an error, then it's possible one or 2162 * more of the rings is populated (while the rest are not). 
It is the 2163 * callers duty to clean those orphaned rings. 2164 * 2165 * Return 0 on success, negative on failure 2166 **/ 2167static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) 2168{ 2169 int i, err = 0; 2170 2171 for (i = 0; i < adapter->num_tx_queues; i++) { 2172 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2173 if (!err) 2174 continue; 2175 hw_dbg(&adapter->hw, 2176 "Allocation for Tx Queue %u failed\n", i); 2177 break; 2178 } 2179 2180 return err; 2181} 2182 2183/** 2184 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) 2185 * @adapter: board private structure 2186 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2187 * 2188 * Returns 0 on success, negative on failure 2189 **/ 2190int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, 2191 struct ixgbevf_ring *rx_ring) 2192{ 2193 struct pci_dev *pdev = adapter->pdev; 2194 int size; 2195 2196 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2197 rx_ring->rx_buffer_info = vzalloc(size); 2198 if (!rx_ring->rx_buffer_info) 2199 goto alloc_failed; 2200 2201 /* Round up to nearest 4K */ 2202 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2203 rx_ring->size = ALIGN(rx_ring->size, 4096); 2204 2205 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 2206 &rx_ring->dma, GFP_KERNEL); 2207 2208 if (!rx_ring->desc) { 2209 hw_dbg(&adapter->hw, 2210 "Unable to allocate memory for " 2211 "the receive descriptor ring\n"); 2212 vfree(rx_ring->rx_buffer_info); 2213 rx_ring->rx_buffer_info = NULL; 2214 goto alloc_failed; 2215 } 2216 2217 rx_ring->next_to_clean = 0; 2218 rx_ring->next_to_use = 0; 2219 2220 return 0; 2221alloc_failed: 2222 return -ENOMEM; 2223} 2224 2225/** 2226 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources 2227 * @adapter: board private structure 2228 * 2229 * If this function returns with an error, then it's possible one or 2230 * more of the rings is populated (while the rest are not). It is the 2231 * callers duty to clean those orphaned rings. 
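 * (ixgbevf_free_all_rx_resources is safe for that: it skips any ring whose
 * descriptor memory was never allocated.)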
2232 * 2233 * Return 0 on success, negative on failure 2234 **/ 2235static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2236{ 2237 int i, err = 0; 2238 2239 for (i = 0; i < adapter->num_rx_queues; i++) { 2240 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2241 if (!err) 2242 continue; 2243 hw_dbg(&adapter->hw, 2244 "Allocation for Rx Queue %u failed\n", i); 2245 break; 2246 } 2247 return err; 2248} 2249 2250/** 2251 * ixgbevf_free_rx_resources - Free Rx Resources 2252 * @adapter: board private structure 2253 * @rx_ring: ring to clean the resources from 2254 * 2255 * Free all receive software resources 2256 **/ 2257void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, 2258 struct ixgbevf_ring *rx_ring) 2259{ 2260 struct pci_dev *pdev = adapter->pdev; 2261 2262 ixgbevf_clean_rx_ring(adapter, rx_ring); 2263 2264 vfree(rx_ring->rx_buffer_info); 2265 rx_ring->rx_buffer_info = NULL; 2266 2267 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2268 rx_ring->dma); 2269 2270 rx_ring->desc = NULL; 2271} 2272 2273/** 2274 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2275 * @adapter: board private structure 2276 * 2277 * Free all receive software resources 2278 **/ 2279static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2280{ 2281 int i; 2282 2283 for (i = 0; i < adapter->num_rx_queues; i++) 2284 if (adapter->rx_ring[i].desc) 2285 ixgbevf_free_rx_resources(adapter, 2286 &adapter->rx_ring[i]); 2287} 2288 2289/** 2290 * ixgbevf_open - Called when a network interface is made active 2291 * @netdev: network interface device structure 2292 * 2293 * Returns 0 on success, negative value on failure 2294 * 2295 * The open entry point is called when a network interface is made 2296 * active by the system (IFF_UP). At this point all resources needed 2297 * for transmit and receive operations are allocated, the interrupt 2298 * handler is registered with the OS, the watchdog timer is started, 2299 * and the stack is notified that the interface is ready. 2300 **/ 2301static int ixgbevf_open(struct net_device *netdev) 2302{ 2303 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2304 struct ixgbe_hw *hw = &adapter->hw; 2305 int err; 2306 2307 /* disallow open during test */ 2308 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2309 return -EBUSY; 2310 2311 if (hw->adapter_stopped) { 2312 ixgbevf_reset(adapter); 2313 /* if adapter is still stopped then PF isn't up and 2314 * the vf can't start. */ 2315 if (hw->adapter_stopped) { 2316 err = IXGBE_ERR_MBX; 2317 pr_err("Unable to start - perhaps the PF Driver isn't " 2318 "up yet\n"); 2319 goto err_setup_reset; 2320 } 2321 } 2322 2323 /* allocate transmit descriptors */ 2324 err = ixgbevf_setup_all_tx_resources(adapter); 2325 if (err) 2326 goto err_setup_tx; 2327 2328 /* allocate receive descriptors */ 2329 err = ixgbevf_setup_all_rx_resources(adapter); 2330 if (err) 2331 goto err_setup_rx; 2332 2333 ixgbevf_configure(adapter); 2334 2335 /* 2336 * Map the Tx/Rx rings to the vectors we were allotted. 
2337 * if request_irq will be called in this function map_rings 2338 * must be called *before* up_complete 2339 */ 2340 ixgbevf_map_rings_to_vectors(adapter); 2341 2342 ixgbevf_up_complete(adapter); 2343 2344 /* clear any pending interrupts, may auto mask */ 2345 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2346 err = ixgbevf_request_irq(adapter); 2347 if (err) 2348 goto err_req_irq; 2349 2350 ixgbevf_irq_enable(adapter); 2351 2352 return 0; 2353 2354err_req_irq: 2355 ixgbevf_down(adapter); 2356 ixgbevf_free_irq(adapter); 2357err_setup_rx: 2358 ixgbevf_free_all_rx_resources(adapter); 2359err_setup_tx: 2360 ixgbevf_free_all_tx_resources(adapter); 2361 ixgbevf_reset(adapter); 2362 2363err_setup_reset: 2364 2365 return err; 2366} 2367 2368/** 2369 * ixgbevf_close - Disables a network interface 2370 * @netdev: network interface device structure 2371 * 2372 * Returns 0, this is not allowed to fail 2373 * 2374 * The close entry point is called when an interface is de-activated 2375 * by the OS. The hardware is still under the drivers control, but 2376 * needs to be disabled. A global MAC reset is issued to stop the 2377 * hardware, and all transmit and receive resources are freed. 2378 **/ 2379static int ixgbevf_close(struct net_device *netdev) 2380{ 2381 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2382 2383 ixgbevf_down(adapter); 2384 ixgbevf_free_irq(adapter); 2385 2386 ixgbevf_free_all_tx_resources(adapter); 2387 ixgbevf_free_all_rx_resources(adapter); 2388 2389 return 0; 2390} 2391 2392static int ixgbevf_tso(struct ixgbevf_adapter *adapter, 2393 struct ixgbevf_ring *tx_ring, 2394 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 2395{ 2396 struct ixgbe_adv_tx_context_desc *context_desc; 2397 unsigned int i; 2398 int err; 2399 struct ixgbevf_tx_buffer *tx_buffer_info; 2400 u32 vlan_macip_lens = 0, type_tucmd_mlhl; 2401 u32 mss_l4len_idx, l4len; 2402 2403 if (skb_is_gso(skb)) { 2404 if (skb_header_cloned(skb)) { 2405 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2406 if (err) 2407 return err; 2408 } 2409 l4len = tcp_hdrlen(skb); 2410 *hdr_len += l4len; 2411 2412 if (skb->protocol == htons(ETH_P_IP)) { 2413 struct iphdr *iph = ip_hdr(skb); 2414 iph->tot_len = 0; 2415 iph->check = 0; 2416 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2417 iph->daddr, 0, 2418 IPPROTO_TCP, 2419 0); 2420 adapter->hw_tso_ctxt++; 2421 } else if (skb_is_gso_v6(skb)) { 2422 ipv6_hdr(skb)->payload_len = 0; 2423 tcp_hdr(skb)->check = 2424 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2425 &ipv6_hdr(skb)->daddr, 2426 0, IPPROTO_TCP, 0); 2427 adapter->hw_tso6_ctxt++; 2428 } 2429 2430 i = tx_ring->next_to_use; 2431 2432 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2433 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 2434 2435 /* VLAN MACLEN IPLEN */ 2436 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2437 vlan_macip_lens |= 2438 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 2439 vlan_macip_lens |= ((skb_network_offset(skb)) << 2440 IXGBE_ADVTXD_MACLEN_SHIFT); 2441 *hdr_len += skb_network_offset(skb); 2442 vlan_macip_lens |= 2443 (skb_transport_header(skb) - skb_network_header(skb)); 2444 *hdr_len += 2445 (skb_transport_header(skb) - skb_network_header(skb)); 2446 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2447 context_desc->seqnum_seed = 0; 2448 2449 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2450 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 2451 IXGBE_ADVTXD_DTYP_CTXT); 2452 2453 if (skb->protocol == htons(ETH_P_IP)) 2454 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 2455 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 2456 
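		/* commit the DTYP/TUCMD flags assembled above to the context
		 * descriptor; the data descriptors queued later refer back to
		 * it through the index field (index 1 is reserved for TSO) */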
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 2457 2458 /* MSS L4LEN IDX */ 2459 mss_l4len_idx = 2460 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); 2461 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); 2462 /* use index 1 for TSO */ 2463 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2464 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2465 2466 tx_buffer_info->time_stamp = jiffies; 2467 tx_buffer_info->next_to_watch = i; 2468 2469 i++; 2470 if (i == tx_ring->count) 2471 i = 0; 2472 tx_ring->next_to_use = i; 2473 2474 return true; 2475 } 2476 2477 return false; 2478} 2479 2480static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter, 2481 struct ixgbevf_ring *tx_ring, 2482 struct sk_buff *skb, u32 tx_flags) 2483{ 2484 struct ixgbe_adv_tx_context_desc *context_desc; 2485 unsigned int i; 2486 struct ixgbevf_tx_buffer *tx_buffer_info; 2487 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; 2488 2489 if (skb->ip_summed == CHECKSUM_PARTIAL || 2490 (tx_flags & IXGBE_TX_FLAGS_VLAN)) { 2491 i = tx_ring->next_to_use; 2492 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2493 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 2494 2495 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2496 vlan_macip_lens |= (tx_flags & 2497 IXGBE_TX_FLAGS_VLAN_MASK); 2498 vlan_macip_lens |= (skb_network_offset(skb) << 2499 IXGBE_ADVTXD_MACLEN_SHIFT); 2500 if (skb->ip_summed == CHECKSUM_PARTIAL) 2501 vlan_macip_lens |= (skb_transport_header(skb) - 2502 skb_network_header(skb)); 2503 2504 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2505 context_desc->seqnum_seed = 0; 2506 2507 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 2508 IXGBE_ADVTXD_DTYP_CTXT); 2509 2510 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2511 switch (skb->protocol) { 2512 case __constant_htons(ETH_P_IP): 2513 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 2514 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2515 type_tucmd_mlhl |= 2516 IXGBE_ADVTXD_TUCMD_L4T_TCP; 2517 break; 2518 case __constant_htons(ETH_P_IPV6): 2519 /* XXX what about other V6 headers?? 
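			 * extension headers are not walked here, so anything
			 * other than a plain TCP nexthdr gets no L4 checksum
			 * command from this path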
*/ 2520 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2521 type_tucmd_mlhl |= 2522 IXGBE_ADVTXD_TUCMD_L4T_TCP; 2523 break; 2524 default: 2525 if (unlikely(net_ratelimit())) { 2526 pr_warn("partial checksum but " 2527 "proto=%x!\n", skb->protocol); 2528 } 2529 break; 2530 } 2531 } 2532 2533 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 2534 /* use index zero for tx checksum offload */ 2535 context_desc->mss_l4len_idx = 0; 2536 2537 tx_buffer_info->time_stamp = jiffies; 2538 tx_buffer_info->next_to_watch = i; 2539 2540 adapter->hw_csum_tx_good++; 2541 i++; 2542 if (i == tx_ring->count) 2543 i = 0; 2544 tx_ring->next_to_use = i; 2545 2546 return true; 2547 } 2548 2549 return false; 2550} 2551 2552static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, 2553 struct ixgbevf_ring *tx_ring, 2554 struct sk_buff *skb, u32 tx_flags, 2555 unsigned int first) 2556{ 2557 struct pci_dev *pdev = adapter->pdev; 2558 struct ixgbevf_tx_buffer *tx_buffer_info; 2559 unsigned int len; 2560 unsigned int total = skb->len; 2561 unsigned int offset = 0, size; 2562 int count = 0; 2563 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2564 unsigned int f; 2565 int i; 2566 2567 i = tx_ring->next_to_use; 2568 2569 len = min(skb_headlen(skb), total); 2570 while (len) { 2571 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2572 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2573 2574 tx_buffer_info->length = size; 2575 tx_buffer_info->mapped_as_page = false; 2576 tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev, 2577 skb->data + offset, 2578 size, DMA_TO_DEVICE); 2579 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 2580 goto dma_error; 2581 tx_buffer_info->time_stamp = jiffies; 2582 tx_buffer_info->next_to_watch = i; 2583 2584 len -= size; 2585 total -= size; 2586 offset += size; 2587 count++; 2588 i++; 2589 if (i == tx_ring->count) 2590 i = 0; 2591 } 2592 2593 for (f = 0; f < nr_frags; f++) { 2594 const struct skb_frag_struct *frag; 2595 2596 frag = &skb_shinfo(skb)->frags[f]; 2597 len = min((unsigned int)skb_frag_size(frag), total); 2598 offset = 0; 2599 2600 while (len) { 2601 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2602 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2603 2604 tx_buffer_info->length = size; 2605 tx_buffer_info->dma = 2606 skb_frag_dma_map(&adapter->pdev->dev, frag, 2607 offset, size, DMA_TO_DEVICE); 2608 tx_buffer_info->mapped_as_page = true; 2609 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 2610 goto dma_error; 2611 tx_buffer_info->time_stamp = jiffies; 2612 tx_buffer_info->next_to_watch = i; 2613 2614 len -= size; 2615 total -= size; 2616 offset += size; 2617 count++; 2618 i++; 2619 if (i == tx_ring->count) 2620 i = 0; 2621 } 2622 if (total == 0) 2623 break; 2624 } 2625 2626 if (i == 0) 2627 i = tx_ring->count - 1; 2628 else 2629 i = i - 1; 2630 tx_ring->tx_buffer_info[i].skb = skb; 2631 tx_ring->tx_buffer_info[first].next_to_watch = i; 2632 2633 return count; 2634 2635dma_error: 2636 dev_err(&pdev->dev, "TX DMA map failed\n"); 2637 2638 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2639 tx_buffer_info->dma = 0; 2640 tx_buffer_info->time_stamp = 0; 2641 tx_buffer_info->next_to_watch = 0; 2642 count--; 2643 2644 /* clear timestamp and dma mappings for remaining portion of packet */ 2645 while (count >= 0) { 2646 count--; 2647 i--; 2648 if (i < 0) 2649 i += tx_ring->count; 2650 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2651 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); 2652 } 2653 2654 return 
count; 2655} 2656 2657static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter, 2658 struct ixgbevf_ring *tx_ring, int tx_flags, 2659 int count, u32 paylen, u8 hdr_len) 2660{ 2661 union ixgbe_adv_tx_desc *tx_desc = NULL; 2662 struct ixgbevf_tx_buffer *tx_buffer_info; 2663 u32 olinfo_status = 0, cmd_type_len = 0; 2664 unsigned int i; 2665 2666 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 2667 2668 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 2669 2670 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 2671 2672 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2673 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 2674 2675 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 2676 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 2677 2678 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 2679 IXGBE_ADVTXD_POPTS_SHIFT; 2680 2681 /* use index 1 context for tso */ 2682 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2683 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 2684 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 2685 IXGBE_ADVTXD_POPTS_SHIFT; 2686 2687 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 2688 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 2689 IXGBE_ADVTXD_POPTS_SHIFT; 2690 2691 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 2692 2693 i = tx_ring->next_to_use; 2694 while (count--) { 2695 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2696 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 2697 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 2698 tx_desc->read.cmd_type_len = 2699 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 2700 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2701 i++; 2702 if (i == tx_ring->count) 2703 i = 0; 2704 } 2705 2706 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 2707 2708 /* 2709 * Force memory writes to complete before letting h/w 2710 * know there are new descriptors to fetch. (Only 2711 * applicable for weak-ordered memory model archs, 2712 * such as IA-64). 2713 */ 2714 wmb(); 2715 2716 tx_ring->next_to_use = i; 2717 writel(i, adapter->hw.hw_addr + tx_ring->tail); 2718} 2719 2720static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2721{ 2722 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); 2723 2724 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 2725 /* Herbert's original patch had: 2726 * smp_mb__after_netif_stop_queue(); 2727 * but since that doesn't exist yet, just open code it. */ 2728 smp_mb(); 2729 2730 /* We need to check again in a case another CPU has just 2731 * made room available. */ 2732 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) 2733 return -EBUSY; 2734 2735 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 2736 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 2737 ++adapter->restart_queue; 2738 return 0; 2739} 2740 2741static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2742{ 2743 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 2744 return 0; 2745 return __ixgbevf_maybe_stop_tx(tx_ring, size); 2746} 2747 2748static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2749{ 2750 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2751 struct ixgbevf_ring *tx_ring; 2752 unsigned int first; 2753 unsigned int tx_flags = 0; 2754 u8 hdr_len = 0; 2755 int r_idx = 0, tso; 2756 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 2757#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2758 unsigned short f; 2759#endif 2760 2761 tx_ring = &adapter->tx_ring[r_idx]; 2762 2763 /* 2764 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 2765 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 2766 * + 2 desc gap to keep tail from touching head, 2767 * + 1 desc for context descriptor, 2768 * otherwise try next time 2769 */ 2770#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2771 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 2772 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 2773#else 2774 count += skb_shinfo(skb)->nr_frags; 2775#endif 2776 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 2777 adapter->tx_busy++; 2778 return NETDEV_TX_BUSY; 2779 } 2780 2781 if (vlan_tx_tag_present(skb)) { 2782 tx_flags |= vlan_tx_tag_get(skb); 2783 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 2784 tx_flags |= IXGBE_TX_FLAGS_VLAN; 2785 } 2786 2787 first = tx_ring->next_to_use; 2788 2789 if (skb->protocol == htons(ETH_P_IP)) 2790 tx_flags |= IXGBE_TX_FLAGS_IPV4; 2791 tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); 2792 if (tso < 0) { 2793 dev_kfree_skb_any(skb); 2794 return NETDEV_TX_OK; 2795 } 2796 2797 if (tso) 2798 tx_flags |= IXGBE_TX_FLAGS_TSO; 2799 else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) && 2800 (skb->ip_summed == CHECKSUM_PARTIAL)) 2801 tx_flags |= IXGBE_TX_FLAGS_CSUM; 2802 2803 ixgbevf_tx_queue(adapter, tx_ring, tx_flags, 2804 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first), 2805 skb->len, hdr_len); 2806 2807 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); 2808 2809 return NETDEV_TX_OK; 2810} 2811 2812/** 2813 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 2814 * @netdev: network interface device structure 2815 * @p: pointer to an address structure 2816 * 2817 * Returns 0 on success, negative on failure 2818 **/ 2819static int ixgbevf_set_mac(struct net_device *netdev, void *p) 2820{ 2821 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2822 struct ixgbe_hw *hw = &adapter->hw; 2823 struct sockaddr *addr = p; 2824 2825 if (!is_valid_ether_addr(addr->sa_data)) 2826 return -EADDRNOTAVAIL; 2827 2828 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2829 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 2830 2831 if (hw->mac.ops.set_rar) 2832 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 2833 2834 return 0; 2835} 2836 2837/** 2838 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 2839 * @netdev: network interface device structure 2840 * @new_mtu: new value for maximum frame size 2841 * 2842 * Returns 0 on success, negative on failure 2843 **/ 2844static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 2845{ 2846 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2847 struct ixgbe_hw *hw = &adapter->hw; 2848 int max_frame = new_mtu + 
ETH_HLEN + ETH_FCS_LEN; 2849 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 2850 u32 msg[2]; 2851 2852 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 2853 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 2854 2855 /* MTU < 68 is an error and causes problems on some kernels */ 2856 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 2857 return -EINVAL; 2858 2859 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 2860 netdev->mtu, new_mtu); 2861 /* must set new MTU before calling down or up */ 2862 netdev->mtu = new_mtu; 2863 2864 if (!netif_running(netdev)) { 2865 msg[0] = IXGBE_VF_SET_LPE; 2866 msg[1] = max_frame; 2867 hw->mbx.ops.write_posted(hw, msg, 2); 2868 } 2869 2870 if (netif_running(netdev)) 2871 ixgbevf_reinit_locked(adapter); 2872 2873 return 0; 2874} 2875 2876static void ixgbevf_shutdown(struct pci_dev *pdev) 2877{ 2878 struct net_device *netdev = pci_get_drvdata(pdev); 2879 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2880 2881 netif_device_detach(netdev); 2882 2883 if (netif_running(netdev)) { 2884 ixgbevf_down(adapter); 2885 ixgbevf_free_irq(adapter); 2886 ixgbevf_free_all_tx_resources(adapter); 2887 ixgbevf_free_all_rx_resources(adapter); 2888 } 2889 2890 pci_save_state(pdev); 2891 2892 pci_disable_device(pdev); 2893} 2894 2895static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 2896 struct rtnl_link_stats64 *stats) 2897{ 2898 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2899 unsigned int start; 2900 u64 bytes, packets; 2901 const struct ixgbevf_ring *ring; 2902 int i; 2903 2904 ixgbevf_update_stats(adapter); 2905 2906 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 2907 2908 for (i = 0; i < adapter->num_rx_queues; i++) { 2909 ring = &adapter->rx_ring[i]; 2910 do { 2911 start = u64_stats_fetch_begin_bh(&ring->syncp); 2912 bytes = ring->total_bytes; 2913 packets = ring->total_packets; 2914 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 2915 stats->rx_bytes += bytes; 2916 stats->rx_packets += packets; 2917 } 2918 2919 for (i = 0; i < adapter->num_tx_queues; i++) { 2920 ring = &adapter->tx_ring[i]; 2921 do { 2922 start = u64_stats_fetch_begin_bh(&ring->syncp); 2923 bytes = ring->total_bytes; 2924 packets = ring->total_packets; 2925 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 2926 stats->tx_bytes += bytes; 2927 stats->tx_packets += packets; 2928 } 2929 2930 return stats; 2931} 2932 2933static const struct net_device_ops ixgbe_netdev_ops = { 2934 .ndo_open = ixgbevf_open, 2935 .ndo_stop = ixgbevf_close, 2936 .ndo_start_xmit = ixgbevf_xmit_frame, 2937 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 2938 .ndo_get_stats64 = ixgbevf_get_stats, 2939 .ndo_validate_addr = eth_validate_addr, 2940 .ndo_set_mac_address = ixgbevf_set_mac, 2941 .ndo_change_mtu = ixgbevf_change_mtu, 2942 .ndo_tx_timeout = ixgbevf_tx_timeout, 2943 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 2944 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 2945}; 2946 2947static void ixgbevf_assign_netdev_ops(struct net_device *dev) 2948{ 2949 dev->netdev_ops = &ixgbe_netdev_ops; 2950 ixgbevf_set_ethtool_ops(dev); 2951 dev->watchdog_timeo = 5 * HZ; 2952} 2953 2954/** 2955 * ixgbevf_probe - Device Initialization Routine 2956 * @pdev: PCI device information struct 2957 * @ent: entry in ixgbevf_pci_tbl 2958 * 2959 * Returns 0 on success, negative on failure 2960 * 2961 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 
2962 * The OS initialization, configuring of the adapter private structure, 2963 * and a hardware reset occur. 2964 **/ 2965static int __devinit ixgbevf_probe(struct pci_dev *pdev, 2966 const struct pci_device_id *ent) 2967{ 2968 struct net_device *netdev; 2969 struct ixgbevf_adapter *adapter = NULL; 2970 struct ixgbe_hw *hw = NULL; 2971 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 2972 static int cards_found; 2973 int err, pci_using_dac; 2974 2975 err = pci_enable_device(pdev); 2976 if (err) 2977 return err; 2978 2979 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 2980 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 2981 pci_using_dac = 1; 2982 } else { 2983 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2984 if (err) { 2985 err = dma_set_coherent_mask(&pdev->dev, 2986 DMA_BIT_MASK(32)); 2987 if (err) { 2988 dev_err(&pdev->dev, "No usable DMA " 2989 "configuration, aborting\n"); 2990 goto err_dma; 2991 } 2992 } 2993 pci_using_dac = 0; 2994 } 2995 2996 err = pci_request_regions(pdev, ixgbevf_driver_name); 2997 if (err) { 2998 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 2999 goto err_pci_reg; 3000 } 3001 3002 pci_set_master(pdev); 3003 3004 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3005 MAX_TX_QUEUES); 3006 if (!netdev) { 3007 err = -ENOMEM; 3008 goto err_alloc_etherdev; 3009 } 3010 3011 SET_NETDEV_DEV(netdev, &pdev->dev); 3012 3013 pci_set_drvdata(pdev, netdev); 3014 adapter = netdev_priv(netdev); 3015 3016 adapter->netdev = netdev; 3017 adapter->pdev = pdev; 3018 hw = &adapter->hw; 3019 hw->back = adapter; 3020 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3021 3022 /* 3023 * call save state here in standalone driver because it relies on 3024 * adapter struct to exist, and needs to call netdev_priv 3025 */ 3026 pci_save_state(pdev); 3027 3028 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3029 pci_resource_len(pdev, 0)); 3030 if (!hw->hw_addr) { 3031 err = -EIO; 3032 goto err_ioremap; 3033 } 3034 3035 ixgbevf_assign_netdev_ops(netdev); 3036 3037 adapter->bd_number = cards_found; 3038 3039 /* Setup hw api */ 3040 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3041 hw->mac.type = ii->mac; 3042 3043 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3044 sizeof(struct ixgbe_mbx_operations)); 3045 3046 /* setup the private structure */ 3047 err = ixgbevf_sw_init(adapter); 3048 if (err) 3049 goto err_sw_init; 3050 3051 /* The HW MAC address was set and/or determined in sw_init */ 3052 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 3053 3054 if (!is_valid_ether_addr(netdev->dev_addr)) { 3055 pr_err("invalid MAC address\n"); 3056 err = -EIO; 3057 goto err_sw_init; 3058 } 3059 3060 netdev->hw_features = NETIF_F_SG | 3061 NETIF_F_IP_CSUM | 3062 NETIF_F_IPV6_CSUM | 3063 NETIF_F_TSO | 3064 NETIF_F_TSO6 | 3065 NETIF_F_RXCSUM; 3066 3067 netdev->features = netdev->hw_features | 3068 NETIF_F_HW_VLAN_TX | 3069 NETIF_F_HW_VLAN_RX | 3070 NETIF_F_HW_VLAN_FILTER; 3071 3072 netdev->vlan_features |= NETIF_F_TSO; 3073 netdev->vlan_features |= NETIF_F_TSO6; 3074 netdev->vlan_features |= NETIF_F_IP_CSUM; 3075 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3076 netdev->vlan_features |= NETIF_F_SG; 3077 3078 if (pci_using_dac) 3079 netdev->features |= NETIF_F_HIGHDMA; 3080 3081 netdev->priv_flags |= IFF_UNICAST_FLT; 3082 3083 init_timer(&adapter->watchdog_timer); 3084 adapter->watchdog_timer.function = ixgbevf_watchdog; 3085 adapter->watchdog_timer.data = (unsigned long)adapter; 3086 3087 
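	/* the Tx timeout handler schedules reset_task and the watchdog timer
	 * initialized above schedules watchdog_task, so both work items must
	 * exist before the netdev can be opened */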
INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3088 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3089 3090 err = ixgbevf_init_interrupt_scheme(adapter); 3091 if (err) 3092 goto err_sw_init; 3093 3094 /* pick up the PCI bus settings for reporting later */ 3095 if (hw->mac.ops.get_bus_info) 3096 hw->mac.ops.get_bus_info(hw); 3097 3098 strcpy(netdev->name, "eth%d"); 3099 3100 err = register_netdev(netdev); 3101 if (err) 3102 goto err_register; 3103 3104 netif_carrier_off(netdev); 3105 3106 ixgbevf_init_last_counter_stats(adapter); 3107 3108 /* print the MAC address */ 3109 hw_dbg(hw, "%pM\n", netdev->dev_addr); 3110 3111 hw_dbg(hw, "MAC: %d\n", hw->mac.type); 3112 3113 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); 3114 cards_found++; 3115 return 0; 3116 3117err_register: 3118err_sw_init: 3119 ixgbevf_reset_interrupt_capability(adapter); 3120 iounmap(hw->hw_addr); 3121err_ioremap: 3122 free_netdev(netdev); 3123err_alloc_etherdev: 3124 pci_release_regions(pdev); 3125err_pci_reg: 3126err_dma: 3127 pci_disable_device(pdev); 3128 return err; 3129} 3130 3131/** 3132 * ixgbevf_remove - Device Removal Routine 3133 * @pdev: PCI device information struct 3134 * 3135 * ixgbevf_remove is called by the PCI subsystem to alert the driver 3136 * that it should release a PCI device. The could be caused by a 3137 * Hot-Plug event, or because the driver is going to be removed from 3138 * memory. 3139 **/ 3140static void __devexit ixgbevf_remove(struct pci_dev *pdev) 3141{ 3142 struct net_device *netdev = pci_get_drvdata(pdev); 3143 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3144 3145 set_bit(__IXGBEVF_DOWN, &adapter->state); 3146 3147 del_timer_sync(&adapter->watchdog_timer); 3148 3149 cancel_work_sync(&adapter->reset_task); 3150 cancel_work_sync(&adapter->watchdog_task); 3151 3152 if (netdev->reg_state == NETREG_REGISTERED) 3153 unregister_netdev(netdev); 3154 3155 ixgbevf_reset_interrupt_capability(adapter); 3156 3157 iounmap(adapter->hw.hw_addr); 3158 pci_release_regions(pdev); 3159 3160 hw_dbg(&adapter->hw, "Remove complete\n"); 3161 3162 kfree(adapter->tx_ring); 3163 kfree(adapter->rx_ring); 3164 3165 free_netdev(netdev); 3166 3167 pci_disable_device(pdev); 3168} 3169 3170static struct pci_driver ixgbevf_driver = { 3171 .name = ixgbevf_driver_name, 3172 .id_table = ixgbevf_pci_tbl, 3173 .probe = ixgbevf_probe, 3174 .remove = __devexit_p(ixgbevf_remove), 3175 .shutdown = ixgbevf_shutdown, 3176}; 3177 3178/** 3179 * ixgbevf_init_module - Driver Registration Routine 3180 * 3181 * ixgbevf_init_module is the first routine called when the driver is 3182 * loaded. All it does is register with the PCI subsystem. 3183 **/ 3184static int __init ixgbevf_init_module(void) 3185{ 3186 int ret; 3187 pr_info("%s - version %s\n", ixgbevf_driver_string, 3188 ixgbevf_driver_version); 3189 3190 pr_info("%s\n", ixgbevf_copyright); 3191 3192 ret = pci_register_driver(&ixgbevf_driver); 3193 return ret; 3194} 3195 3196module_init(ixgbevf_init_module); 3197 3198/** 3199 * ixgbevf_exit_module - Driver Exit Cleanup Routine 3200 * 3201 * ixgbevf_exit_module is called just before the driver is removed 3202 * from memory. 
3203 **/ 3204static void __exit ixgbevf_exit_module(void) 3205{ 3206 pci_unregister_driver(&ixgbevf_driver); 3207} 3208 3209#ifdef DEBUG 3210/** 3211 * ixgbevf_get_hw_dev_name - return device name string 3212 * used by hardware layer to print debugging information 3213 **/ 3214char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3215{ 3216 struct ixgbevf_adapter *adapter = hw->back; 3217 return adapter->netdev->name; 3218} 3219 3220#endif 3221module_exit(ixgbevf_exit_module); 3222 3223/* ixgbevf_main.c */ 3224
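/*
 * Note: this VF driver only binds to virtual functions exposed by an
 * SR-IOV capable 82599/X540 PF, so the PF driver (ixgbe) has to create
 * the VFs (e.g. via its max_vfs parameter) before there is anything
 * here to probe.
 */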