ixgbevf_main.c revision f9d08f165b8a5a4af6f827318e692b57bc683825
/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	 board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
	 board_X540_vf},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void
ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop !=
tx_ring->tx_buffer_info[i].next_to_watch)) 210 goto cont_loop; 211 for ( ; !cleaned; count++) { 212 struct sk_buff *skb; 213 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 214 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 215 cleaned = (i == eop); 216 skb = tx_buffer_info->skb; 217 218 if (cleaned && skb) { 219 unsigned int segs, bytecount; 220 221 /* gso_segs is currently only valid for tcp */ 222 segs = skb_shinfo(skb)->gso_segs ?: 1; 223 /* multiply data chunks by size of headers */ 224 bytecount = ((segs - 1) * skb_headlen(skb)) + 225 skb->len; 226 total_packets += segs; 227 total_bytes += bytecount; 228 } 229 230 ixgbevf_unmap_and_free_tx_resource(tx_ring, 231 tx_buffer_info); 232 233 tx_desc->wb.status = 0; 234 235 i++; 236 if (i == tx_ring->count) 237 i = 0; 238 } 239 240cont_loop: 241 eop = tx_ring->tx_buffer_info[i].next_to_watch; 242 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); 243 } 244 245 tx_ring->next_to_clean = i; 246 247#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 248 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && 249 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 250 /* Make sure that anybody stopping the queue after this 251 * sees the new next_to_clean. 252 */ 253 smp_mb(); 254 if (__netif_subqueue_stopped(tx_ring->netdev, 255 tx_ring->queue_index) && 256 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 257 netif_wake_subqueue(tx_ring->netdev, 258 tx_ring->queue_index); 259 ++adapter->restart_queue; 260 } 261 } 262 263 u64_stats_update_begin(&tx_ring->syncp); 264 tx_ring->total_bytes += total_bytes; 265 tx_ring->total_packets += total_packets; 266 u64_stats_update_end(&tx_ring->syncp); 267 q_vector->tx.total_bytes += total_bytes; 268 q_vector->tx.total_packets += total_packets; 269 270 return count < tx_ring->count; 271} 272 273/** 274 * ixgbevf_receive_skb - Send a completed packet up the stack 275 * @q_vector: structure containing interrupt and ring information 276 * @skb: packet to send up 277 * @status: hardware indication of status of receive 278 * @rx_desc: rx descriptor 279 **/ 280static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, 281 struct sk_buff *skb, u8 status, 282 union ixgbe_adv_rx_desc *rx_desc) 283{ 284 struct ixgbevf_adapter *adapter = q_vector->adapter; 285 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 286 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 287 288 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) 289 __vlan_hwaccel_put_tag(skb, tag); 290 291 napi_gro_receive(&q_vector->napi, skb); 292} 293 294/** 295 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 296 * @adapter: address of board private structure 297 * @status_err: hardware indication of status of receive 298 * @skb: skb currently being received and modified 299 **/ 300static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, 301 struct ixgbevf_ring *ring, 302 u32 status_err, struct sk_buff *skb) 303{ 304 skb_checksum_none_assert(skb); 305 306 /* Rx csum disabled */ 307 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 308 return; 309 310 /* if IP and error */ 311 if ((status_err & IXGBE_RXD_STAT_IPCS) && 312 (status_err & IXGBE_RXDADV_ERR_IPE)) { 313 adapter->hw_csum_rx_error++; 314 return; 315 } 316 317 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 318 return; 319 320 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 321 adapter->hw_csum_rx_error++; 322 return; 323 } 324 325 /* It must be a TCP or UDP packet with a valid checksum */ 326 skb->ip_summed = CHECKSUM_UNNECESSARY; 327 adapter->hw_csum_rx_good++; 328} 329 330/** 331 * 
ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 332 * @adapter: address of board private structure 333 **/ 334static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, 335 struct ixgbevf_ring *rx_ring, 336 int cleaned_count) 337{ 338 struct pci_dev *pdev = adapter->pdev; 339 union ixgbe_adv_rx_desc *rx_desc; 340 struct ixgbevf_rx_buffer *bi; 341 struct sk_buff *skb; 342 unsigned int i = rx_ring->next_to_use; 343 344 bi = &rx_ring->rx_buffer_info[i]; 345 346 while (cleaned_count--) { 347 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 348 skb = bi->skb; 349 if (!skb) { 350 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 351 rx_ring->rx_buf_len); 352 if (!skb) { 353 adapter->alloc_rx_buff_failed++; 354 goto no_buffers; 355 } 356 bi->skb = skb; 357 } 358 if (!bi->dma) { 359 bi->dma = dma_map_single(&pdev->dev, skb->data, 360 rx_ring->rx_buf_len, 361 DMA_FROM_DEVICE); 362 if (dma_mapping_error(&pdev->dev, bi->dma)) { 363 dev_kfree_skb(skb); 364 bi->skb = NULL; 365 dev_err(&pdev->dev, "RX DMA map failed\n"); 366 break; 367 } 368 } 369 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 370 371 i++; 372 if (i == rx_ring->count) 373 i = 0; 374 bi = &rx_ring->rx_buffer_info[i]; 375 } 376 377no_buffers: 378 if (rx_ring->next_to_use != i) { 379 rx_ring->next_to_use = i; 380 381 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); 382 } 383} 384 385static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 386 u32 qmask) 387{ 388 struct ixgbe_hw *hw = &adapter->hw; 389 390 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); 391} 392 393static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 394 struct ixgbevf_ring *rx_ring, 395 int budget) 396{ 397 struct ixgbevf_adapter *adapter = q_vector->adapter; 398 struct pci_dev *pdev = adapter->pdev; 399 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 400 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; 401 struct sk_buff *skb; 402 unsigned int i; 403 u32 len, staterr; 404 int cleaned_count = 0; 405 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 406 407 i = rx_ring->next_to_clean; 408 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 409 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 410 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 411 412 while (staterr & IXGBE_RXD_STAT_DD) { 413 if (!budget) 414 break; 415 budget--; 416 417 rmb(); /* read descriptor and rx_buffer_info after status DD */ 418 len = le16_to_cpu(rx_desc->wb.upper.length); 419 skb = rx_buffer_info->skb; 420 prefetch(skb->data - NET_IP_ALIGN); 421 rx_buffer_info->skb = NULL; 422 423 if (rx_buffer_info->dma) { 424 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 425 rx_ring->rx_buf_len, 426 DMA_FROM_DEVICE); 427 rx_buffer_info->dma = 0; 428 skb_put(skb, len); 429 } 430 431 i++; 432 if (i == rx_ring->count) 433 i = 0; 434 435 next_rxd = IXGBEVF_RX_DESC(rx_ring, i); 436 prefetch(next_rxd); 437 cleaned_count++; 438 439 next_buffer = &rx_ring->rx_buffer_info[i]; 440 441 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 442 skb->next = next_buffer->skb; 443 IXGBE_CB(skb->next)->prev = skb; 444 adapter->non_eop_descs++; 445 goto next_desc; 446 } 447 448 /* we should not be chaining buffers, if we did drop the skb */ 449 if (IXGBE_CB(skb)->prev) { 450 do { 451 struct sk_buff *this = skb; 452 skb = IXGBE_CB(skb)->prev; 453 dev_kfree_skb(this); 454 } while (skb); 455 goto next_desc; 456 } 457 458 /* ERR_MASK will only have valid bits if EOP set */ 459 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 460 dev_kfree_skb_irq(skb); 461 goto next_desc; 462 } 463 
		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return !!budget;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
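 * The Rx budget is split evenly across this vector's Rx rings (but never
 * below one packet per ring), while Tx cleanup is not bounded by the NAPI
 * budget.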
523 **/ 524static int ixgbevf_poll(struct napi_struct *napi, int budget) 525{ 526 struct ixgbevf_q_vector *q_vector = 527 container_of(napi, struct ixgbevf_q_vector, napi); 528 struct ixgbevf_adapter *adapter = q_vector->adapter; 529 struct ixgbevf_ring *ring; 530 int per_ring_budget; 531 bool clean_complete = true; 532 533 ixgbevf_for_each_ring(ring, q_vector->tx) 534 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); 535 536 /* attempt to distribute budget to each queue fairly, but don't allow 537 * the budget to go below 1 because we'll exit polling */ 538 if (q_vector->rx.count > 1) 539 per_ring_budget = max(budget/q_vector->rx.count, 1); 540 else 541 per_ring_budget = budget; 542 543 ixgbevf_for_each_ring(ring, q_vector->rx) 544 clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring, 545 per_ring_budget); 546 547 /* If all work not completed, return budget and keep polling */ 548 if (!clean_complete) 549 return budget; 550 /* all work done, exit the polling mode */ 551 napi_complete(napi); 552 if (adapter->rx_itr_setting & 1) 553 ixgbevf_set_itr(q_vector); 554 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 555 ixgbevf_irq_enable_queues(adapter, 556 1 << q_vector->v_idx); 557 558 return 0; 559} 560 561/** 562 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 563 * @q_vector: structure containing interrupt and ring information 564 */ 565static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) 566{ 567 struct ixgbevf_adapter *adapter = q_vector->adapter; 568 struct ixgbe_hw *hw = &adapter->hw; 569 int v_idx = q_vector->v_idx; 570 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 571 572 /* 573 * set the WDIS bit to not clear the timer bits and cause an 574 * immediate assertion of the interrupt 575 */ 576 itr_reg |= IXGBE_EITR_CNT_WDIS; 577 578 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 579} 580 581/** 582 * ixgbevf_configure_msix - Configure MSI-X hardware 583 * @adapter: board private structure 584 * 585 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X 586 * interrupts. 587 **/ 588static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 589{ 590 struct ixgbevf_q_vector *q_vector; 591 int q_vectors, v_idx; 592 593 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 594 adapter->eims_enable_mask = 0; 595 596 /* 597 * Populate the IVAR table and set the ITR values to the 598 * corresponding register. 
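	 * As the index math in ixgbevf_set_ivar() above works out, each
	 * VTIVAR register holds the vector bytes for a pair of queues: the
	 * byte offset is (16 * (queue & 1)) + (8 * direction), so, for
	 * example, the Rx cause of queue 1 is programmed into bits 16-23
	 * of VTIVAR(0).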
599 */ 600 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 601 struct ixgbevf_ring *ring; 602 q_vector = adapter->q_vector[v_idx]; 603 604 ixgbevf_for_each_ring(ring, q_vector->rx) 605 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); 606 607 ixgbevf_for_each_ring(ring, q_vector->tx) 608 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); 609 610 if (q_vector->tx.ring && !q_vector->rx.ring) { 611 /* tx only vector */ 612 if (adapter->tx_itr_setting == 1) 613 q_vector->itr = IXGBE_10K_ITR; 614 else 615 q_vector->itr = adapter->tx_itr_setting; 616 } else { 617 /* rx or rx/tx vector */ 618 if (adapter->rx_itr_setting == 1) 619 q_vector->itr = IXGBE_20K_ITR; 620 else 621 q_vector->itr = adapter->rx_itr_setting; 622 } 623 624 /* add q_vector eims value to global eims_enable_mask */ 625 adapter->eims_enable_mask |= 1 << v_idx; 626 627 ixgbevf_write_eitr(q_vector); 628 } 629 630 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 631 /* setup eims_other and add value to global eims_enable_mask */ 632 adapter->eims_other = 1 << v_idx; 633 adapter->eims_enable_mask |= adapter->eims_other; 634} 635 636enum latency_range { 637 lowest_latency = 0, 638 low_latency = 1, 639 bulk_latency = 2, 640 latency_invalid = 255 641}; 642 643/** 644 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 645 * @q_vector: structure containing interrupt and ring information 646 * @ring_container: structure containing ring performance data 647 * 648 * Stores a new ITR value based on packets and byte 649 * counts during the last interrupt. The advantage of per interrupt 650 * computation is faster updates and more accurate ITR for the current 651 * traffic pattern. Constants in this function were computed 652 * based on theoretical maximum wire speed and thresholds were set based 653 * on testing data as well as attempting to minimize response time 654 * while increasing bulk throughput. 655 **/ 656static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, 657 struct ixgbevf_ring_container *ring_container) 658{ 659 int bytes = ring_container->total_bytes; 660 int packets = ring_container->total_packets; 661 u32 timepassed_us; 662 u64 bytes_perint; 663 u8 itr_setting = ring_container->itr; 664 665 if (packets == 0) 666 return; 667 668 /* simple throttlerate management 669 * 0-20MB/s lowest (100000 ints/s) 670 * 20-100MB/s low (20000 ints/s) 671 * 100-1249MB/s bulk (8000 ints/s) 672 */ 673 /* what was last interrupt timeslice? 
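	 * (e.g., with the default rx setting this is IXGBE_20K_ITR, assumed
	 * to be 200 per ixgbevf.h, so itr >> 2 gives a 50 usec timeslice,
	 * i.e. roughly 20000 interrupts/sec)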
*/ 674 timepassed_us = q_vector->itr >> 2; 675 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 676 677 switch (itr_setting) { 678 case lowest_latency: 679 if (bytes_perint > 10) 680 itr_setting = low_latency; 681 break; 682 case low_latency: 683 if (bytes_perint > 20) 684 itr_setting = bulk_latency; 685 else if (bytes_perint <= 10) 686 itr_setting = lowest_latency; 687 break; 688 case bulk_latency: 689 if (bytes_perint <= 20) 690 itr_setting = low_latency; 691 break; 692 } 693 694 /* clear work counters since we have the values we need */ 695 ring_container->total_bytes = 0; 696 ring_container->total_packets = 0; 697 698 /* write updated itr to ring container */ 699 ring_container->itr = itr_setting; 700} 701 702static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) 703{ 704 u32 new_itr = q_vector->itr; 705 u8 current_itr; 706 707 ixgbevf_update_itr(q_vector, &q_vector->tx); 708 ixgbevf_update_itr(q_vector, &q_vector->rx); 709 710 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 711 712 switch (current_itr) { 713 /* counts and packets in update_itr are dependent on these numbers */ 714 case lowest_latency: 715 new_itr = IXGBE_100K_ITR; 716 break; 717 case low_latency: 718 new_itr = IXGBE_20K_ITR; 719 break; 720 case bulk_latency: 721 default: 722 new_itr = IXGBE_8K_ITR; 723 break; 724 } 725 726 if (new_itr != q_vector->itr) { 727 /* do an exponential smoothing */ 728 new_itr = (10 * new_itr * q_vector->itr) / 729 ((9 * new_itr) + q_vector->itr); 730 731 /* save the algorithm value here */ 732 q_vector->itr = new_itr; 733 734 ixgbevf_write_eitr(q_vector); 735 } 736} 737 738static irqreturn_t ixgbevf_msix_other(int irq, void *data) 739{ 740 struct ixgbevf_adapter *adapter = data; 741 struct ixgbe_hw *hw = &adapter->hw; 742 743 hw->mac.get_link_status = 1; 744 745 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 746 mod_timer(&adapter->watchdog_timer, jiffies); 747 748 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 749 750 return IRQ_HANDLED; 751} 752 753 754/** 755 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) 756 * @irq: unused 757 * @data: pointer to our q_vector struct for this interrupt vector 758 **/ 759static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) 760{ 761 struct ixgbevf_q_vector *q_vector = data; 762 763 /* EIAM disabled interrupts (on this vector) for us */ 764 if (q_vector->rx.ring || q_vector->tx.ring) 765 napi_schedule(&q_vector->napi); 766 767 return IRQ_HANDLED; 768} 769 770static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, 771 int r_idx) 772{ 773 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 774 775 a->rx_ring[r_idx].next = q_vector->rx.ring; 776 q_vector->rx.ring = &a->rx_ring[r_idx]; 777 q_vector->rx.count++; 778} 779 780static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, 781 int t_idx) 782{ 783 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 784 785 a->tx_ring[t_idx].next = q_vector->tx.ring; 786 q_vector->tx.ring = &a->tx_ring[t_idx]; 787 q_vector->tx.count++; 788} 789 790/** 791 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors 792 * @adapter: board private structure to initialize 793 * 794 * This function maps descriptor rings to the queue-specific vectors 795 * we were allotted through the MSI-X enabling code. Ideally, we'd have 796 * one vector per ring/queue, but on a constrained vector budget, we 797 * group the rings as "efficiently" as possible. You would add new 798 * mapping configurations in here. 
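 * For example, hypothetically with 4 Rx and 4 Tx queues but only 2
 * q_vectors, the DIV_ROUND_UP() loops below attach 2 Rx and 2 Tx rings
 * to each vector.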
799 **/ 800static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) 801{ 802 int q_vectors; 803 int v_start = 0; 804 int rxr_idx = 0, txr_idx = 0; 805 int rxr_remaining = adapter->num_rx_queues; 806 int txr_remaining = adapter->num_tx_queues; 807 int i, j; 808 int rqpv, tqpv; 809 int err = 0; 810 811 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 812 813 /* 814 * The ideal configuration... 815 * We have enough vectors to map one per queue. 816 */ 817 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 818 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 819 map_vector_to_rxq(adapter, v_start, rxr_idx); 820 821 for (; txr_idx < txr_remaining; v_start++, txr_idx++) 822 map_vector_to_txq(adapter, v_start, txr_idx); 823 goto out; 824 } 825 826 /* 827 * If we don't have enough vectors for a 1-to-1 828 * mapping, we'll have to group them so there are 829 * multiple queues per vector. 830 */ 831 /* Re-adjusting *qpv takes care of the remainder. */ 832 for (i = v_start; i < q_vectors; i++) { 833 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); 834 for (j = 0; j < rqpv; j++) { 835 map_vector_to_rxq(adapter, i, rxr_idx); 836 rxr_idx++; 837 rxr_remaining--; 838 } 839 } 840 for (i = v_start; i < q_vectors; i++) { 841 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); 842 for (j = 0; j < tqpv; j++) { 843 map_vector_to_txq(adapter, i, txr_idx); 844 txr_idx++; 845 txr_remaining--; 846 } 847 } 848 849out: 850 return err; 851} 852 853/** 854 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts 855 * @adapter: board private structure 856 * 857 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests 858 * interrupts from the kernel. 859 **/ 860static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 861{ 862 struct net_device *netdev = adapter->netdev; 863 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 864 int vector, err; 865 int ri = 0, ti = 0; 866 867 for (vector = 0; vector < q_vectors; vector++) { 868 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; 869 struct msix_entry *entry = &adapter->msix_entries[vector]; 870 871 if (q_vector->tx.ring && q_vector->rx.ring) { 872 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 873 "%s-%s-%d", netdev->name, "TxRx", ri++); 874 ti++; 875 } else if (q_vector->rx.ring) { 876 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 877 "%s-%s-%d", netdev->name, "rx", ri++); 878 } else if (q_vector->tx.ring) { 879 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 880 "%s-%s-%d", netdev->name, "tx", ti++); 881 } else { 882 /* skip this unused q_vector */ 883 continue; 884 } 885 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, 886 q_vector->name, q_vector); 887 if (err) { 888 hw_dbg(&adapter->hw, 889 "request_irq failed for MSIX interrupt " 890 "Error: %d\n", err); 891 goto free_queue_irqs; 892 } 893 } 894 895 err = request_irq(adapter->msix_entries[vector].vector, 896 &ixgbevf_msix_other, 0, netdev->name, adapter); 897 if (err) { 898 hw_dbg(&adapter->hw, 899 "request_irq for msix_other failed: %d\n", err); 900 goto free_queue_irqs; 901 } 902 903 return 0; 904 905free_queue_irqs: 906 while (vector) { 907 vector--; 908 free_irq(adapter->msix_entries[vector].vector, 909 adapter->q_vector[vector]); 910 } 911 pci_disable_msix(adapter->pdev); 912 kfree(adapter->msix_entries); 913 adapter->msix_entries = NULL; 914 return err; 915} 916 917static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) 918{ 919 int i, q_vectors = 
adapter->num_msix_vectors - NON_Q_VECTORS; 920 921 for (i = 0; i < q_vectors; i++) { 922 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; 923 q_vector->rx.ring = NULL; 924 q_vector->tx.ring = NULL; 925 q_vector->rx.count = 0; 926 q_vector->tx.count = 0; 927 } 928} 929 930/** 931 * ixgbevf_request_irq - initialize interrupts 932 * @adapter: board private structure 933 * 934 * Attempts to configure interrupts using the best available 935 * capabilities of the hardware and kernel. 936 **/ 937static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) 938{ 939 int err = 0; 940 941 err = ixgbevf_request_msix_irqs(adapter); 942 943 if (err) 944 hw_dbg(&adapter->hw, 945 "request_irq failed, Error %d\n", err); 946 947 return err; 948} 949 950static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 951{ 952 int i, q_vectors; 953 954 q_vectors = adapter->num_msix_vectors; 955 i = q_vectors - 1; 956 957 free_irq(adapter->msix_entries[i].vector, adapter); 958 i--; 959 960 for (; i >= 0; i--) { 961 /* free only the irqs that were actually requested */ 962 if (!adapter->q_vector[i]->rx.ring && 963 !adapter->q_vector[i]->tx.ring) 964 continue; 965 966 free_irq(adapter->msix_entries[i].vector, 967 adapter->q_vector[i]); 968 } 969 970 ixgbevf_reset_q_vectors(adapter); 971} 972 973/** 974 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC 975 * @adapter: board private structure 976 **/ 977static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 978{ 979 struct ixgbe_hw *hw = &adapter->hw; 980 int i; 981 982 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); 983 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 984 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); 985 986 IXGBE_WRITE_FLUSH(hw); 987 988 for (i = 0; i < adapter->num_msix_vectors; i++) 989 synchronize_irq(adapter->msix_entries[i].vector); 990} 991 992/** 993 * ixgbevf_irq_enable - Enable default interrupt generation settings 994 * @adapter: board private structure 995 **/ 996static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) 997{ 998 struct ixgbe_hw *hw = &adapter->hw; 999 1000 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); 1001 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); 1002 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); 1003} 1004 1005/** 1006 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset 1007 * @adapter: board private structure 1008 * 1009 * Configure the Tx unit of the MAC after a reset. 1010 **/ 1011static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) 1012{ 1013 u64 tdba; 1014 struct ixgbe_hw *hw = &adapter->hw; 1015 u32 i, j, tdlen, txctrl; 1016 1017 /* Setup the HW Tx Head and Tail descriptor pointers */ 1018 for (i = 0; i < adapter->num_tx_queues; i++) { 1019 struct ixgbevf_ring *ring = &adapter->tx_ring[i]; 1020 j = ring->reg_idx; 1021 tdba = ring->dma; 1022 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 1023 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), 1024 (tdba & DMA_BIT_MASK(32))); 1025 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); 1026 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen); 1027 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0); 1028 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0); 1029 adapter->tx_ring[i].head = IXGBE_VFTDH(j); 1030 adapter->tx_ring[i].tail = IXGBE_VFTDT(j); 1031 /* Disable Tx Head Writeback RO bit, since this hoses 1032 * bookkeeping if things aren't delivered in order. 
1033 */ 1034 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); 1035 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1036 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); 1037 } 1038} 1039 1040#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1041 1042static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) 1043{ 1044 struct ixgbevf_ring *rx_ring; 1045 struct ixgbe_hw *hw = &adapter->hw; 1046 u32 srrctl; 1047 1048 rx_ring = &adapter->rx_ring[index]; 1049 1050 srrctl = IXGBE_SRRCTL_DROP_EN; 1051 1052 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1053 1054 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> 1055 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1056 1057 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1058} 1059 1060static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) 1061{ 1062 struct ixgbe_hw *hw = &adapter->hw; 1063 struct net_device *netdev = adapter->netdev; 1064 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1065 int i; 1066 u16 rx_buf_len; 1067 1068 /* notify the PF of our intent to use this size of frame */ 1069 ixgbevf_rlpml_set_vf(hw, max_frame); 1070 1071 /* PF will allow an extra 4 bytes past for vlan tagged frames */ 1072 max_frame += VLAN_HLEN; 1073 1074 /* 1075 * Make best use of allocation by using all but 1K of a 1076 * power of 2 allocation that will be used for skb->head. 1077 */ 1078 if ((hw->mac.type == ixgbe_mac_X540_vf) && 1079 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) 1080 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1081 else if (max_frame <= IXGBEVF_RXBUFFER_3K) 1082 rx_buf_len = IXGBEVF_RXBUFFER_3K; 1083 else if (max_frame <= IXGBEVF_RXBUFFER_7K) 1084 rx_buf_len = IXGBEVF_RXBUFFER_7K; 1085 else if (max_frame <= IXGBEVF_RXBUFFER_15K) 1086 rx_buf_len = IXGBEVF_RXBUFFER_15K; 1087 else 1088 rx_buf_len = IXGBEVF_MAX_RXBUFFER; 1089 1090 for (i = 0; i < adapter->num_rx_queues; i++) 1091 adapter->rx_ring[i].rx_buf_len = rx_buf_len; 1092} 1093 1094/** 1095 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1096 * @adapter: board private structure 1097 * 1098 * Configure the Rx unit of the MAC after a reset. 
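 * The per-ring buffer size comes from ixgbevf_set_rx_buffer_len() above:
 * assuming MAXIMUM_ETHERNET_VLAN_SIZE is the usual 1522 bytes, a standard
 * 1500-byte MTU on an X540 VF stays within that small buffer, while larger
 * MTUs fall through to the 3K/7K/15K buckets.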
1099 **/ 1100static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) 1101{ 1102 u64 rdba; 1103 struct ixgbe_hw *hw = &adapter->hw; 1104 int i, j; 1105 u32 rdlen; 1106 1107 /* PSRTYPE must be initialized in 82599 */ 1108 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); 1109 1110 /* set_rx_buffer_len must be called before ring initialization */ 1111 ixgbevf_set_rx_buffer_len(adapter); 1112 1113 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1114 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1115 * the Base and Length of the Rx Descriptor Ring */ 1116 for (i = 0; i < adapter->num_rx_queues; i++) { 1117 rdba = adapter->rx_ring[i].dma; 1118 j = adapter->rx_ring[i].reg_idx; 1119 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1120 (rdba & DMA_BIT_MASK(32))); 1121 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1122 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen); 1123 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0); 1124 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); 1125 adapter->rx_ring[i].head = IXGBE_VFRDH(j); 1126 adapter->rx_ring[i].tail = IXGBE_VFRDT(j); 1127 1128 ixgbevf_configure_srrctl(adapter, j); 1129 } 1130} 1131 1132static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1133{ 1134 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1135 struct ixgbe_hw *hw = &adapter->hw; 1136 int err; 1137 1138 if (!hw->mac.ops.set_vfta) 1139 return -EOPNOTSUPP; 1140 1141 spin_lock_bh(&adapter->mbx_lock); 1142 1143 /* add VID to filter table */ 1144 err = hw->mac.ops.set_vfta(hw, vid, 0, true); 1145 1146 spin_unlock_bh(&adapter->mbx_lock); 1147 1148 /* translate error return types so error makes sense */ 1149 if (err == IXGBE_ERR_MBX) 1150 return -EIO; 1151 1152 if (err == IXGBE_ERR_INVALID_ARGUMENT) 1153 return -EACCES; 1154 1155 set_bit(vid, adapter->active_vlans); 1156 1157 return err; 1158} 1159 1160static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1161{ 1162 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1163 struct ixgbe_hw *hw = &adapter->hw; 1164 int err = -EOPNOTSUPP; 1165 1166 spin_lock_bh(&adapter->mbx_lock); 1167 1168 /* remove VID from filter table */ 1169 if (hw->mac.ops.set_vfta) 1170 err = hw->mac.ops.set_vfta(hw, vid, 0, false); 1171 1172 spin_unlock_bh(&adapter->mbx_lock); 1173 1174 clear_bit(vid, adapter->active_vlans); 1175 1176 return err; 1177} 1178 1179static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 1180{ 1181 u16 vid; 1182 1183 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1184 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); 1185} 1186 1187static int ixgbevf_write_uc_addr_list(struct net_device *netdev) 1188{ 1189 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1190 struct ixgbe_hw *hw = &adapter->hw; 1191 int count = 0; 1192 1193 if ((netdev_uc_count(netdev)) > 10) { 1194 pr_err("Too many unicast filters - No Space\n"); 1195 return -ENOSPC; 1196 } 1197 1198 if (!netdev_uc_empty(netdev)) { 1199 struct netdev_hw_addr *ha; 1200 netdev_for_each_uc_addr(ha, netdev) { 1201 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); 1202 udelay(200); 1203 } 1204 } else { 1205 /* 1206 * If the list is empty then send message to PF driver to 1207 * clear all macvlans on this VF. 
1208 */ 1209 hw->mac.ops.set_uc_addr(hw, 0, NULL); 1210 } 1211 1212 return count; 1213} 1214 1215/** 1216 * ixgbevf_set_rx_mode - Multicast set 1217 * @netdev: network interface device structure 1218 * 1219 * The set_rx_method entry point is called whenever the multicast address 1220 * list or the network interface flags are updated. This routine is 1221 * responsible for configuring the hardware for proper multicast mode. 1222 **/ 1223static void ixgbevf_set_rx_mode(struct net_device *netdev) 1224{ 1225 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1226 struct ixgbe_hw *hw = &adapter->hw; 1227 1228 spin_lock_bh(&adapter->mbx_lock); 1229 1230 /* reprogram multicast list */ 1231 if (hw->mac.ops.update_mc_addr_list) 1232 hw->mac.ops.update_mc_addr_list(hw, netdev); 1233 1234 ixgbevf_write_uc_addr_list(netdev); 1235 1236 spin_unlock_bh(&adapter->mbx_lock); 1237} 1238 1239static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1240{ 1241 int q_idx; 1242 struct ixgbevf_q_vector *q_vector; 1243 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1244 1245 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1246 q_vector = adapter->q_vector[q_idx]; 1247 napi_enable(&q_vector->napi); 1248 } 1249} 1250 1251static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) 1252{ 1253 int q_idx; 1254 struct ixgbevf_q_vector *q_vector; 1255 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1256 1257 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1258 q_vector = adapter->q_vector[q_idx]; 1259 napi_disable(&q_vector->napi); 1260 } 1261} 1262 1263static void ixgbevf_configure(struct ixgbevf_adapter *adapter) 1264{ 1265 struct net_device *netdev = adapter->netdev; 1266 int i; 1267 1268 ixgbevf_set_rx_mode(netdev); 1269 1270 ixgbevf_restore_vlan(adapter); 1271 1272 ixgbevf_configure_tx(adapter); 1273 ixgbevf_configure_rx(adapter); 1274 for (i = 0; i < adapter->num_rx_queues; i++) { 1275 struct ixgbevf_ring *ring = &adapter->rx_ring[i]; 1276 ixgbevf_alloc_rx_buffers(adapter, ring, 1277 IXGBE_DESC_UNUSED(ring)); 1278 } 1279} 1280 1281#define IXGBE_MAX_RX_DESC_POLL 10 1282static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, 1283 int rxr) 1284{ 1285 struct ixgbe_hw *hw = &adapter->hw; 1286 int j = adapter->rx_ring[rxr].reg_idx; 1287 int k; 1288 1289 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { 1290 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) 1291 break; 1292 else 1293 msleep(1); 1294 } 1295 if (k >= IXGBE_MAX_RX_DESC_POLL) { 1296 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d " 1297 "not set within the polling period\n", rxr); 1298 } 1299 1300 ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], 1301 (adapter->rx_ring[rxr].count - 1)); 1302} 1303 1304static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) 1305{ 1306 /* Only save pre-reset stats if there are some */ 1307 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { 1308 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - 1309 adapter->stats.base_vfgprc; 1310 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - 1311 adapter->stats.base_vfgptc; 1312 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - 1313 adapter->stats.base_vfgorc; 1314 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - 1315 adapter->stats.base_vfgotc; 1316 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - 1317 adapter->stats.base_vfmprc; 1318 } 1319} 1320 1321static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) 
1322{ 1323 struct ixgbe_hw *hw = &adapter->hw; 1324 1325 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 1326 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 1327 adapter->stats.last_vfgorc |= 1328 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 1329 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 1330 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 1331 adapter->stats.last_vfgotc |= 1332 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 1333 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 1334 1335 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; 1336 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; 1337 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; 1338 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; 1339 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1340} 1341 1342static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) 1343{ 1344 struct ixgbe_hw *hw = &adapter->hw; 1345 int api[] = { ixgbe_mbox_api_11, 1346 ixgbe_mbox_api_10, 1347 ixgbe_mbox_api_unknown }; 1348 int err = 0, idx = 0; 1349 1350 spin_lock_bh(&adapter->mbx_lock); 1351 1352 while (api[idx] != ixgbe_mbox_api_unknown) { 1353 err = ixgbevf_negotiate_api_version(hw, api[idx]); 1354 if (!err) 1355 break; 1356 idx++; 1357 } 1358 1359 spin_unlock_bh(&adapter->mbx_lock); 1360} 1361 1362static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1363{ 1364 struct net_device *netdev = adapter->netdev; 1365 struct ixgbe_hw *hw = &adapter->hw; 1366 int i, j = 0; 1367 int num_rx_rings = adapter->num_rx_queues; 1368 u32 txdctl, rxdctl; 1369 1370 for (i = 0; i < adapter->num_tx_queues; i++) { 1371 j = adapter->tx_ring[i].reg_idx; 1372 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1373 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 1374 txdctl |= (8 << 16); 1375 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1376 } 1377 1378 for (i = 0; i < adapter->num_tx_queues; i++) { 1379 j = adapter->tx_ring[i].reg_idx; 1380 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1381 txdctl |= IXGBE_TXDCTL_ENABLE; 1382 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1383 } 1384 1385 for (i = 0; i < num_rx_rings; i++) { 1386 j = adapter->rx_ring[i].reg_idx; 1387 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1388 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1389 if (hw->mac.type == ixgbe_mac_X540_vf) { 1390 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; 1391 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) | 1392 IXGBE_RXDCTL_RLPML_EN); 1393 } 1394 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1395 ixgbevf_rx_desc_queue_enable(adapter, i); 1396 } 1397 1398 ixgbevf_configure_msix(adapter); 1399 1400 spin_lock_bh(&adapter->mbx_lock); 1401 1402 if (hw->mac.ops.set_rar) { 1403 if (is_valid_ether_addr(hw->mac.addr)) 1404 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 1405 else 1406 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1407 } 1408 1409 spin_unlock_bh(&adapter->mbx_lock); 1410 1411 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1412 ixgbevf_napi_enable_all(adapter); 1413 1414 /* enable transmits */ 1415 netif_tx_start_all_queues(netdev); 1416 1417 ixgbevf_save_reset_stats(adapter); 1418 ixgbevf_init_last_counter_stats(adapter); 1419 1420 hw->mac.get_link_status = 1; 1421 mod_timer(&adapter->watchdog_timer, jiffies); 1422} 1423 1424static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter) 1425{ 1426 struct ixgbe_hw *hw = &adapter->hw; 1427 struct ixgbevf_ring *rx_ring; 1428 
unsigned int def_q = 0; 1429 unsigned int num_tcs = 0; 1430 unsigned int num_rx_queues = 1; 1431 int err, i; 1432 1433 spin_lock_bh(&adapter->mbx_lock); 1434 1435 /* fetch queue configuration from the PF */ 1436 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 1437 1438 spin_unlock_bh(&adapter->mbx_lock); 1439 1440 if (err) 1441 return err; 1442 1443 if (num_tcs > 1) { 1444 /* update default Tx ring register index */ 1445 adapter->tx_ring[0].reg_idx = def_q; 1446 1447 /* we need as many queues as traffic classes */ 1448 num_rx_queues = num_tcs; 1449 } 1450 1451 /* nothing to do if we have the correct number of queues */ 1452 if (adapter->num_rx_queues == num_rx_queues) 1453 return 0; 1454 1455 /* allocate new rings */ 1456 rx_ring = kcalloc(num_rx_queues, 1457 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1458 if (!rx_ring) 1459 return -ENOMEM; 1460 1461 /* setup ring fields */ 1462 for (i = 0; i < num_rx_queues; i++) { 1463 rx_ring[i].count = adapter->rx_ring_count; 1464 rx_ring[i].queue_index = i; 1465 rx_ring[i].reg_idx = i; 1466 rx_ring[i].dev = &adapter->pdev->dev; 1467 rx_ring[i].netdev = adapter->netdev; 1468 1469 /* allocate resources on the ring */ 1470 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); 1471 if (err) { 1472 while (i) { 1473 i--; 1474 ixgbevf_free_rx_resources(adapter, &rx_ring[i]); 1475 } 1476 kfree(rx_ring); 1477 return err; 1478 } 1479 } 1480 1481 /* free the existing rings and queues */ 1482 ixgbevf_free_all_rx_resources(adapter); 1483 adapter->num_rx_queues = 0; 1484 kfree(adapter->rx_ring); 1485 1486 /* move new rings into position on the adapter struct */ 1487 adapter->rx_ring = rx_ring; 1488 adapter->num_rx_queues = num_rx_queues; 1489 1490 /* reset ring to vector mapping */ 1491 ixgbevf_reset_q_vectors(adapter); 1492 ixgbevf_map_rings_to_vectors(adapter); 1493 1494 return 0; 1495} 1496 1497void ixgbevf_up(struct ixgbevf_adapter *adapter) 1498{ 1499 struct ixgbe_hw *hw = &adapter->hw; 1500 1501 ixgbevf_negotiate_api(adapter); 1502 1503 ixgbevf_reset_queues(adapter); 1504 1505 ixgbevf_configure(adapter); 1506 1507 ixgbevf_up_complete(adapter); 1508 1509 /* clear any pending interrupts, may auto mask */ 1510 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1511 1512 ixgbevf_irq_enable(adapter); 1513} 1514 1515/** 1516 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 1517 * @adapter: board private structure 1518 * @rx_ring: ring to free buffers from 1519 **/ 1520static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, 1521 struct ixgbevf_ring *rx_ring) 1522{ 1523 struct pci_dev *pdev = adapter->pdev; 1524 unsigned long size; 1525 unsigned int i; 1526 1527 if (!rx_ring->rx_buffer_info) 1528 return; 1529 1530 /* Free all the Rx ring sk_buffs */ 1531 for (i = 0; i < rx_ring->count; i++) { 1532 struct ixgbevf_rx_buffer *rx_buffer_info; 1533 1534 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1535 if (rx_buffer_info->dma) { 1536 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 1537 rx_ring->rx_buf_len, 1538 DMA_FROM_DEVICE); 1539 rx_buffer_info->dma = 0; 1540 } 1541 if (rx_buffer_info->skb) { 1542 struct sk_buff *skb = rx_buffer_info->skb; 1543 rx_buffer_info->skb = NULL; 1544 do { 1545 struct sk_buff *this = skb; 1546 skb = IXGBE_CB(skb)->prev; 1547 dev_kfree_skb(this); 1548 } while (skb); 1549 } 1550 } 1551 1552 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 1553 memset(rx_ring->rx_buffer_info, 0, size); 1554 1555 /* Zero out the descriptor ring */ 1556 memset(rx_ring->desc, 0, rx_ring->size); 1557 1558 rx_ring->next_to_clean = 0; 1559 
rx_ring->next_to_use = 0; 1560 1561 if (rx_ring->head) 1562 writel(0, adapter->hw.hw_addr + rx_ring->head); 1563 if (rx_ring->tail) 1564 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1565} 1566 1567/** 1568 * ixgbevf_clean_tx_ring - Free Tx Buffers 1569 * @adapter: board private structure 1570 * @tx_ring: ring to be cleaned 1571 **/ 1572static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter, 1573 struct ixgbevf_ring *tx_ring) 1574{ 1575 struct ixgbevf_tx_buffer *tx_buffer_info; 1576 unsigned long size; 1577 unsigned int i; 1578 1579 if (!tx_ring->tx_buffer_info) 1580 return; 1581 1582 /* Free all the Tx ring sk_buffs */ 1583 1584 for (i = 0; i < tx_ring->count; i++) { 1585 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1586 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1587 } 1588 1589 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 1590 memset(tx_ring->tx_buffer_info, 0, size); 1591 1592 memset(tx_ring->desc, 0, tx_ring->size); 1593 1594 tx_ring->next_to_use = 0; 1595 tx_ring->next_to_clean = 0; 1596 1597 if (tx_ring->head) 1598 writel(0, adapter->hw.hw_addr + tx_ring->head); 1599 if (tx_ring->tail) 1600 writel(0, adapter->hw.hw_addr + tx_ring->tail); 1601} 1602 1603/** 1604 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues 1605 * @adapter: board private structure 1606 **/ 1607static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) 1608{ 1609 int i; 1610 1611 for (i = 0; i < adapter->num_rx_queues; i++) 1612 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]); 1613} 1614 1615/** 1616 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues 1617 * @adapter: board private structure 1618 **/ 1619static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) 1620{ 1621 int i; 1622 1623 for (i = 0; i < adapter->num_tx_queues; i++) 1624 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1625} 1626 1627void ixgbevf_down(struct ixgbevf_adapter *adapter) 1628{ 1629 struct net_device *netdev = adapter->netdev; 1630 struct ixgbe_hw *hw = &adapter->hw; 1631 u32 txdctl; 1632 int i, j; 1633 1634 /* signal that we are down to the interrupt handler */ 1635 set_bit(__IXGBEVF_DOWN, &adapter->state); 1636 /* disable receives */ 1637 1638 netif_tx_disable(netdev); 1639 1640 msleep(10); 1641 1642 netif_tx_stop_all_queues(netdev); 1643 1644 ixgbevf_irq_disable(adapter); 1645 1646 ixgbevf_napi_disable_all(adapter); 1647 1648 del_timer_sync(&adapter->watchdog_timer); 1649 /* can't call flush scheduled work here because it can deadlock 1650 * if linkwatch_event tries to acquire the rtnl_lock which we are 1651 * holding */ 1652 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) 1653 msleep(1); 1654 1655 /* disable transmits in the hardware now that interrupts are off */ 1656 for (i = 0; i < adapter->num_tx_queues; i++) { 1657 j = adapter->tx_ring[i].reg_idx; 1658 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1659 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), 1660 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 1661 } 1662 1663 netif_carrier_off(netdev); 1664 1665 if (!pci_channel_offline(adapter->pdev)) 1666 ixgbevf_reset(adapter); 1667 1668 ixgbevf_clean_all_tx_rings(adapter); 1669 ixgbevf_clean_all_rx_rings(adapter); 1670} 1671 1672void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1673{ 1674 WARN_ON(in_interrupt()); 1675 1676 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1677 msleep(1); 1678 1679 /* 1680 * Check if PF is up before re-init. 
If not then skip until 1681 * later when the PF is up and ready to service requests from 1682 * the VF via mailbox. If the VF is up and running then the 1683 * watchdog task will continue to schedule reset tasks until 1684 * the PF is up and running. 1685 */ 1686 ixgbevf_down(adapter); 1687 ixgbevf_up(adapter); 1688 1689 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1690} 1691 1692void ixgbevf_reset(struct ixgbevf_adapter *adapter) 1693{ 1694 struct ixgbe_hw *hw = &adapter->hw; 1695 struct net_device *netdev = adapter->netdev; 1696 1697 spin_lock_bh(&adapter->mbx_lock); 1698 1699 if (hw->mac.ops.reset_hw(hw)) 1700 hw_dbg(hw, "PF still resetting\n"); 1701 else 1702 hw->mac.ops.init_hw(hw); 1703 1704 spin_unlock_bh(&adapter->mbx_lock); 1705 1706 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1707 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1708 netdev->addr_len); 1709 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1710 netdev->addr_len); 1711 } 1712} 1713 1714static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1715 int vectors) 1716{ 1717 int err, vector_threshold; 1718 1719 /* We'll want at least 2 (vector_threshold): 1720 * 1) TxQ[0] + RxQ[0] handler 1721 * 2) Other (Link Status Change, etc.) 1722 */ 1723 vector_threshold = MIN_MSIX_COUNT; 1724 1725 /* The more we get, the more we will assign to Tx/Rx Cleanup 1726 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1727 * Right now, we simply care about how many we'll get; we'll 1728 * set them up later while requesting irq's. 1729 */ 1730 while (vectors >= vector_threshold) { 1731 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1732 vectors); 1733 if (!err) /* Success in acquiring all requested vectors. */ 1734 break; 1735 else if (err < 0) 1736 vectors = 0; /* Nasty failure, quit now */ 1737 else /* err == number of vectors we should try again with */ 1738 vectors = err; 1739 } 1740 1741 if (vectors < vector_threshold) { 1742 /* Can't allocate enough MSI-X interrupts? Oh well. 1743 * This just means we'll go with either a single MSI 1744 * vector or fall back to legacy interrupts. 1745 */ 1746 hw_dbg(&adapter->hw, 1747 "Unable to allocate MSI-X interrupts\n"); 1748 kfree(adapter->msix_entries); 1749 adapter->msix_entries = NULL; 1750 } else { 1751 /* 1752 * Adjust for only the vectors we'll use, which is minimum 1753 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 1754 * vectors we were allocated. 1755 */ 1756 adapter->num_msix_vectors = vectors; 1757 } 1758} 1759 1760/** 1761 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent 1762 * @adapter: board private structure to initialize 1763 * 1764 * This is the top level queue allocation routine. The order here is very 1765 * important, starting with the "most" number of features turned on at once, 1766 * and ending with the smallest set of features. This way large combinations 1767 * can be allocated if they're turned on, and smaller combinations are the 1768 * fallthrough conditions. 1769 * 1770 **/ 1771static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) 1772{ 1773 /* Start with base case */ 1774 adapter->num_rx_queues = 1; 1775 adapter->num_tx_queues = 1; 1776} 1777 1778/** 1779 * ixgbevf_alloc_queues - Allocate memory for all rings 1780 * @adapter: board private structure to initialize 1781 * 1782 * We allocate one ring per queue at run-time since we don't know the 1783 * number of queues at compile-time. 
The polling_netdev array is 1784 * intended for Multiqueue, but should work fine with a single queue. 1785 **/ 1786static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) 1787{ 1788 int i; 1789 1790 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 1791 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1792 if (!adapter->tx_ring) 1793 goto err_tx_ring_allocation; 1794 1795 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 1796 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1797 if (!adapter->rx_ring) 1798 goto err_rx_ring_allocation; 1799 1800 for (i = 0; i < adapter->num_tx_queues; i++) { 1801 adapter->tx_ring[i].count = adapter->tx_ring_count; 1802 adapter->tx_ring[i].queue_index = i; 1803 /* reg_idx may be remapped later by DCB config */ 1804 adapter->tx_ring[i].reg_idx = i; 1805 adapter->tx_ring[i].dev = &adapter->pdev->dev; 1806 adapter->tx_ring[i].netdev = adapter->netdev; 1807 } 1808 1809 for (i = 0; i < adapter->num_rx_queues; i++) { 1810 adapter->rx_ring[i].count = adapter->rx_ring_count; 1811 adapter->rx_ring[i].queue_index = i; 1812 adapter->rx_ring[i].reg_idx = i; 1813 adapter->rx_ring[i].dev = &adapter->pdev->dev; 1814 adapter->rx_ring[i].netdev = adapter->netdev; 1815 } 1816 1817 return 0; 1818 1819err_rx_ring_allocation: 1820 kfree(adapter->tx_ring); 1821err_tx_ring_allocation: 1822 return -ENOMEM; 1823} 1824 1825/** 1826 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported 1827 * @adapter: board private structure to initialize 1828 * 1829 * Attempt to configure the interrupts using the best available 1830 * capabilities of the hardware and the kernel. 1831 **/ 1832static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 1833{ 1834 struct net_device *netdev = adapter->netdev; 1835 int err = 0; 1836 int vector, v_budget; 1837 1838 /* 1839 * It's easy to be greedy for MSI-X vectors, but it really 1840 * doesn't do us much good if we have a lot more vectors 1841 * than CPU's. So let's be conservative and only ask for 1842 * (roughly) the same number of vectors as there are CPU's. 1843 * The default is to use pairs of vectors. 1844 */ 1845 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); 1846 v_budget = min_t(int, v_budget, num_online_cpus()); 1847 v_budget += NON_Q_VECTORS; 1848 1849 /* A failure in MSI-X entry allocation isn't fatal, but it does 1850 * mean we disable MSI-X capabilities of the adapter. */ 1851 adapter->msix_entries = kcalloc(v_budget, 1852 sizeof(struct msix_entry), GFP_KERNEL); 1853 if (!adapter->msix_entries) { 1854 err = -ENOMEM; 1855 goto out; 1856 } 1857 1858 for (vector = 0; vector < v_budget; vector++) 1859 adapter->msix_entries[vector].entry = vector; 1860 1861 ixgbevf_acquire_msix_vectors(adapter, v_budget); 1862 1863 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 1864 if (err) 1865 goto out; 1866 1867 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 1868 1869out: 1870 return err; 1871} 1872 1873/** 1874 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 1875 * @adapter: board private structure to initialize 1876 * 1877 * We allocate one q_vector per queue interrupt. If allocation fails we 1878 * return -ENOMEM. 
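 * Each q_vector is registered with netif_napi_add() at a NAPI weight of
 * 64, so every MSI-X queue vector drives its own poll loop.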
1879 **/ 1880static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 1881{ 1882 int q_idx, num_q_vectors; 1883 struct ixgbevf_q_vector *q_vector; 1884 1885 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1886 1887 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1888 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 1889 if (!q_vector) 1890 goto err_out; 1891 q_vector->adapter = adapter; 1892 q_vector->v_idx = q_idx; 1893 netif_napi_add(adapter->netdev, &q_vector->napi, 1894 ixgbevf_poll, 64); 1895 adapter->q_vector[q_idx] = q_vector; 1896 } 1897 1898 return 0; 1899 1900err_out: 1901 while (q_idx) { 1902 q_idx--; 1903 q_vector = adapter->q_vector[q_idx]; 1904 netif_napi_del(&q_vector->napi); 1905 kfree(q_vector); 1906 adapter->q_vector[q_idx] = NULL; 1907 } 1908 return -ENOMEM; 1909} 1910 1911/** 1912 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 1913 * @adapter: board private structure to initialize 1914 * 1915 * This function frees the memory allocated to the q_vectors. In addition if 1916 * NAPI is enabled it will delete any references to the NAPI struct prior 1917 * to freeing the q_vector. 1918 **/ 1919static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 1920{ 1921 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1922 1923 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1924 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 1925 1926 adapter->q_vector[q_idx] = NULL; 1927 netif_napi_del(&q_vector->napi); 1928 kfree(q_vector); 1929 } 1930} 1931 1932/** 1933 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 1934 * @adapter: board private structure 1935 * 1936 **/ 1937static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 1938{ 1939 pci_disable_msix(adapter->pdev); 1940 kfree(adapter->msix_entries); 1941 adapter->msix_entries = NULL; 1942} 1943 1944/** 1945 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 1946 * @adapter: board private structure to initialize 1947 * 1948 **/ 1949static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 1950{ 1951 int err; 1952 1953 /* Number of supported queues */ 1954 ixgbevf_set_num_queues(adapter); 1955 1956 err = ixgbevf_set_interrupt_capability(adapter); 1957 if (err) { 1958 hw_dbg(&adapter->hw, 1959 "Unable to setup interrupt capabilities\n"); 1960 goto err_set_interrupt; 1961 } 1962 1963 err = ixgbevf_alloc_q_vectors(adapter); 1964 if (err) { 1965 hw_dbg(&adapter->hw, "Unable to allocate memory for queue " 1966 "vectors\n"); 1967 goto err_alloc_q_vectors; 1968 } 1969 1970 err = ixgbevf_alloc_queues(adapter); 1971 if (err) { 1972 pr_err("Unable to allocate memory for queues\n"); 1973 goto err_alloc_queues; 1974 } 1975 1976 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " 1977 "Tx Queue count = %u\n", 1978 (adapter->num_rx_queues > 1) ? 
"Enabled" : 1979 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 1980 1981 set_bit(__IXGBEVF_DOWN, &adapter->state); 1982 1983 return 0; 1984err_alloc_queues: 1985 ixgbevf_free_q_vectors(adapter); 1986err_alloc_q_vectors: 1987 ixgbevf_reset_interrupt_capability(adapter); 1988err_set_interrupt: 1989 return err; 1990} 1991 1992/** 1993 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings 1994 * @adapter: board private structure to clear interrupt scheme on 1995 * 1996 * We go through and clear interrupt specific resources and reset the structure 1997 * to pre-load conditions 1998 **/ 1999static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) 2000{ 2001 adapter->num_tx_queues = 0; 2002 adapter->num_rx_queues = 0; 2003 2004 ixgbevf_free_q_vectors(adapter); 2005 ixgbevf_reset_interrupt_capability(adapter); 2006} 2007 2008/** 2009 * ixgbevf_sw_init - Initialize general software structures 2010 * (struct ixgbevf_adapter) 2011 * @adapter: board private structure to initialize 2012 * 2013 * ixgbevf_sw_init initializes the Adapter private data structure. 2014 * Fields are initialized based on PCI device information and 2015 * OS network device settings (MTU size). 2016 **/ 2017static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2018{ 2019 struct ixgbe_hw *hw = &adapter->hw; 2020 struct pci_dev *pdev = adapter->pdev; 2021 int err; 2022 2023 /* PCI config space info */ 2024 2025 hw->vendor_id = pdev->vendor; 2026 hw->device_id = pdev->device; 2027 hw->revision_id = pdev->revision; 2028 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2029 hw->subsystem_device_id = pdev->subsystem_device; 2030 2031 hw->mbx.ops.init_params(hw); 2032 2033 /* assume legacy case in which PF would only give VF 2 queues */ 2034 hw->mac.max_tx_queues = 2; 2035 hw->mac.max_rx_queues = 2; 2036 2037 err = hw->mac.ops.reset_hw(hw); 2038 if (err) { 2039 dev_info(&pdev->dev, 2040 "PF still in reset state, assigning new address\n"); 2041 eth_hw_addr_random(adapter->netdev); 2042 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr, 2043 adapter->netdev->addr_len); 2044 } else { 2045 err = hw->mac.ops.init_hw(hw); 2046 if (err) { 2047 pr_err("init_shared_code failed: %d\n", err); 2048 goto out; 2049 } 2050 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, 2051 adapter->netdev->addr_len); 2052 } 2053 2054 /* lock to protect mailbox accesses */ 2055 spin_lock_init(&adapter->mbx_lock); 2056 2057 /* Enable dynamic interrupt throttling rates */ 2058 adapter->rx_itr_setting = 1; 2059 adapter->tx_itr_setting = 1; 2060 2061 /* set default ring sizes */ 2062 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 2063 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 2064 2065 set_bit(__IXGBEVF_DOWN, &adapter->state); 2066 return 0; 2067 2068out: 2069 return err; 2070} 2071 2072#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2073 { \ 2074 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2075 if (current_counter < last_counter) \ 2076 counter += 0x100000000LL; \ 2077 last_counter = current_counter; \ 2078 counter &= 0xFFFFFFFF00000000LL; \ 2079 counter |= current_counter; \ 2080 } 2081 2082#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2083 { \ 2084 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 2085 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 2086 u64 current_counter = (current_counter_msb << 32) | \ 2087 current_counter_lsb; \ 2088 if (current_counter < last_counter) \ 2089 counter += 0x1000000000LL; \ 
2090 last_counter = current_counter; \ 2091 counter &= 0xFFFFFFF000000000LL; \ 2092 counter |= current_counter; \ 2093 } 2094/** 2095 * ixgbevf_update_stats - Update the board statistics counters. 2096 * @adapter: board private structure 2097 **/ 2098void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2099{ 2100 struct ixgbe_hw *hw = &adapter->hw; 2101 2102 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2103 adapter->stats.vfgprc); 2104 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2105 adapter->stats.vfgptc); 2106 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2107 adapter->stats.last_vfgorc, 2108 adapter->stats.vfgorc); 2109 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2110 adapter->stats.last_vfgotc, 2111 adapter->stats.vfgotc); 2112 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2113 adapter->stats.vfmprc); 2114} 2115 2116/** 2117 * ixgbevf_watchdog - Timer Call-back 2118 * @data: pointer to adapter cast into an unsigned long 2119 **/ 2120static void ixgbevf_watchdog(unsigned long data) 2121{ 2122 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2123 struct ixgbe_hw *hw = &adapter->hw; 2124 u32 eics = 0; 2125 int i; 2126 2127 /* 2128 * Do the watchdog outside of interrupt context due to the lovely 2129 * delays that some of the newer hardware requires 2130 */ 2131 2132 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2133 goto watchdog_short_circuit; 2134 2135 /* get one bit for every active tx/rx interrupt vector */ 2136 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2137 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2138 if (qv->rx.ring || qv->tx.ring) 2139 eics |= 1 << i; 2140 } 2141 2142 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2143 2144watchdog_short_circuit: 2145 schedule_work(&adapter->watchdog_task); 2146} 2147 2148/** 2149 * ixgbevf_tx_timeout - Respond to a Tx Hang 2150 * @netdev: network interface device structure 2151 **/ 2152static void ixgbevf_tx_timeout(struct net_device *netdev) 2153{ 2154 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2155 2156 /* Do the reset outside of interrupt context */ 2157 schedule_work(&adapter->reset_task); 2158} 2159 2160static void ixgbevf_reset_task(struct work_struct *work) 2161{ 2162 struct ixgbevf_adapter *adapter; 2163 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2164 2165 /* If we're already down or resetting, just bail */ 2166 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2167 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2168 return; 2169 2170 adapter->tx_timeout_count++; 2171 2172 ixgbevf_reinit_locked(adapter); 2173} 2174 2175/** 2176 * ixgbevf_watchdog_task - worker thread to bring link up 2177 * @work: pointer to work_struct containing our data 2178 **/ 2179static void ixgbevf_watchdog_task(struct work_struct *work) 2180{ 2181 struct ixgbevf_adapter *adapter = container_of(work, 2182 struct ixgbevf_adapter, 2183 watchdog_task); 2184 struct net_device *netdev = adapter->netdev; 2185 struct ixgbe_hw *hw = &adapter->hw; 2186 u32 link_speed = adapter->link_speed; 2187 bool link_up = adapter->link_up; 2188 2189 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2190 2191 /* 2192 * Always check the link on the watchdog because we have 2193 * no LSC interrupt 2194 */ 2195 if (hw->mac.ops.check_link) { 2196 s32 need_reset; 2197 2198 spin_lock_bh(&adapter->mbx_lock); 2199 2200 need_reset = hw->mac.ops.check_link(hw, &link_speed, 2201 &link_up, false); 2202 2203 
spin_unlock_bh(&adapter->mbx_lock); 2204 2205 if (need_reset) { 2206 adapter->link_up = link_up; 2207 adapter->link_speed = link_speed; 2208 netif_carrier_off(netdev); 2209 netif_tx_stop_all_queues(netdev); 2210 schedule_work(&adapter->reset_task); 2211 goto pf_has_reset; 2212 } 2213 } else { 2214 /* always assume link is up, if no check link 2215 * function */ 2216 link_speed = IXGBE_LINK_SPEED_10GB_FULL; 2217 link_up = true; 2218 } 2219 adapter->link_up = link_up; 2220 adapter->link_speed = link_speed; 2221 2222 if (link_up) { 2223 if (!netif_carrier_ok(netdev)) { 2224 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n", 2225 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2226 10 : 1); 2227 netif_carrier_on(netdev); 2228 netif_tx_wake_all_queues(netdev); 2229 } 2230 } else { 2231 adapter->link_up = false; 2232 adapter->link_speed = 0; 2233 if (netif_carrier_ok(netdev)) { 2234 hw_dbg(&adapter->hw, "NIC Link is Down\n"); 2235 netif_carrier_off(netdev); 2236 netif_tx_stop_all_queues(netdev); 2237 } 2238 } 2239 2240 ixgbevf_update_stats(adapter); 2241 2242pf_has_reset: 2243 /* Reset the timer */ 2244 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2245 mod_timer(&adapter->watchdog_timer, 2246 round_jiffies(jiffies + (2 * HZ))); 2247 2248 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2249} 2250 2251/** 2252 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2253 * @adapter: board private structure 2254 * @tx_ring: Tx descriptor ring for a specific queue 2255 * 2256 * Free all transmit software resources 2257 **/ 2258void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, 2259 struct ixgbevf_ring *tx_ring) 2260{ 2261 struct pci_dev *pdev = adapter->pdev; 2262 2263 ixgbevf_clean_tx_ring(adapter, tx_ring); 2264 2265 vfree(tx_ring->tx_buffer_info); 2266 tx_ring->tx_buffer_info = NULL; 2267 2268 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2269 tx_ring->dma); 2270 2271 tx_ring->desc = NULL; 2272} 2273 2274/** 2275 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2276 * @adapter: board private structure 2277 * 2278 * Free all transmit software resources 2279 **/ 2280static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2281{ 2282 int i; 2283 2284 for (i = 0; i < adapter->num_tx_queues; i++) 2285 if (adapter->tx_ring[i].desc) 2286 ixgbevf_free_tx_resources(adapter, 2287 &adapter->tx_ring[i]); 2288 2289} 2290 2291/** 2292 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2293 * @adapter: board private structure 2294 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2295 * 2296 * Return 0 on success, negative on failure 2297 **/ 2298int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, 2299 struct ixgbevf_ring *tx_ring) 2300{ 2301 struct pci_dev *pdev = adapter->pdev; 2302 int size; 2303 2304 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2305 tx_ring->tx_buffer_info = vzalloc(size); 2306 if (!tx_ring->tx_buffer_info) 2307 goto err; 2308 2309 /* round up to nearest 4K */ 2310 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2311 tx_ring->size = ALIGN(tx_ring->size, 4096); 2312 2313 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 2314 &tx_ring->dma, GFP_KERNEL); 2315 if (!tx_ring->desc) 2316 goto err; 2317 2318 tx_ring->next_to_use = 0; 2319 tx_ring->next_to_clean = 0; 2320 return 0; 2321 2322err: 2323 vfree(tx_ring->tx_buffer_info); 2324 tx_ring->tx_buffer_info = NULL; 2325 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " 2326 "descriptor 
ring\n"); 2327 return -ENOMEM; 2328} 2329 2330/** 2331 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources 2332 * @adapter: board private structure 2333 * 2334 * If this function returns with an error, then it's possible one or 2335 * more of the rings is populated (while the rest are not). It is the 2336 * callers duty to clean those orphaned rings. 2337 * 2338 * Return 0 on success, negative on failure 2339 **/ 2340static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) 2341{ 2342 int i, err = 0; 2343 2344 for (i = 0; i < adapter->num_tx_queues; i++) { 2345 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2346 if (!err) 2347 continue; 2348 hw_dbg(&adapter->hw, 2349 "Allocation for Tx Queue %u failed\n", i); 2350 break; 2351 } 2352 2353 return err; 2354} 2355 2356/** 2357 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) 2358 * @adapter: board private structure 2359 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2360 * 2361 * Returns 0 on success, negative on failure 2362 **/ 2363int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, 2364 struct ixgbevf_ring *rx_ring) 2365{ 2366 struct pci_dev *pdev = adapter->pdev; 2367 int size; 2368 2369 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2370 rx_ring->rx_buffer_info = vzalloc(size); 2371 if (!rx_ring->rx_buffer_info) 2372 goto alloc_failed; 2373 2374 /* Round up to nearest 4K */ 2375 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2376 rx_ring->size = ALIGN(rx_ring->size, 4096); 2377 2378 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 2379 &rx_ring->dma, GFP_KERNEL); 2380 2381 if (!rx_ring->desc) { 2382 hw_dbg(&adapter->hw, 2383 "Unable to allocate memory for " 2384 "the receive descriptor ring\n"); 2385 vfree(rx_ring->rx_buffer_info); 2386 rx_ring->rx_buffer_info = NULL; 2387 goto alloc_failed; 2388 } 2389 2390 rx_ring->next_to_clean = 0; 2391 rx_ring->next_to_use = 0; 2392 2393 return 0; 2394alloc_failed: 2395 return -ENOMEM; 2396} 2397 2398/** 2399 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources 2400 * @adapter: board private structure 2401 * 2402 * If this function returns with an error, then it's possible one or 2403 * more of the rings is populated (while the rest are not). It is the 2404 * callers duty to clean those orphaned rings. 
2405 * 2406 * Return 0 on success, negative on failure 2407 **/ 2408static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2409{ 2410 int i, err = 0; 2411 2412 for (i = 0; i < adapter->num_rx_queues; i++) { 2413 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2414 if (!err) 2415 continue; 2416 hw_dbg(&adapter->hw, 2417 "Allocation for Rx Queue %u failed\n", i); 2418 break; 2419 } 2420 return err; 2421} 2422 2423/** 2424 * ixgbevf_free_rx_resources - Free Rx Resources 2425 * @adapter: board private structure 2426 * @rx_ring: ring to clean the resources from 2427 * 2428 * Free all receive software resources 2429 **/ 2430void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, 2431 struct ixgbevf_ring *rx_ring) 2432{ 2433 struct pci_dev *pdev = adapter->pdev; 2434 2435 ixgbevf_clean_rx_ring(adapter, rx_ring); 2436 2437 vfree(rx_ring->rx_buffer_info); 2438 rx_ring->rx_buffer_info = NULL; 2439 2440 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2441 rx_ring->dma); 2442 2443 rx_ring->desc = NULL; 2444} 2445 2446/** 2447 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2448 * @adapter: board private structure 2449 * 2450 * Free all receive software resources 2451 **/ 2452static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2453{ 2454 int i; 2455 2456 for (i = 0; i < adapter->num_rx_queues; i++) 2457 if (adapter->rx_ring[i].desc) 2458 ixgbevf_free_rx_resources(adapter, 2459 &adapter->rx_ring[i]); 2460} 2461 2462static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter) 2463{ 2464 struct ixgbe_hw *hw = &adapter->hw; 2465 struct ixgbevf_ring *rx_ring; 2466 unsigned int def_q = 0; 2467 unsigned int num_tcs = 0; 2468 unsigned int num_rx_queues = 1; 2469 int err, i; 2470 2471 spin_lock_bh(&adapter->mbx_lock); 2472 2473 /* fetch queue configuration from the PF */ 2474 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 2475 2476 spin_unlock_bh(&adapter->mbx_lock); 2477 2478 if (err) 2479 return err; 2480 2481 if (num_tcs > 1) { 2482 /* update default Tx ring register index */ 2483 adapter->tx_ring[0].reg_idx = def_q; 2484 2485 /* we need as many queues as traffic classes */ 2486 num_rx_queues = num_tcs; 2487 } 2488 2489 /* nothing to do if we have the correct number of queues */ 2490 if (adapter->num_rx_queues == num_rx_queues) 2491 return 0; 2492 2493 /* allocate new rings */ 2494 rx_ring = kcalloc(num_rx_queues, 2495 sizeof(struct ixgbevf_ring), GFP_KERNEL); 2496 if (!rx_ring) 2497 return -ENOMEM; 2498 2499 /* setup ring fields */ 2500 for (i = 0; i < num_rx_queues; i++) { 2501 rx_ring[i].count = adapter->rx_ring_count; 2502 rx_ring[i].queue_index = i; 2503 rx_ring[i].reg_idx = i; 2504 rx_ring[i].dev = &adapter->pdev->dev; 2505 rx_ring[i].netdev = adapter->netdev; 2506 } 2507 2508 /* free the existing ring and queues */ 2509 adapter->num_rx_queues = 0; 2510 kfree(adapter->rx_ring); 2511 2512 /* move new rings into position on the adapter struct */ 2513 adapter->rx_ring = rx_ring; 2514 adapter->num_rx_queues = num_rx_queues; 2515 2516 return 0; 2517} 2518 2519/** 2520 * ixgbevf_open - Called when a network interface is made active 2521 * @netdev: network interface device structure 2522 * 2523 * Returns 0 on success, negative value on failure 2524 * 2525 * The open entry point is called when a network interface is made 2526 * active by the system (IFF_UP). 
At this point all resources needed 2527 * for transmit and receive operations are allocated, the interrupt 2528 * handler is registered with the OS, the watchdog timer is started, 2529 * and the stack is notified that the interface is ready. 2530 **/ 2531static int ixgbevf_open(struct net_device *netdev) 2532{ 2533 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2534 struct ixgbe_hw *hw = &adapter->hw; 2535 int err; 2536 2537 /* disallow open during test */ 2538 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2539 return -EBUSY; 2540 2541 if (hw->adapter_stopped) { 2542 ixgbevf_reset(adapter); 2543 /* if adapter is still stopped then PF isn't up and 2544 * the vf can't start. */ 2545 if (hw->adapter_stopped) { 2546 err = IXGBE_ERR_MBX; 2547 pr_err("Unable to start - perhaps the PF Driver isn't " 2548 "up yet\n"); 2549 goto err_setup_reset; 2550 } 2551 } 2552 2553 ixgbevf_negotiate_api(adapter); 2554 2555 /* setup queue reg_idx and Rx queue count */ 2556 err = ixgbevf_setup_queues(adapter); 2557 if (err) 2558 goto err_setup_queues; 2559 2560 /* allocate transmit descriptors */ 2561 err = ixgbevf_setup_all_tx_resources(adapter); 2562 if (err) 2563 goto err_setup_tx; 2564 2565 /* allocate receive descriptors */ 2566 err = ixgbevf_setup_all_rx_resources(adapter); 2567 if (err) 2568 goto err_setup_rx; 2569 2570 ixgbevf_configure(adapter); 2571 2572 /* 2573 * Map the Tx/Rx rings to the vectors we were allotted. 2574 * if request_irq will be called in this function map_rings 2575 * must be called *before* up_complete 2576 */ 2577 ixgbevf_map_rings_to_vectors(adapter); 2578 2579 ixgbevf_up_complete(adapter); 2580 2581 /* clear any pending interrupts, may auto mask */ 2582 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2583 err = ixgbevf_request_irq(adapter); 2584 if (err) 2585 goto err_req_irq; 2586 2587 ixgbevf_irq_enable(adapter); 2588 2589 return 0; 2590 2591err_req_irq: 2592 ixgbevf_down(adapter); 2593 ixgbevf_free_irq(adapter); 2594err_setup_rx: 2595 ixgbevf_free_all_rx_resources(adapter); 2596err_setup_tx: 2597 ixgbevf_free_all_tx_resources(adapter); 2598err_setup_queues: 2599 ixgbevf_reset(adapter); 2600 2601err_setup_reset: 2602 2603 return err; 2604} 2605 2606/** 2607 * ixgbevf_close - Disables a network interface 2608 * @netdev: network interface device structure 2609 * 2610 * Returns 0, this is not allowed to fail 2611 * 2612 * The close entry point is called when an interface is de-activated 2613 * by the OS. The hardware is still under the drivers control, but 2614 * needs to be disabled. A global MAC reset is issued to stop the 2615 * hardware, and all transmit and receive resources are freed. 2616 **/ 2617static int ixgbevf_close(struct net_device *netdev) 2618{ 2619 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2620 2621 ixgbevf_down(adapter); 2622 ixgbevf_free_irq(adapter); 2623 2624 ixgbevf_free_all_tx_resources(adapter); 2625 ixgbevf_free_all_rx_resources(adapter); 2626 2627 return 0; 2628} 2629 2630static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, 2631 u32 vlan_macip_lens, u32 type_tucmd, 2632 u32 mss_l4len_idx) 2633{ 2634 struct ixgbe_adv_tx_context_desc *context_desc; 2635 u16 i = tx_ring->next_to_use; 2636 2637 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 2638 2639 i++; 2640 tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; 2641 2642 /* set bits to identify this as an advanced context descriptor */ 2643 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 2644 2645 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2646 context_desc->seqnum_seed = 0; 2647 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 2648 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2649} 2650 2651static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, 2652 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 2653{ 2654 u32 vlan_macip_lens, type_tucmd; 2655 u32 mss_l4len_idx, l4len; 2656 2657 if (!skb_is_gso(skb)) 2658 return 0; 2659 2660 if (skb_header_cloned(skb)) { 2661 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2662 if (err) 2663 return err; 2664 } 2665 2666 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2667 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 2668 2669 if (skb->protocol == htons(ETH_P_IP)) { 2670 struct iphdr *iph = ip_hdr(skb); 2671 iph->tot_len = 0; 2672 iph->check = 0; 2673 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2674 iph->daddr, 0, 2675 IPPROTO_TCP, 2676 0); 2677 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2678 } else if (skb_is_gso_v6(skb)) { 2679 ipv6_hdr(skb)->payload_len = 0; 2680 tcp_hdr(skb)->check = 2681 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2682 &ipv6_hdr(skb)->daddr, 2683 0, IPPROTO_TCP, 0); 2684 } 2685 2686 /* compute header lengths */ 2687 l4len = tcp_hdrlen(skb); 2688 *hdr_len += l4len; 2689 *hdr_len = skb_transport_offset(skb) + l4len; 2690 2691 /* mss_l4len_id: use 1 as index for TSO */ 2692 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; 2693 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 2694 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; 2695 2696 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 2697 vlan_macip_lens = skb_network_header_len(skb); 2698 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2699 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2700 2701 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2702 type_tucmd, mss_l4len_idx); 2703 2704 return 1; 2705} 2706 2707static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, 2708 struct sk_buff *skb, u32 tx_flags) 2709{ 2710 2711 2712 2713 u32 vlan_macip_lens = 0; 2714 u32 mss_l4len_idx = 0; 2715 u32 type_tucmd = 0; 2716 2717 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2718 u8 l4_hdr = 0; 2719 switch (skb->protocol) { 2720 case __constant_htons(ETH_P_IP): 2721 vlan_macip_lens |= skb_network_header_len(skb); 2722 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2723 l4_hdr = ip_hdr(skb)->protocol; 2724 break; 2725 case __constant_htons(ETH_P_IPV6): 2726 vlan_macip_lens |= skb_network_header_len(skb); 2727 l4_hdr = ipv6_hdr(skb)->nexthdr; 2728 break; 2729 default: 2730 if (unlikely(net_ratelimit())) { 2731 dev_warn(tx_ring->dev, 2732 "partial checksum but proto=%x!\n", 2733 skb->protocol); 2734 } 2735 break; 2736 } 2737 2738 switch (l4_hdr) { 2739 case IPPROTO_TCP: 2740 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 2741 mss_l4len_idx = tcp_hdrlen(skb) << 2742 IXGBE_ADVTXD_L4LEN_SHIFT; 2743 break; 2744 case IPPROTO_SCTP: 2745 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; 2746 mss_l4len_idx = sizeof(struct sctphdr) << 2747 IXGBE_ADVTXD_L4LEN_SHIFT; 2748 break; 2749 case IPPROTO_UDP: 2750 mss_l4len_idx = sizeof(struct udphdr) << 2751 IXGBE_ADVTXD_L4LEN_SHIFT; 2752 break; 2753 default: 2754 if (unlikely(net_ratelimit())) { 2755 dev_warn(tx_ring->dev, 2756 "partial checksum but l4 proto=%x!\n", 2757 l4_hdr); 2758 } 2759 break; 2760 } 2761 } 2762 2763 /* vlan_macip_lens: 
MACLEN, VLAN tag */ 2764 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2765 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2766 2767 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2768 type_tucmd, mss_l4len_idx); 2769 2770 return (skb->ip_summed == CHECKSUM_PARTIAL); 2771} 2772 2773static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2774 struct sk_buff *skb, u32 tx_flags, 2775 unsigned int first) 2776{ 2777 struct ixgbevf_tx_buffer *tx_buffer_info; 2778 unsigned int len; 2779 unsigned int total = skb->len; 2780 unsigned int offset = 0, size; 2781 int count = 0; 2782 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2783 unsigned int f; 2784 int i; 2785 2786 i = tx_ring->next_to_use; 2787 2788 len = min(skb_headlen(skb), total); 2789 while (len) { 2790 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2791 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2792 2793 tx_buffer_info->length = size; 2794 tx_buffer_info->mapped_as_page = false; 2795 tx_buffer_info->dma = dma_map_single(tx_ring->dev, 2796 skb->data + offset, 2797 size, DMA_TO_DEVICE); 2798 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) 2799 goto dma_error; 2800 tx_buffer_info->next_to_watch = i; 2801 2802 len -= size; 2803 total -= size; 2804 offset += size; 2805 count++; 2806 i++; 2807 if (i == tx_ring->count) 2808 i = 0; 2809 } 2810 2811 for (f = 0; f < nr_frags; f++) { 2812 const struct skb_frag_struct *frag; 2813 2814 frag = &skb_shinfo(skb)->frags[f]; 2815 len = min((unsigned int)skb_frag_size(frag), total); 2816 offset = 0; 2817 2818 while (len) { 2819 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2820 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2821 2822 tx_buffer_info->length = size; 2823 tx_buffer_info->dma = 2824 skb_frag_dma_map(tx_ring->dev, frag, 2825 offset, size, DMA_TO_DEVICE); 2826 if (dma_mapping_error(tx_ring->dev, 2827 tx_buffer_info->dma)) 2828 goto dma_error; 2829 tx_buffer_info->mapped_as_page = true; 2830 tx_buffer_info->next_to_watch = i; 2831 2832 len -= size; 2833 total -= size; 2834 offset += size; 2835 count++; 2836 i++; 2837 if (i == tx_ring->count) 2838 i = 0; 2839 } 2840 if (total == 0) 2841 break; 2842 } 2843 2844 if (i == 0) 2845 i = tx_ring->count - 1; 2846 else 2847 i = i - 1; 2848 tx_ring->tx_buffer_info[i].skb = skb; 2849 tx_ring->tx_buffer_info[first].next_to_watch = i; 2850 tx_ring->tx_buffer_info[first].time_stamp = jiffies; 2851 2852 return count; 2853 2854dma_error: 2855 dev_err(tx_ring->dev, "TX DMA map failed\n"); 2856 2857 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2858 tx_buffer_info->dma = 0; 2859 tx_buffer_info->next_to_watch = 0; 2860 count--; 2861 2862 /* clear timestamp and dma mappings for remaining portion of packet */ 2863 while (count >= 0) { 2864 count--; 2865 i--; 2866 if (i < 0) 2867 i += tx_ring->count; 2868 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2869 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 2870 } 2871 2872 return count; 2873} 2874 2875static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, 2876 int count, u32 paylen, u8 hdr_len) 2877{ 2878 union ixgbe_adv_tx_desc *tx_desc = NULL; 2879 struct ixgbevf_tx_buffer *tx_buffer_info; 2880 u32 olinfo_status = 0, cmd_type_len = 0; 2881 unsigned int i; 2882 2883 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 2884 2885 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 2886 2887 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 2888 2889 if (tx_flags & 
IXGBE_TX_FLAGS_VLAN) 2890 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 2891 2892 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 2893 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM; 2894 2895 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 2896 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 2897 2898 /* use index 1 context for tso */ 2899 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2900 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 2901 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM; 2902 2903 } 2904 2905 /* 2906 * Check Context must be set if Tx switch is enabled, which it 2907 * always is for case where virtual functions are running 2908 */ 2909 olinfo_status |= IXGBE_ADVTXD_CC; 2910 2911 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 2912 2913 i = tx_ring->next_to_use; 2914 while (count--) { 2915 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2916 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 2917 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 2918 tx_desc->read.cmd_type_len = 2919 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 2920 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2921 i++; 2922 if (i == tx_ring->count) 2923 i = 0; 2924 } 2925 2926 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 2927 2928 tx_ring->next_to_use = i; 2929} 2930 2931static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2932{ 2933 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); 2934 2935 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 2936 /* Herbert's original patch had: 2937 * smp_mb__after_netif_stop_queue(); 2938 * but since that doesn't exist yet, just open code it. */ 2939 smp_mb(); 2940 2941 /* We need to check again in a case another CPU has just 2942 * made room available. */ 2943 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) 2944 return -EBUSY; 2945 2946 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 2947 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 2948 ++adapter->restart_queue; 2949 return 0; 2950} 2951 2952static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2953{ 2954 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 2955 return 0; 2956 return __ixgbevf_maybe_stop_tx(tx_ring, size); 2957} 2958 2959static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2960{ 2961 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2962 struct ixgbevf_ring *tx_ring; 2963 unsigned int first; 2964 unsigned int tx_flags = 0; 2965 u8 hdr_len = 0; 2966 int r_idx = 0, tso; 2967 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 2968#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2969 unsigned short f; 2970#endif 2971 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); 2972 if (!dst_mac || is_link_local_ether_addr(dst_mac)) { 2973 dev_kfree_skb(skb); 2974 return NETDEV_TX_OK; 2975 } 2976 2977 tx_ring = &adapter->tx_ring[r_idx]; 2978 2979 /* 2980 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 2981 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 2982 * + 2 desc gap to keep tail from touching head, 2983 * + 1 desc for context descriptor, 2984 * otherwise try next time 2985 */ 2986#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2987 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 2988 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 2989#else 2990 count += skb_shinfo(skb)->nr_frags; 2991#endif 2992 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 2993 adapter->tx_busy++; 2994 return NETDEV_TX_BUSY; 2995 } 2996 2997 if (vlan_tx_tag_present(skb)) { 2998 tx_flags |= vlan_tx_tag_get(skb); 2999 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3000 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3001 } 3002 3003 first = tx_ring->next_to_use; 3004 3005 if (skb->protocol == htons(ETH_P_IP)) 3006 tx_flags |= IXGBE_TX_FLAGS_IPV4; 3007 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len); 3008 if (tso < 0) { 3009 dev_kfree_skb_any(skb); 3010 return NETDEV_TX_OK; 3011 } 3012 3013 if (tso) 3014 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; 3015 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags)) 3016 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3017 3018 ixgbevf_tx_queue(tx_ring, tx_flags, 3019 ixgbevf_tx_map(tx_ring, skb, tx_flags, first), 3020 skb->len, hdr_len); 3021 /* 3022 * Force memory writes to complete before letting h/w 3023 * know there are new descriptors to fetch. (Only 3024 * applicable for weak-ordered memory model archs, 3025 * such as IA-64.
3026 */ 3027 wmb(); 3028 3029 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); 3030 3031 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); 3032 3033 return NETDEV_TX_OK; 3034} 3035 3036/** 3037 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 3038 * @netdev: network interface device structure 3039 * @p: pointer to an address structure 3040 * 3041 * Returns 0 on success, negative on failure 3042 **/ 3043static int ixgbevf_set_mac(struct net_device *netdev, void *p) 3044{ 3045 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3046 struct ixgbe_hw *hw = &adapter->hw; 3047 struct sockaddr *addr = p; 3048 3049 if (!is_valid_ether_addr(addr->sa_data)) 3050 return -EADDRNOTAVAIL; 3051 3052 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3053 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 3054 3055 spin_lock_bh(&adapter->mbx_lock); 3056 3057 if (hw->mac.ops.set_rar) 3058 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3059 3060 spin_unlock_bh(&adapter->mbx_lock); 3061 3062 return 0; 3063} 3064 3065/** 3066 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 3067 * @netdev: network interface device structure 3068 * @new_mtu: new value for maximum frame size 3069 * 3070 * Returns 0 on success, negative on failure 3071 **/ 3072static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3073{ 3074 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3075 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3076 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 3077 3078 switch (adapter->hw.api_version) { 3079 case ixgbe_mbox_api_11: 3080 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3081 break; 3082 default: 3083 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 3084 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3085 break; 3086 } 3087 3088 /* MTU < 68 is an error and causes problems on some kernels */ 3089 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 3090 return -EINVAL; 3091 3092 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 3093 netdev->mtu, new_mtu); 3094 /* must set new MTU before calling down or up */ 3095 netdev->mtu = new_mtu; 3096 3097 if (netif_running(netdev)) 3098 ixgbevf_reinit_locked(adapter); 3099 3100 return 0; 3101} 3102 3103static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) 3104{ 3105 struct net_device *netdev = pci_get_drvdata(pdev); 3106 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3107#ifdef CONFIG_PM 3108 int retval = 0; 3109#endif 3110 3111 netif_device_detach(netdev); 3112 3113 if (netif_running(netdev)) { 3114 rtnl_lock(); 3115 ixgbevf_down(adapter); 3116 ixgbevf_free_irq(adapter); 3117 ixgbevf_free_all_tx_resources(adapter); 3118 ixgbevf_free_all_rx_resources(adapter); 3119 rtnl_unlock(); 3120 } 3121 3122 ixgbevf_clear_interrupt_scheme(adapter); 3123 3124#ifdef CONFIG_PM 3125 retval = pci_save_state(pdev); 3126 if (retval) 3127 return retval; 3128 3129#endif 3130 pci_disable_device(pdev); 3131 3132 return 0; 3133} 3134 3135#ifdef CONFIG_PM 3136static int ixgbevf_resume(struct pci_dev *pdev) 3137{ 3138 struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev); 3139 struct net_device *netdev = adapter->netdev; 3140 u32 err; 3141 3142 pci_set_power_state(pdev, PCI_D0); 3143 pci_restore_state(pdev); 3144 /* 3145 * pci_restore_state clears dev->state_saved so call 3146 * pci_save_state to restore it. 
3147 */ 3148 pci_save_state(pdev); 3149 3150 err = pci_enable_device_mem(pdev); 3151 if (err) { 3152 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3153 return err; 3154 } 3155 pci_set_master(pdev); 3156 3157 rtnl_lock(); 3158 err = ixgbevf_init_interrupt_scheme(adapter); 3159 rtnl_unlock(); 3160 if (err) { 3161 dev_err(&pdev->dev, "Cannot initialize interrupts\n"); 3162 return err; 3163 } 3164 3165 ixgbevf_reset(adapter); 3166 3167 if (netif_running(netdev)) { 3168 err = ixgbevf_open(netdev); 3169 if (err) 3170 return err; 3171 } 3172 3173 netif_device_attach(netdev); 3174 3175 return err; 3176} 3177 3178#endif /* CONFIG_PM */ 3179static void ixgbevf_shutdown(struct pci_dev *pdev) 3180{ 3181 ixgbevf_suspend(pdev, PMSG_SUSPEND); 3182} 3183 3184static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3185 struct rtnl_link_stats64 *stats) 3186{ 3187 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3188 unsigned int start; 3189 u64 bytes, packets; 3190 const struct ixgbevf_ring *ring; 3191 int i; 3192 3193 ixgbevf_update_stats(adapter); 3194 3195 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3196 3197 for (i = 0; i < adapter->num_rx_queues; i++) { 3198 ring = &adapter->rx_ring[i]; 3199 do { 3200 start = u64_stats_fetch_begin_bh(&ring->syncp); 3201 bytes = ring->total_bytes; 3202 packets = ring->total_packets; 3203 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3204 stats->rx_bytes += bytes; 3205 stats->rx_packets += packets; 3206 } 3207 3208 for (i = 0; i < adapter->num_tx_queues; i++) { 3209 ring = &adapter->tx_ring[i]; 3210 do { 3211 start = u64_stats_fetch_begin_bh(&ring->syncp); 3212 bytes = ring->total_bytes; 3213 packets = ring->total_packets; 3214 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3215 stats->tx_bytes += bytes; 3216 stats->tx_packets += packets; 3217 } 3218 3219 return stats; 3220} 3221 3222static const struct net_device_ops ixgbevf_netdev_ops = { 3223 .ndo_open = ixgbevf_open, 3224 .ndo_stop = ixgbevf_close, 3225 .ndo_start_xmit = ixgbevf_xmit_frame, 3226 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3227 .ndo_get_stats64 = ixgbevf_get_stats, 3228 .ndo_validate_addr = eth_validate_addr, 3229 .ndo_set_mac_address = ixgbevf_set_mac, 3230 .ndo_change_mtu = ixgbevf_change_mtu, 3231 .ndo_tx_timeout = ixgbevf_tx_timeout, 3232 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3233 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3234}; 3235 3236static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3237{ 3238 dev->netdev_ops = &ixgbevf_netdev_ops; 3239 ixgbevf_set_ethtool_ops(dev); 3240 dev->watchdog_timeo = 5 * HZ; 3241} 3242 3243/** 3244 * ixgbevf_probe - Device Initialization Routine 3245 * @pdev: PCI device information struct 3246 * @ent: entry in ixgbevf_pci_tbl 3247 * 3248 * Returns 0 on success, negative on failure 3249 * 3250 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3251 * The OS initialization, configuring of the adapter private structure, 3252 * and a hardware reset occur. 
3253 **/ 3254static int __devinit ixgbevf_probe(struct pci_dev *pdev, 3255 const struct pci_device_id *ent) 3256{ 3257 struct net_device *netdev; 3258 struct ixgbevf_adapter *adapter = NULL; 3259 struct ixgbe_hw *hw = NULL; 3260 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3261 static int cards_found; 3262 int err, pci_using_dac; 3263 3264 err = pci_enable_device(pdev); 3265 if (err) 3266 return err; 3267 3268 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3269 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 3270 pci_using_dac = 1; 3271 } else { 3272 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3273 if (err) { 3274 err = dma_set_coherent_mask(&pdev->dev, 3275 DMA_BIT_MASK(32)); 3276 if (err) { 3277 dev_err(&pdev->dev, "No usable DMA " 3278 "configuration, aborting\n"); 3279 goto err_dma; 3280 } 3281 } 3282 pci_using_dac = 0; 3283 } 3284 3285 err = pci_request_regions(pdev, ixgbevf_driver_name); 3286 if (err) { 3287 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3288 goto err_pci_reg; 3289 } 3290 3291 pci_set_master(pdev); 3292 3293 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3294 MAX_TX_QUEUES); 3295 if (!netdev) { 3296 err = -ENOMEM; 3297 goto err_alloc_etherdev; 3298 } 3299 3300 SET_NETDEV_DEV(netdev, &pdev->dev); 3301 3302 pci_set_drvdata(pdev, netdev); 3303 adapter = netdev_priv(netdev); 3304 3305 adapter->netdev = netdev; 3306 adapter->pdev = pdev; 3307 hw = &adapter->hw; 3308 hw->back = adapter; 3309 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3310 3311 /* 3312 * call save state here in standalone driver because it relies on 3313 * adapter struct to exist, and needs to call netdev_priv 3314 */ 3315 pci_save_state(pdev); 3316 3317 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3318 pci_resource_len(pdev, 0)); 3319 if (!hw->hw_addr) { 3320 err = -EIO; 3321 goto err_ioremap; 3322 } 3323 3324 ixgbevf_assign_netdev_ops(netdev); 3325 3326 adapter->bd_number = cards_found; 3327 3328 /* Setup hw api */ 3329 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3330 hw->mac.type = ii->mac; 3331 3332 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3333 sizeof(struct ixgbe_mbx_operations)); 3334 3335 /* setup the private structure */ 3336 err = ixgbevf_sw_init(adapter); 3337 if (err) 3338 goto err_sw_init; 3339 3340 /* The HW MAC address was set and/or determined in sw_init */ 3341 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 3342 3343 if (!is_valid_ether_addr(netdev->dev_addr)) { 3344 pr_err("invalid MAC address\n"); 3345 err = -EIO; 3346 goto err_sw_init; 3347 } 3348 3349 netdev->hw_features = NETIF_F_SG | 3350 NETIF_F_IP_CSUM | 3351 NETIF_F_IPV6_CSUM | 3352 NETIF_F_TSO | 3353 NETIF_F_TSO6 | 3354 NETIF_F_RXCSUM; 3355 3356 netdev->features = netdev->hw_features | 3357 NETIF_F_HW_VLAN_TX | 3358 NETIF_F_HW_VLAN_RX | 3359 NETIF_F_HW_VLAN_FILTER; 3360 3361 netdev->vlan_features |= NETIF_F_TSO; 3362 netdev->vlan_features |= NETIF_F_TSO6; 3363 netdev->vlan_features |= NETIF_F_IP_CSUM; 3364 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3365 netdev->vlan_features |= NETIF_F_SG; 3366 3367 if (pci_using_dac) 3368 netdev->features |= NETIF_F_HIGHDMA; 3369 3370 netdev->priv_flags |= IFF_UNICAST_FLT; 3371 3372 init_timer(&adapter->watchdog_timer); 3373 adapter->watchdog_timer.function = ixgbevf_watchdog; 3374 adapter->watchdog_timer.data = (unsigned long)adapter; 3375 3376 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3377 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3378 3379 
err = ixgbevf_init_interrupt_scheme(adapter); 3380 if (err) 3381 goto err_sw_init; 3382 3383 /* pick up the PCI bus settings for reporting later */ 3384 if (hw->mac.ops.get_bus_info) 3385 hw->mac.ops.get_bus_info(hw); 3386 3387 strcpy(netdev->name, "eth%d"); 3388 3389 err = register_netdev(netdev); 3390 if (err) 3391 goto err_register; 3392 3393 netif_carrier_off(netdev); 3394 3395 ixgbevf_init_last_counter_stats(adapter); 3396 3397 /* print the MAC address */ 3398 hw_dbg(hw, "%pM\n", netdev->dev_addr); 3399 3400 hw_dbg(hw, "MAC: %d\n", hw->mac.type); 3401 3402 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); 3403 cards_found++; 3404 return 0; 3405 3406err_register: 3407 ixgbevf_clear_interrupt_scheme(adapter); 3408err_sw_init: 3409 ixgbevf_reset_interrupt_capability(adapter); 3410 iounmap(hw->hw_addr); 3411err_ioremap: 3412 free_netdev(netdev); 3413err_alloc_etherdev: 3414 pci_release_regions(pdev); 3415err_pci_reg: 3416err_dma: 3417 pci_disable_device(pdev); 3418 return err; 3419} 3420 3421/** 3422 * ixgbevf_remove - Device Removal Routine 3423 * @pdev: PCI device information struct 3424 * 3425 * ixgbevf_remove is called by the PCI subsystem to alert the driver 3426 * that it should release a PCI device. The could be caused by a 3427 * Hot-Plug event, or because the driver is going to be removed from 3428 * memory. 3429 **/ 3430static void __devexit ixgbevf_remove(struct pci_dev *pdev) 3431{ 3432 struct net_device *netdev = pci_get_drvdata(pdev); 3433 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3434 3435 set_bit(__IXGBEVF_DOWN, &adapter->state); 3436 3437 del_timer_sync(&adapter->watchdog_timer); 3438 3439 cancel_work_sync(&adapter->reset_task); 3440 cancel_work_sync(&adapter->watchdog_task); 3441 3442 if (netdev->reg_state == NETREG_REGISTERED) 3443 unregister_netdev(netdev); 3444 3445 ixgbevf_clear_interrupt_scheme(adapter); 3446 ixgbevf_reset_interrupt_capability(adapter); 3447 3448 iounmap(adapter->hw.hw_addr); 3449 pci_release_regions(pdev); 3450 3451 hw_dbg(&adapter->hw, "Remove complete\n"); 3452 3453 kfree(adapter->tx_ring); 3454 kfree(adapter->rx_ring); 3455 3456 free_netdev(netdev); 3457 3458 pci_disable_device(pdev); 3459} 3460 3461/** 3462 * ixgbevf_io_error_detected - called when PCI error is detected 3463 * @pdev: Pointer to PCI device 3464 * @state: The current pci connection state 3465 * 3466 * This function is called after a PCI bus error affecting 3467 * this device has been detected. 3468 */ 3469static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, 3470 pci_channel_state_t state) 3471{ 3472 struct net_device *netdev = pci_get_drvdata(pdev); 3473 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3474 3475 netif_device_detach(netdev); 3476 3477 if (state == pci_channel_io_perm_failure) 3478 return PCI_ERS_RESULT_DISCONNECT; 3479 3480 if (netif_running(netdev)) 3481 ixgbevf_down(adapter); 3482 3483 pci_disable_device(pdev); 3484 3485 /* Request a slot slot reset. */ 3486 return PCI_ERS_RESULT_NEED_RESET; 3487} 3488 3489/** 3490 * ixgbevf_io_slot_reset - called after the pci bus has been reset. 3491 * @pdev: Pointer to PCI device 3492 * 3493 * Restart the card from scratch, as if from a cold-boot. Implementation 3494 * resembles the first-half of the ixgbevf_resume routine. 
3495 */ 3496static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) 3497{ 3498 struct net_device *netdev = pci_get_drvdata(pdev); 3499 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3500 3501 if (pci_enable_device_mem(pdev)) { 3502 dev_err(&pdev->dev, 3503 "Cannot re-enable PCI device after reset.\n"); 3504 return PCI_ERS_RESULT_DISCONNECT; 3505 } 3506 3507 pci_set_master(pdev); 3508 3509 ixgbevf_reset(adapter); 3510 3511 return PCI_ERS_RESULT_RECOVERED; 3512} 3513 3514/** 3515 * ixgbevf_io_resume - called when traffic can start flowing again. 3516 * @pdev: Pointer to PCI device 3517 * 3518 * This callback is called when the error recovery driver tells us that 3519 * its OK to resume normal operation. Implementation resembles the 3520 * second-half of the ixgbevf_resume routine. 3521 */ 3522static void ixgbevf_io_resume(struct pci_dev *pdev) 3523{ 3524 struct net_device *netdev = pci_get_drvdata(pdev); 3525 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3526 3527 if (netif_running(netdev)) 3528 ixgbevf_up(adapter); 3529 3530 netif_device_attach(netdev); 3531} 3532 3533/* PCI Error Recovery (ERS) */ 3534static const struct pci_error_handlers ixgbevf_err_handler = { 3535 .error_detected = ixgbevf_io_error_detected, 3536 .slot_reset = ixgbevf_io_slot_reset, 3537 .resume = ixgbevf_io_resume, 3538}; 3539 3540static struct pci_driver ixgbevf_driver = { 3541 .name = ixgbevf_driver_name, 3542 .id_table = ixgbevf_pci_tbl, 3543 .probe = ixgbevf_probe, 3544 .remove = __devexit_p(ixgbevf_remove), 3545#ifdef CONFIG_PM 3546 /* Power Management Hooks */ 3547 .suspend = ixgbevf_suspend, 3548 .resume = ixgbevf_resume, 3549#endif 3550 .shutdown = ixgbevf_shutdown, 3551 .err_handler = &ixgbevf_err_handler 3552}; 3553 3554/** 3555 * ixgbevf_init_module - Driver Registration Routine 3556 * 3557 * ixgbevf_init_module is the first routine called when the driver is 3558 * loaded. All it does is register with the PCI subsystem. 3559 **/ 3560static int __init ixgbevf_init_module(void) 3561{ 3562 int ret; 3563 pr_info("%s - version %s\n", ixgbevf_driver_string, 3564 ixgbevf_driver_version); 3565 3566 pr_info("%s\n", ixgbevf_copyright); 3567 3568 ret = pci_register_driver(&ixgbevf_driver); 3569 return ret; 3570} 3571 3572module_init(ixgbevf_init_module); 3573 3574/** 3575 * ixgbevf_exit_module - Driver Exit Cleanup Routine 3576 * 3577 * ixgbevf_exit_module is called just before the driver is removed 3578 * from memory. 3579 **/ 3580static void __exit ixgbevf_exit_module(void) 3581{ 3582 pci_unregister_driver(&ixgbevf_driver); 3583} 3584 3585#ifdef DEBUG 3586/** 3587 * ixgbevf_get_hw_dev_name - return device name string 3588 * used by hardware layer to print debugging information 3589 **/ 3590char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3591{ 3592 struct ixgbevf_adapter *adapter = hw->back; 3593 return adapter->netdev->name; 3594} 3595 3596#endif 3597module_exit(ixgbevf_exit_module); 3598 3599/* ixgbevf_main.c */ 3600