ixgbevf_main.c revision 92fe0bf7d0169a8a077e76e387d939d974ee3745
1/******************************************************************************* 2 3 Intel 82599 Virtual Function driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26*******************************************************************************/ 27 28 29/****************************************************************************** 30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code 31******************************************************************************/ 32 33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34 35#include <linux/types.h> 36#include <linux/bitops.h> 37#include <linux/module.h> 38#include <linux/pci.h> 39#include <linux/netdevice.h> 40#include <linux/vmalloc.h> 41#include <linux/string.h> 42#include <linux/in.h> 43#include <linux/ip.h> 44#include <linux/tcp.h> 45#include <linux/sctp.h> 46#include <linux/ipv6.h> 47#include <linux/slab.h> 48#include <net/checksum.h> 49#include <net/ip6_checksum.h> 50#include <linux/ethtool.h> 51#include <linux/if.h> 52#include <linux/if_vlan.h> 53#include <linux/prefetch.h> 54 55#include "ixgbevf.h" 56 57const char ixgbevf_driver_name[] = "ixgbevf"; 58static const char ixgbevf_driver_string[] = 59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 60 61#define DRV_VERSION "2.7.12-k" 62const char ixgbevf_driver_version[] = DRV_VERSION; 63static char ixgbevf_copyright[] = 64 "Copyright (c) 2009 - 2012 Intel Corporation."; 65 66static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 67 [board_82599_vf] = &ixgbevf_82599_vf_info, 68 [board_X540_vf] = &ixgbevf_X540_vf_info, 69}; 70 71/* ixgbevf_pci_tbl - PCI Device ID Table 72 * 73 * Wildcard entries (PCI_ANY_ID) should come last 74 * Last entry must be all 0s 75 * 76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 77 * Class, Class Mask, private data (not used) } 78 */ 79static struct pci_device_id ixgbevf_pci_tbl[] = { 80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), 81 board_82599_vf}, 82 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), 83 board_X540_vf}, 84 85 /* required last entry */ 86 {0, } 87}; 88MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 89 90MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 91MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); 92MODULE_LICENSE("GPL"); 93MODULE_VERSION(DRV_VERSION); 94 95#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 96static int debug = -1; 97module_param(debug, int, 0); 98MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 99 100/* forward decls */ 101static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 102static void 
ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); 103 104static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, 105 struct ixgbevf_ring *rx_ring, 106 u32 val) 107{ 108 /* 109 * Force memory writes to complete before letting h/w 110 * know there are new descriptors to fetch. (Only 111 * applicable for weak-ordered memory model archs, 112 * such as IA-64). 113 */ 114 wmb(); 115 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); 116} 117 118/** 119 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 120 * @adapter: pointer to adapter struct 121 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 122 * @queue: queue to map the corresponding interrupt to 123 * @msix_vector: the vector to map to the corresponding queue 124 * 125 */ 126static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, 127 u8 queue, u8 msix_vector) 128{ 129 u32 ivar, index; 130 struct ixgbe_hw *hw = &adapter->hw; 131 if (direction == -1) { 132 /* other causes */ 133 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 134 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 135 ivar &= ~0xFF; 136 ivar |= msix_vector; 137 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 138 } else { 139 /* tx or rx causes */ 140 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 141 index = ((16 * (queue & 1)) + (8 * direction)); 142 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 143 ivar &= ~(0xFF << index); 144 ivar |= (msix_vector << index); 145 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); 146 } 147} 148 149static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, 150 struct ixgbevf_tx_buffer 151 *tx_buffer_info) 152{ 153 if (tx_buffer_info->dma) { 154 if (tx_buffer_info->mapped_as_page) 155 dma_unmap_page(tx_ring->dev, 156 tx_buffer_info->dma, 157 tx_buffer_info->length, 158 DMA_TO_DEVICE); 159 else 160 dma_unmap_single(tx_ring->dev, 161 tx_buffer_info->dma, 162 tx_buffer_info->length, 163 DMA_TO_DEVICE); 164 tx_buffer_info->dma = 0; 165 } 166 if (tx_buffer_info->skb) { 167 dev_kfree_skb_any(tx_buffer_info->skb); 168 tx_buffer_info->skb = NULL; 169 } 170 tx_buffer_info->time_stamp = 0; 171 /* tx_buffer_info must be completely set up in the transmit path */ 172} 173 174#define IXGBE_MAX_TXD_PWR 14 175#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 176 177/* Tx Descriptors needed, worst case */ 178#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) 179#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 180 181static void ixgbevf_tx_timeout(struct net_device *netdev); 182 183/** 184 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 185 * @q_vector: board private structure 186 * @tx_ring: tx ring to clean 187 **/ 188static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, 189 struct ixgbevf_ring *tx_ring) 190{ 191 struct ixgbevf_adapter *adapter = q_vector->adapter; 192 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 193 struct ixgbevf_tx_buffer *tx_buffer_info; 194 unsigned int i, eop, count = 0; 195 unsigned int total_bytes = 0, total_packets = 0; 196 197 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 198 return true; 199 200 i = tx_ring->next_to_clean; 201 eop = tx_ring->tx_buffer_info[i].next_to_watch; 202 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); 203 204 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 205 (count < tx_ring->count)) { 206 bool cleaned = false; 207 rmb(); /* read buffer_info after eop_desc */ 208 /* eop could change between read and DD-check */ 209 if (unlikely(eop != 
tx_ring->tx_buffer_info[i].next_to_watch)) 210 goto cont_loop; 211 for ( ; !cleaned; count++) { 212 struct sk_buff *skb; 213 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 214 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 215 cleaned = (i == eop); 216 skb = tx_buffer_info->skb; 217 218 if (cleaned && skb) { 219 unsigned int segs, bytecount; 220 221 /* gso_segs is currently only valid for tcp */ 222 segs = skb_shinfo(skb)->gso_segs ?: 1; 223 /* multiply data chunks by size of headers */ 224 bytecount = ((segs - 1) * skb_headlen(skb)) + 225 skb->len; 226 total_packets += segs; 227 total_bytes += bytecount; 228 } 229 230 ixgbevf_unmap_and_free_tx_resource(tx_ring, 231 tx_buffer_info); 232 233 tx_desc->wb.status = 0; 234 235 i++; 236 if (i == tx_ring->count) 237 i = 0; 238 } 239 240cont_loop: 241 eop = tx_ring->tx_buffer_info[i].next_to_watch; 242 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); 243 } 244 245 tx_ring->next_to_clean = i; 246 247#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 248 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && 249 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 250 /* Make sure that anybody stopping the queue after this 251 * sees the new next_to_clean. 252 */ 253 smp_mb(); 254 if (__netif_subqueue_stopped(tx_ring->netdev, 255 tx_ring->queue_index) && 256 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 257 netif_wake_subqueue(tx_ring->netdev, 258 tx_ring->queue_index); 259 ++adapter->restart_queue; 260 } 261 } 262 263 u64_stats_update_begin(&tx_ring->syncp); 264 tx_ring->total_bytes += total_bytes; 265 tx_ring->total_packets += total_packets; 266 u64_stats_update_end(&tx_ring->syncp); 267 q_vector->tx.total_bytes += total_bytes; 268 q_vector->tx.total_packets += total_packets; 269 270 return count < tx_ring->count; 271} 272 273/** 274 * ixgbevf_receive_skb - Send a completed packet up the stack 275 * @q_vector: structure containing interrupt and ring information 276 * @skb: packet to send up 277 * @status: hardware indication of status of receive 278 * @rx_desc: rx descriptor 279 **/ 280static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, 281 struct sk_buff *skb, u8 status, 282 union ixgbe_adv_rx_desc *rx_desc) 283{ 284 struct ixgbevf_adapter *adapter = q_vector->adapter; 285 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 286 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 287 288 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) 289 __vlan_hwaccel_put_tag(skb, tag); 290 291 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 292 napi_gro_receive(&q_vector->napi, skb); 293 else 294 netif_rx(skb); 295} 296 297/** 298 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 299 * @adapter: address of board private structure 300 * @status_err: hardware indication of status of receive 301 * @skb: skb currently being received and modified 302 **/ 303static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, 304 struct ixgbevf_ring *ring, 305 u32 status_err, struct sk_buff *skb) 306{ 307 skb_checksum_none_assert(skb); 308 309 /* Rx csum disabled */ 310 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 311 return; 312 313 /* if IP and error */ 314 if ((status_err & IXGBE_RXD_STAT_IPCS) && 315 (status_err & IXGBE_RXDADV_ERR_IPE)) { 316 adapter->hw_csum_rx_error++; 317 return; 318 } 319 320 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 321 return; 322 323 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 324 adapter->hw_csum_rx_error++; 325 return; 326 } 327 328 /* It must be a TCP or UDP packet with a valid checksum */ 329 skb->ip_summed = 
CHECKSUM_UNNECESSARY; 330 adapter->hw_csum_rx_good++; 331} 332 333/** 334 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 335 * @adapter: address of board private structure 336 **/ 337static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, 338 struct ixgbevf_ring *rx_ring, 339 int cleaned_count) 340{ 341 struct pci_dev *pdev = adapter->pdev; 342 union ixgbe_adv_rx_desc *rx_desc; 343 struct ixgbevf_rx_buffer *bi; 344 unsigned int i = rx_ring->next_to_use; 345 346 bi = &rx_ring->rx_buffer_info[i]; 347 348 while (cleaned_count--) { 349 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 350 351 if (!bi->skb) { 352 struct sk_buff *skb; 353 354 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 355 rx_ring->rx_buf_len); 356 if (!skb) { 357 adapter->alloc_rx_buff_failed++; 358 goto no_buffers; 359 } 360 bi->skb = skb; 361 362 bi->dma = dma_map_single(&pdev->dev, skb->data, 363 rx_ring->rx_buf_len, 364 DMA_FROM_DEVICE); 365 if (dma_mapping_error(&pdev->dev, bi->dma)) { 366 dev_kfree_skb(skb); 367 bi->skb = NULL; 368 dev_err(&pdev->dev, "RX DMA map failed\n"); 369 break; 370 } 371 } 372 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 373 374 i++; 375 if (i == rx_ring->count) 376 i = 0; 377 bi = &rx_ring->rx_buffer_info[i]; 378 } 379 380no_buffers: 381 if (rx_ring->next_to_use != i) { 382 rx_ring->next_to_use = i; 383 384 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); 385 } 386} 387 388static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 389 u32 qmask) 390{ 391 struct ixgbe_hw *hw = &adapter->hw; 392 393 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); 394} 395 396static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 397 struct ixgbevf_ring *rx_ring, 398 int budget) 399{ 400 struct ixgbevf_adapter *adapter = q_vector->adapter; 401 struct pci_dev *pdev = adapter->pdev; 402 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 403 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; 404 struct sk_buff *skb; 405 unsigned int i; 406 u32 len, staterr; 407 int cleaned_count = 0; 408 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 409 410 i = rx_ring->next_to_clean; 411 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 412 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 413 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 414 415 while (staterr & IXGBE_RXD_STAT_DD) { 416 if (!budget) 417 break; 418 budget--; 419 420 rmb(); /* read descriptor and rx_buffer_info after status DD */ 421 len = le16_to_cpu(rx_desc->wb.upper.length); 422 skb = rx_buffer_info->skb; 423 prefetch(skb->data - NET_IP_ALIGN); 424 rx_buffer_info->skb = NULL; 425 426 if (rx_buffer_info->dma) { 427 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 428 rx_ring->rx_buf_len, 429 DMA_FROM_DEVICE); 430 rx_buffer_info->dma = 0; 431 skb_put(skb, len); 432 } 433 434 i++; 435 if (i == rx_ring->count) 436 i = 0; 437 438 next_rxd = IXGBEVF_RX_DESC(rx_ring, i); 439 prefetch(next_rxd); 440 cleaned_count++; 441 442 next_buffer = &rx_ring->rx_buffer_info[i]; 443 444 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 445 skb->next = next_buffer->skb; 446 IXGBE_CB(skb->next)->prev = skb; 447 adapter->non_eop_descs++; 448 goto next_desc; 449 } 450 451 /* we should not be chaining buffers, if we did drop the skb */ 452 if (IXGBE_CB(skb)->prev) { 453 do { 454 struct sk_buff *this = skb; 455 skb = IXGBE_CB(skb)->prev; 456 dev_kfree_skb(this); 457 } while (skb); 458 goto next_desc; 459 } 460 461 /* ERR_MASK will only have valid bits if EOP set */ 462 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 463 
dev_kfree_skb_irq(skb); 464 goto next_desc; 465 } 466 467 ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb); 468 469 /* probably a little skewed due to removing CRC */ 470 total_rx_bytes += skb->len; 471 total_rx_packets++; 472 473 /* 474 * Work around issue of some types of VM to VM loop back 475 * packets not getting split correctly 476 */ 477 if (staterr & IXGBE_RXD_STAT_LB) { 478 u32 header_fixup_len = skb_headlen(skb); 479 if (header_fixup_len < 14) 480 skb_push(skb, header_fixup_len); 481 } 482 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 483 484 /* Workaround hardware that can't do proper VEPA multicast 485 * source pruning. 486 */ 487 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && 488 !(compare_ether_addr(adapter->netdev->dev_addr, 489 eth_hdr(skb)->h_source))) { 490 dev_kfree_skb_irq(skb); 491 goto next_desc; 492 } 493 494 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc); 495 496next_desc: 497 rx_desc->wb.upper.status_error = 0; 498 499 /* return some buffers to hardware, one at a time is too slow */ 500 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { 501 ixgbevf_alloc_rx_buffers(adapter, rx_ring, 502 cleaned_count); 503 cleaned_count = 0; 504 } 505 506 /* use prefetched values */ 507 rx_desc = next_rxd; 508 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 509 510 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 511 } 512 513 rx_ring->next_to_clean = i; 514 cleaned_count = IXGBE_DESC_UNUSED(rx_ring); 515 516 if (cleaned_count) 517 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 518 519 u64_stats_update_begin(&rx_ring->syncp); 520 rx_ring->total_packets += total_rx_packets; 521 rx_ring->total_bytes += total_rx_bytes; 522 u64_stats_update_end(&rx_ring->syncp); 523 q_vector->rx.total_packets += total_rx_packets; 524 q_vector->rx.total_bytes += total_rx_bytes; 525 526 return !!budget; 527} 528 529/** 530 * ixgbevf_poll - NAPI polling calback 531 * @napi: napi struct with our devices info in it 532 * @budget: amount of work driver is allowed to do this pass, in packets 533 * 534 * This function will clean more than one or more rings associated with a 535 * q_vector. 
536 **/ 537static int ixgbevf_poll(struct napi_struct *napi, int budget) 538{ 539 struct ixgbevf_q_vector *q_vector = 540 container_of(napi, struct ixgbevf_q_vector, napi); 541 struct ixgbevf_adapter *adapter = q_vector->adapter; 542 struct ixgbevf_ring *ring; 543 int per_ring_budget; 544 bool clean_complete = true; 545 546 ixgbevf_for_each_ring(ring, q_vector->tx) 547 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); 548 549 /* attempt to distribute budget to each queue fairly, but don't allow 550 * the budget to go below 1 because we'll exit polling */ 551 if (q_vector->rx.count > 1) 552 per_ring_budget = max(budget/q_vector->rx.count, 1); 553 else 554 per_ring_budget = budget; 555 556 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 557 ixgbevf_for_each_ring(ring, q_vector->rx) 558 clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring, 559 per_ring_budget); 560 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 561 562 /* If all work not completed, return budget and keep polling */ 563 if (!clean_complete) 564 return budget; 565 /* all work done, exit the polling mode */ 566 napi_complete(napi); 567 if (adapter->rx_itr_setting & 1) 568 ixgbevf_set_itr(q_vector); 569 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 570 ixgbevf_irq_enable_queues(adapter, 571 1 << q_vector->v_idx); 572 573 return 0; 574} 575 576/** 577 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 578 * @q_vector: structure containing interrupt and ring information 579 */ 580static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) 581{ 582 struct ixgbevf_adapter *adapter = q_vector->adapter; 583 struct ixgbe_hw *hw = &adapter->hw; 584 int v_idx = q_vector->v_idx; 585 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 586 587 /* 588 * set the WDIS bit to not clear the timer bits and cause an 589 * immediate assertion of the interrupt 590 */ 591 itr_reg |= IXGBE_EITR_CNT_WDIS; 592 593 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 594} 595 596/** 597 * ixgbevf_configure_msix - Configure MSI-X hardware 598 * @adapter: board private structure 599 * 600 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X 601 * interrupts. 602 **/ 603static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 604{ 605 struct ixgbevf_q_vector *q_vector; 606 int q_vectors, v_idx; 607 608 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 609 adapter->eims_enable_mask = 0; 610 611 /* 612 * Populate the IVAR table and set the ITR values to the 613 * corresponding register. 
614 */ 615 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 616 struct ixgbevf_ring *ring; 617 q_vector = adapter->q_vector[v_idx]; 618 619 ixgbevf_for_each_ring(ring, q_vector->rx) 620 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); 621 622 ixgbevf_for_each_ring(ring, q_vector->tx) 623 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); 624 625 if (q_vector->tx.ring && !q_vector->rx.ring) { 626 /* tx only vector */ 627 if (adapter->tx_itr_setting == 1) 628 q_vector->itr = IXGBE_10K_ITR; 629 else 630 q_vector->itr = adapter->tx_itr_setting; 631 } else { 632 /* rx or rx/tx vector */ 633 if (adapter->rx_itr_setting == 1) 634 q_vector->itr = IXGBE_20K_ITR; 635 else 636 q_vector->itr = adapter->rx_itr_setting; 637 } 638 639 /* add q_vector eims value to global eims_enable_mask */ 640 adapter->eims_enable_mask |= 1 << v_idx; 641 642 ixgbevf_write_eitr(q_vector); 643 } 644 645 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 646 /* setup eims_other and add value to global eims_enable_mask */ 647 adapter->eims_other = 1 << v_idx; 648 adapter->eims_enable_mask |= adapter->eims_other; 649} 650 651enum latency_range { 652 lowest_latency = 0, 653 low_latency = 1, 654 bulk_latency = 2, 655 latency_invalid = 255 656}; 657 658/** 659 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 660 * @q_vector: structure containing interrupt and ring information 661 * @ring_container: structure containing ring performance data 662 * 663 * Stores a new ITR value based on packets and byte 664 * counts during the last interrupt. The advantage of per interrupt 665 * computation is faster updates and more accurate ITR for the current 666 * traffic pattern. Constants in this function were computed 667 * based on theoretical maximum wire speed and thresholds were set based 668 * on testing data as well as attempting to minimize response time 669 * while increasing bulk throughput. 670 **/ 671static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, 672 struct ixgbevf_ring_container *ring_container) 673{ 674 int bytes = ring_container->total_bytes; 675 int packets = ring_container->total_packets; 676 u32 timepassed_us; 677 u64 bytes_perint; 678 u8 itr_setting = ring_container->itr; 679 680 if (packets == 0) 681 return; 682 683 /* simple throttlerate management 684 * 0-20MB/s lowest (100000 ints/s) 685 * 20-100MB/s low (20000 ints/s) 686 * 100-1249MB/s bulk (8000 ints/s) 687 */ 688 /* what was last interrupt timeslice? 
*/ 689 timepassed_us = q_vector->itr >> 2; 690 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 691 692 switch (itr_setting) { 693 case lowest_latency: 694 if (bytes_perint > 10) 695 itr_setting = low_latency; 696 break; 697 case low_latency: 698 if (bytes_perint > 20) 699 itr_setting = bulk_latency; 700 else if (bytes_perint <= 10) 701 itr_setting = lowest_latency; 702 break; 703 case bulk_latency: 704 if (bytes_perint <= 20) 705 itr_setting = low_latency; 706 break; 707 } 708 709 /* clear work counters since we have the values we need */ 710 ring_container->total_bytes = 0; 711 ring_container->total_packets = 0; 712 713 /* write updated itr to ring container */ 714 ring_container->itr = itr_setting; 715} 716 717static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) 718{ 719 u32 new_itr = q_vector->itr; 720 u8 current_itr; 721 722 ixgbevf_update_itr(q_vector, &q_vector->tx); 723 ixgbevf_update_itr(q_vector, &q_vector->rx); 724 725 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 726 727 switch (current_itr) { 728 /* counts and packets in update_itr are dependent on these numbers */ 729 case lowest_latency: 730 new_itr = IXGBE_100K_ITR; 731 break; 732 case low_latency: 733 new_itr = IXGBE_20K_ITR; 734 break; 735 case bulk_latency: 736 default: 737 new_itr = IXGBE_8K_ITR; 738 break; 739 } 740 741 if (new_itr != q_vector->itr) { 742 /* do an exponential smoothing */ 743 new_itr = (10 * new_itr * q_vector->itr) / 744 ((9 * new_itr) + q_vector->itr); 745 746 /* save the algorithm value here */ 747 q_vector->itr = new_itr; 748 749 ixgbevf_write_eitr(q_vector); 750 } 751} 752 753static irqreturn_t ixgbevf_msix_other(int irq, void *data) 754{ 755 struct ixgbevf_adapter *adapter = data; 756 struct ixgbe_hw *hw = &adapter->hw; 757 758 hw->mac.get_link_status = 1; 759 760 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 761 mod_timer(&adapter->watchdog_timer, jiffies); 762 763 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 764 765 return IRQ_HANDLED; 766} 767 768 769/** 770 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) 771 * @irq: unused 772 * @data: pointer to our q_vector struct for this interrupt vector 773 **/ 774static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) 775{ 776 struct ixgbevf_q_vector *q_vector = data; 777 778 /* EIAM disabled interrupts (on this vector) for us */ 779 if (q_vector->rx.ring || q_vector->tx.ring) 780 napi_schedule(&q_vector->napi); 781 782 return IRQ_HANDLED; 783} 784 785static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, 786 int r_idx) 787{ 788 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 789 790 a->rx_ring[r_idx].next = q_vector->rx.ring; 791 q_vector->rx.ring = &a->rx_ring[r_idx]; 792 q_vector->rx.count++; 793} 794 795static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, 796 int t_idx) 797{ 798 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 799 800 a->tx_ring[t_idx].next = q_vector->tx.ring; 801 q_vector->tx.ring = &a->tx_ring[t_idx]; 802 q_vector->tx.count++; 803} 804 805/** 806 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors 807 * @adapter: board private structure to initialize 808 * 809 * This function maps descriptor rings to the queue-specific vectors 810 * we were allotted through the MSI-X enabling code. Ideally, we'd have 811 * one vector per ring/queue, but on a constrained vector budget, we 812 * group the rings as "efficiently" as possible. You would add new 813 * mapping configurations in here. 
814 **/ 815static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) 816{ 817 int q_vectors; 818 int v_start = 0; 819 int rxr_idx = 0, txr_idx = 0; 820 int rxr_remaining = adapter->num_rx_queues; 821 int txr_remaining = adapter->num_tx_queues; 822 int i, j; 823 int rqpv, tqpv; 824 int err = 0; 825 826 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 827 828 /* 829 * The ideal configuration... 830 * We have enough vectors to map one per queue. 831 */ 832 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 833 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 834 map_vector_to_rxq(adapter, v_start, rxr_idx); 835 836 for (; txr_idx < txr_remaining; v_start++, txr_idx++) 837 map_vector_to_txq(adapter, v_start, txr_idx); 838 goto out; 839 } 840 841 /* 842 * If we don't have enough vectors for a 1-to-1 843 * mapping, we'll have to group them so there are 844 * multiple queues per vector. 845 */ 846 /* Re-adjusting *qpv takes care of the remainder. */ 847 for (i = v_start; i < q_vectors; i++) { 848 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); 849 for (j = 0; j < rqpv; j++) { 850 map_vector_to_rxq(adapter, i, rxr_idx); 851 rxr_idx++; 852 rxr_remaining--; 853 } 854 } 855 for (i = v_start; i < q_vectors; i++) { 856 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); 857 for (j = 0; j < tqpv; j++) { 858 map_vector_to_txq(adapter, i, txr_idx); 859 txr_idx++; 860 txr_remaining--; 861 } 862 } 863 864out: 865 return err; 866} 867 868/** 869 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts 870 * @adapter: board private structure 871 * 872 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests 873 * interrupts from the kernel. 874 **/ 875static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 876{ 877 struct net_device *netdev = adapter->netdev; 878 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 879 int vector, err; 880 int ri = 0, ti = 0; 881 882 for (vector = 0; vector < q_vectors; vector++) { 883 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; 884 struct msix_entry *entry = &adapter->msix_entries[vector]; 885 886 if (q_vector->tx.ring && q_vector->rx.ring) { 887 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 888 "%s-%s-%d", netdev->name, "TxRx", ri++); 889 ti++; 890 } else if (q_vector->rx.ring) { 891 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 892 "%s-%s-%d", netdev->name, "rx", ri++); 893 } else if (q_vector->tx.ring) { 894 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 895 "%s-%s-%d", netdev->name, "tx", ti++); 896 } else { 897 /* skip this unused q_vector */ 898 continue; 899 } 900 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, 901 q_vector->name, q_vector); 902 if (err) { 903 hw_dbg(&adapter->hw, 904 "request_irq failed for MSIX interrupt " 905 "Error: %d\n", err); 906 goto free_queue_irqs; 907 } 908 } 909 910 err = request_irq(adapter->msix_entries[vector].vector, 911 &ixgbevf_msix_other, 0, netdev->name, adapter); 912 if (err) { 913 hw_dbg(&adapter->hw, 914 "request_irq for msix_other failed: %d\n", err); 915 goto free_queue_irqs; 916 } 917 918 return 0; 919 920free_queue_irqs: 921 while (vector) { 922 vector--; 923 free_irq(adapter->msix_entries[vector].vector, 924 adapter->q_vector[vector]); 925 } 926 pci_disable_msix(adapter->pdev); 927 kfree(adapter->msix_entries); 928 adapter->msix_entries = NULL; 929 return err; 930} 931 932static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) 933{ 934 int i, q_vectors = 
adapter->num_msix_vectors - NON_Q_VECTORS; 935 936 for (i = 0; i < q_vectors; i++) { 937 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; 938 q_vector->rx.ring = NULL; 939 q_vector->tx.ring = NULL; 940 q_vector->rx.count = 0; 941 q_vector->tx.count = 0; 942 } 943} 944 945/** 946 * ixgbevf_request_irq - initialize interrupts 947 * @adapter: board private structure 948 * 949 * Attempts to configure interrupts using the best available 950 * capabilities of the hardware and kernel. 951 **/ 952static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) 953{ 954 int err = 0; 955 956 err = ixgbevf_request_msix_irqs(adapter); 957 958 if (err) 959 hw_dbg(&adapter->hw, 960 "request_irq failed, Error %d\n", err); 961 962 return err; 963} 964 965static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 966{ 967 int i, q_vectors; 968 969 q_vectors = adapter->num_msix_vectors; 970 i = q_vectors - 1; 971 972 free_irq(adapter->msix_entries[i].vector, adapter); 973 i--; 974 975 for (; i >= 0; i--) { 976 /* free only the irqs that were actually requested */ 977 if (!adapter->q_vector[i]->rx.ring && 978 !adapter->q_vector[i]->tx.ring) 979 continue; 980 981 free_irq(adapter->msix_entries[i].vector, 982 adapter->q_vector[i]); 983 } 984 985 ixgbevf_reset_q_vectors(adapter); 986} 987 988/** 989 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC 990 * @adapter: board private structure 991 **/ 992static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 993{ 994 struct ixgbe_hw *hw = &adapter->hw; 995 int i; 996 997 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); 998 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 999 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); 1000 1001 IXGBE_WRITE_FLUSH(hw); 1002 1003 for (i = 0; i < adapter->num_msix_vectors; i++) 1004 synchronize_irq(adapter->msix_entries[i].vector); 1005} 1006 1007/** 1008 * ixgbevf_irq_enable - Enable default interrupt generation settings 1009 * @adapter: board private structure 1010 **/ 1011static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) 1012{ 1013 struct ixgbe_hw *hw = &adapter->hw; 1014 1015 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); 1016 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); 1017 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); 1018} 1019 1020/** 1021 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset 1022 * @adapter: board private structure 1023 * 1024 * Configure the Tx unit of the MAC after a reset. 1025 **/ 1026static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) 1027{ 1028 u64 tdba; 1029 struct ixgbe_hw *hw = &adapter->hw; 1030 u32 i, j, tdlen, txctrl; 1031 1032 /* Setup the HW Tx Head and Tail descriptor pointers */ 1033 for (i = 0; i < adapter->num_tx_queues; i++) { 1034 struct ixgbevf_ring *ring = &adapter->tx_ring[i]; 1035 j = ring->reg_idx; 1036 tdba = ring->dma; 1037 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 1038 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), 1039 (tdba & DMA_BIT_MASK(32))); 1040 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); 1041 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen); 1042 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0); 1043 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0); 1044 adapter->tx_ring[i].head = IXGBE_VFTDH(j); 1045 adapter->tx_ring[i].tail = IXGBE_VFTDT(j); 1046 /* Disable Tx Head Writeback RO bit, since this hoses 1047 * bookkeeping if things aren't delivered in order. 
1048 */ 1049 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); 1050 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1051 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); 1052 } 1053} 1054 1055#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1056 1057static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) 1058{ 1059 struct ixgbevf_ring *rx_ring; 1060 struct ixgbe_hw *hw = &adapter->hw; 1061 u32 srrctl; 1062 1063 rx_ring = &adapter->rx_ring[index]; 1064 1065 srrctl = IXGBE_SRRCTL_DROP_EN; 1066 1067 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1068 1069 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> 1070 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1071 1072 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1073} 1074 1075static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) 1076{ 1077 struct ixgbe_hw *hw = &adapter->hw; 1078 struct net_device *netdev = adapter->netdev; 1079 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1080 int i; 1081 u16 rx_buf_len; 1082 1083 /* notify the PF of our intent to use this size of frame */ 1084 ixgbevf_rlpml_set_vf(hw, max_frame); 1085 1086 /* PF will allow an extra 4 bytes past for vlan tagged frames */ 1087 max_frame += VLAN_HLEN; 1088 1089 /* 1090 * Allocate buffer sizes that fit well into 32K and 1091 * take into account max frame size of 9.5K 1092 */ 1093 if ((hw->mac.type == ixgbe_mac_X540_vf) && 1094 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) 1095 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1096 else if (max_frame <= IXGBEVF_RXBUFFER_2K) 1097 rx_buf_len = IXGBEVF_RXBUFFER_2K; 1098 else if (max_frame <= IXGBEVF_RXBUFFER_4K) 1099 rx_buf_len = IXGBEVF_RXBUFFER_4K; 1100 else if (max_frame <= IXGBEVF_RXBUFFER_8K) 1101 rx_buf_len = IXGBEVF_RXBUFFER_8K; 1102 else 1103 rx_buf_len = IXGBEVF_RXBUFFER_10K; 1104 1105 for (i = 0; i < adapter->num_rx_queues; i++) 1106 adapter->rx_ring[i].rx_buf_len = rx_buf_len; 1107} 1108 1109/** 1110 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1111 * @adapter: board private structure 1112 * 1113 * Configure the Rx unit of the MAC after a reset. 
1114 **/ 1115static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) 1116{ 1117 u64 rdba; 1118 struct ixgbe_hw *hw = &adapter->hw; 1119 int i, j; 1120 u32 rdlen; 1121 1122 /* PSRTYPE must be initialized in 82599 */ 1123 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); 1124 1125 /* set_rx_buffer_len must be called before ring initialization */ 1126 ixgbevf_set_rx_buffer_len(adapter); 1127 1128 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1129 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1130 * the Base and Length of the Rx Descriptor Ring */ 1131 for (i = 0; i < adapter->num_rx_queues; i++) { 1132 rdba = adapter->rx_ring[i].dma; 1133 j = adapter->rx_ring[i].reg_idx; 1134 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1135 (rdba & DMA_BIT_MASK(32))); 1136 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1137 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen); 1138 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0); 1139 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); 1140 adapter->rx_ring[i].head = IXGBE_VFRDH(j); 1141 adapter->rx_ring[i].tail = IXGBE_VFRDT(j); 1142 1143 ixgbevf_configure_srrctl(adapter, j); 1144 } 1145} 1146 1147static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1148{ 1149 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1150 struct ixgbe_hw *hw = &adapter->hw; 1151 int err; 1152 1153 spin_lock_bh(&adapter->mbx_lock); 1154 1155 /* add VID to filter table */ 1156 err = hw->mac.ops.set_vfta(hw, vid, 0, true); 1157 1158 spin_unlock_bh(&adapter->mbx_lock); 1159 1160 /* translate error return types so error makes sense */ 1161 if (err == IXGBE_ERR_MBX) 1162 return -EIO; 1163 1164 if (err == IXGBE_ERR_INVALID_ARGUMENT) 1165 return -EACCES; 1166 1167 set_bit(vid, adapter->active_vlans); 1168 1169 return err; 1170} 1171 1172static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1173{ 1174 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1175 struct ixgbe_hw *hw = &adapter->hw; 1176 int err = -EOPNOTSUPP; 1177 1178 spin_lock_bh(&adapter->mbx_lock); 1179 1180 /* remove VID from filter table */ 1181 err = hw->mac.ops.set_vfta(hw, vid, 0, false); 1182 1183 spin_unlock_bh(&adapter->mbx_lock); 1184 1185 clear_bit(vid, adapter->active_vlans); 1186 1187 return err; 1188} 1189 1190static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 1191{ 1192 u16 vid; 1193 1194 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1195 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); 1196} 1197 1198static int ixgbevf_write_uc_addr_list(struct net_device *netdev) 1199{ 1200 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1201 struct ixgbe_hw *hw = &adapter->hw; 1202 int count = 0; 1203 1204 if ((netdev_uc_count(netdev)) > 10) { 1205 pr_err("Too many unicast filters - No Space\n"); 1206 return -ENOSPC; 1207 } 1208 1209 if (!netdev_uc_empty(netdev)) { 1210 struct netdev_hw_addr *ha; 1211 netdev_for_each_uc_addr(ha, netdev) { 1212 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); 1213 udelay(200); 1214 } 1215 } else { 1216 /* 1217 * If the list is empty then send message to PF driver to 1218 * clear all macvlans on this VF. 1219 */ 1220 hw->mac.ops.set_uc_addr(hw, 0, NULL); 1221 } 1222 1223 return count; 1224} 1225 1226/** 1227 * ixgbevf_set_rx_mode - Multicast set 1228 * @netdev: network interface device structure 1229 * 1230 * The set_rx_method entry point is called whenever the multicast address 1231 * list or the network interface flags are updated. 
This routine is 1232 * responsible for configuring the hardware for proper multicast mode. 1233 **/ 1234static void ixgbevf_set_rx_mode(struct net_device *netdev) 1235{ 1236 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1237 struct ixgbe_hw *hw = &adapter->hw; 1238 1239 spin_lock_bh(&adapter->mbx_lock); 1240 1241 /* reprogram multicast list */ 1242 hw->mac.ops.update_mc_addr_list(hw, netdev); 1243 1244 ixgbevf_write_uc_addr_list(netdev); 1245 1246 spin_unlock_bh(&adapter->mbx_lock); 1247} 1248 1249static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1250{ 1251 int q_idx; 1252 struct ixgbevf_q_vector *q_vector; 1253 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1254 1255 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1256 q_vector = adapter->q_vector[q_idx]; 1257 napi_enable(&q_vector->napi); 1258 } 1259} 1260 1261static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) 1262{ 1263 int q_idx; 1264 struct ixgbevf_q_vector *q_vector; 1265 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1266 1267 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1268 q_vector = adapter->q_vector[q_idx]; 1269 napi_disable(&q_vector->napi); 1270 } 1271} 1272 1273static void ixgbevf_configure(struct ixgbevf_adapter *adapter) 1274{ 1275 struct net_device *netdev = adapter->netdev; 1276 int i; 1277 1278 ixgbevf_set_rx_mode(netdev); 1279 1280 ixgbevf_restore_vlan(adapter); 1281 1282 ixgbevf_configure_tx(adapter); 1283 ixgbevf_configure_rx(adapter); 1284 for (i = 0; i < adapter->num_rx_queues; i++) { 1285 struct ixgbevf_ring *ring = &adapter->rx_ring[i]; 1286 ixgbevf_alloc_rx_buffers(adapter, ring, 1287 IXGBE_DESC_UNUSED(ring)); 1288 } 1289} 1290 1291#define IXGBE_MAX_RX_DESC_POLL 10 1292static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, 1293 int rxr) 1294{ 1295 struct ixgbe_hw *hw = &adapter->hw; 1296 int j = adapter->rx_ring[rxr].reg_idx; 1297 int k; 1298 1299 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { 1300 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) 1301 break; 1302 else 1303 msleep(1); 1304 } 1305 if (k >= IXGBE_MAX_RX_DESC_POLL) { 1306 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d " 1307 "not set within the polling period\n", rxr); 1308 } 1309 1310 ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr], 1311 adapter->rx_ring[rxr].count - 1); 1312} 1313 1314static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) 1315{ 1316 /* Only save pre-reset stats if there are some */ 1317 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { 1318 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - 1319 adapter->stats.base_vfgprc; 1320 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - 1321 adapter->stats.base_vfgptc; 1322 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - 1323 adapter->stats.base_vfgorc; 1324 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - 1325 adapter->stats.base_vfgotc; 1326 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - 1327 adapter->stats.base_vfmprc; 1328 } 1329} 1330 1331static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) 1332{ 1333 struct ixgbe_hw *hw = &adapter->hw; 1334 1335 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 1336 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 1337 adapter->stats.last_vfgorc |= 1338 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 1339 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 1340 adapter->stats.last_vfgotc = 
IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 1341 adapter->stats.last_vfgotc |= 1342 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 1343 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 1344 1345 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; 1346 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; 1347 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; 1348 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; 1349 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1350} 1351 1352static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) 1353{ 1354 struct ixgbe_hw *hw = &adapter->hw; 1355 int api[] = { ixgbe_mbox_api_11, 1356 ixgbe_mbox_api_10, 1357 ixgbe_mbox_api_unknown }; 1358 int err = 0, idx = 0; 1359 1360 spin_lock_bh(&adapter->mbx_lock); 1361 1362 while (api[idx] != ixgbe_mbox_api_unknown) { 1363 err = ixgbevf_negotiate_api_version(hw, api[idx]); 1364 if (!err) 1365 break; 1366 idx++; 1367 } 1368 1369 spin_unlock_bh(&adapter->mbx_lock); 1370} 1371 1372static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1373{ 1374 struct net_device *netdev = adapter->netdev; 1375 struct ixgbe_hw *hw = &adapter->hw; 1376 int i, j = 0; 1377 int num_rx_rings = adapter->num_rx_queues; 1378 u32 txdctl, rxdctl; 1379 1380 for (i = 0; i < adapter->num_tx_queues; i++) { 1381 j = adapter->tx_ring[i].reg_idx; 1382 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1383 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 1384 txdctl |= (8 << 16); 1385 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1386 } 1387 1388 for (i = 0; i < adapter->num_tx_queues; i++) { 1389 j = adapter->tx_ring[i].reg_idx; 1390 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1391 txdctl |= IXGBE_TXDCTL_ENABLE; 1392 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1393 } 1394 1395 for (i = 0; i < num_rx_rings; i++) { 1396 j = adapter->rx_ring[i].reg_idx; 1397 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1398 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1399 if (hw->mac.type == ixgbe_mac_X540_vf) { 1400 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; 1401 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) | 1402 IXGBE_RXDCTL_RLPML_EN); 1403 } 1404 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1405 ixgbevf_rx_desc_queue_enable(adapter, i); 1406 } 1407 1408 ixgbevf_configure_msix(adapter); 1409 1410 spin_lock_bh(&adapter->mbx_lock); 1411 1412 if (is_valid_ether_addr(hw->mac.addr)) 1413 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 1414 else 1415 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1416 1417 spin_unlock_bh(&adapter->mbx_lock); 1418 1419 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1420 ixgbevf_napi_enable_all(adapter); 1421 1422 /* enable transmits */ 1423 netif_tx_start_all_queues(netdev); 1424 1425 ixgbevf_save_reset_stats(adapter); 1426 ixgbevf_init_last_counter_stats(adapter); 1427 1428 hw->mac.get_link_status = 1; 1429 mod_timer(&adapter->watchdog_timer, jiffies); 1430} 1431 1432static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter) 1433{ 1434 struct ixgbe_hw *hw = &adapter->hw; 1435 struct ixgbevf_ring *rx_ring; 1436 unsigned int def_q = 0; 1437 unsigned int num_tcs = 0; 1438 unsigned int num_rx_queues = 1; 1439 int err, i; 1440 1441 spin_lock_bh(&adapter->mbx_lock); 1442 1443 /* fetch queue configuration from the PF */ 1444 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 1445 1446 spin_unlock_bh(&adapter->mbx_lock); 1447 1448 if (err) 1449 return err; 1450 1451 if (num_tcs > 1) { 1452 /* update default Tx ring register index */ 1453 
adapter->tx_ring[0].reg_idx = def_q; 1454 1455 /* we need as many queues as traffic classes */ 1456 num_rx_queues = num_tcs; 1457 } 1458 1459 /* nothing to do if we have the correct number of queues */ 1460 if (adapter->num_rx_queues == num_rx_queues) 1461 return 0; 1462 1463 /* allocate new rings */ 1464 rx_ring = kcalloc(num_rx_queues, 1465 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1466 if (!rx_ring) 1467 return -ENOMEM; 1468 1469 /* setup ring fields */ 1470 for (i = 0; i < num_rx_queues; i++) { 1471 rx_ring[i].count = adapter->rx_ring_count; 1472 rx_ring[i].queue_index = i; 1473 rx_ring[i].reg_idx = i; 1474 rx_ring[i].dev = &adapter->pdev->dev; 1475 rx_ring[i].netdev = adapter->netdev; 1476 1477 /* allocate resources on the ring */ 1478 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); 1479 if (err) { 1480 while (i) { 1481 i--; 1482 ixgbevf_free_rx_resources(adapter, &rx_ring[i]); 1483 } 1484 kfree(rx_ring); 1485 return err; 1486 } 1487 } 1488 1489 /* free the existing rings and queues */ 1490 ixgbevf_free_all_rx_resources(adapter); 1491 adapter->num_rx_queues = 0; 1492 kfree(adapter->rx_ring); 1493 1494 /* move new rings into position on the adapter struct */ 1495 adapter->rx_ring = rx_ring; 1496 adapter->num_rx_queues = num_rx_queues; 1497 1498 /* reset ring to vector mapping */ 1499 ixgbevf_reset_q_vectors(adapter); 1500 ixgbevf_map_rings_to_vectors(adapter); 1501 1502 return 0; 1503} 1504 1505void ixgbevf_up(struct ixgbevf_adapter *adapter) 1506{ 1507 struct ixgbe_hw *hw = &adapter->hw; 1508 1509 ixgbevf_negotiate_api(adapter); 1510 1511 ixgbevf_reset_queues(adapter); 1512 1513 ixgbevf_configure(adapter); 1514 1515 ixgbevf_up_complete(adapter); 1516 1517 /* clear any pending interrupts, may auto mask */ 1518 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1519 1520 ixgbevf_irq_enable(adapter); 1521} 1522 1523/** 1524 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 1525 * @adapter: board private structure 1526 * @rx_ring: ring to free buffers from 1527 **/ 1528static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, 1529 struct ixgbevf_ring *rx_ring) 1530{ 1531 struct pci_dev *pdev = adapter->pdev; 1532 unsigned long size; 1533 unsigned int i; 1534 1535 if (!rx_ring->rx_buffer_info) 1536 return; 1537 1538 /* Free all the Rx ring sk_buffs */ 1539 for (i = 0; i < rx_ring->count; i++) { 1540 struct ixgbevf_rx_buffer *rx_buffer_info; 1541 1542 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1543 if (rx_buffer_info->dma) { 1544 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 1545 rx_ring->rx_buf_len, 1546 DMA_FROM_DEVICE); 1547 rx_buffer_info->dma = 0; 1548 } 1549 if (rx_buffer_info->skb) { 1550 struct sk_buff *skb = rx_buffer_info->skb; 1551 rx_buffer_info->skb = NULL; 1552 do { 1553 struct sk_buff *this = skb; 1554 skb = IXGBE_CB(skb)->prev; 1555 dev_kfree_skb(this); 1556 } while (skb); 1557 } 1558 } 1559 1560 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 1561 memset(rx_ring->rx_buffer_info, 0, size); 1562 1563 /* Zero out the descriptor ring */ 1564 memset(rx_ring->desc, 0, rx_ring->size); 1565 1566 rx_ring->next_to_clean = 0; 1567 rx_ring->next_to_use = 0; 1568 1569 if (rx_ring->head) 1570 writel(0, adapter->hw.hw_addr + rx_ring->head); 1571 if (rx_ring->tail) 1572 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1573} 1574 1575/** 1576 * ixgbevf_clean_tx_ring - Free Tx Buffers 1577 * @adapter: board private structure 1578 * @tx_ring: ring to be cleaned 1579 **/ 1580static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter, 1581 struct ixgbevf_ring *tx_ring) 
1582{ 1583 struct ixgbevf_tx_buffer *tx_buffer_info; 1584 unsigned long size; 1585 unsigned int i; 1586 1587 if (!tx_ring->tx_buffer_info) 1588 return; 1589 1590 /* Free all the Tx ring sk_buffs */ 1591 1592 for (i = 0; i < tx_ring->count; i++) { 1593 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1594 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1595 } 1596 1597 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 1598 memset(tx_ring->tx_buffer_info, 0, size); 1599 1600 memset(tx_ring->desc, 0, tx_ring->size); 1601 1602 tx_ring->next_to_use = 0; 1603 tx_ring->next_to_clean = 0; 1604 1605 if (tx_ring->head) 1606 writel(0, adapter->hw.hw_addr + tx_ring->head); 1607 if (tx_ring->tail) 1608 writel(0, adapter->hw.hw_addr + tx_ring->tail); 1609} 1610 1611/** 1612 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues 1613 * @adapter: board private structure 1614 **/ 1615static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) 1616{ 1617 int i; 1618 1619 for (i = 0; i < adapter->num_rx_queues; i++) 1620 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]); 1621} 1622 1623/** 1624 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues 1625 * @adapter: board private structure 1626 **/ 1627static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) 1628{ 1629 int i; 1630 1631 for (i = 0; i < adapter->num_tx_queues; i++) 1632 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1633} 1634 1635void ixgbevf_down(struct ixgbevf_adapter *adapter) 1636{ 1637 struct net_device *netdev = adapter->netdev; 1638 struct ixgbe_hw *hw = &adapter->hw; 1639 u32 txdctl; 1640 int i, j; 1641 1642 /* signal that we are down to the interrupt handler */ 1643 set_bit(__IXGBEVF_DOWN, &adapter->state); 1644 /* disable receives */ 1645 1646 netif_tx_disable(netdev); 1647 1648 msleep(10); 1649 1650 netif_tx_stop_all_queues(netdev); 1651 1652 ixgbevf_irq_disable(adapter); 1653 1654 ixgbevf_napi_disable_all(adapter); 1655 1656 del_timer_sync(&adapter->watchdog_timer); 1657 /* can't call flush scheduled work here because it can deadlock 1658 * if linkwatch_event tries to acquire the rtnl_lock which we are 1659 * holding */ 1660 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) 1661 msleep(1); 1662 1663 /* disable transmits in the hardware now that interrupts are off */ 1664 for (i = 0; i < adapter->num_tx_queues; i++) { 1665 j = adapter->tx_ring[i].reg_idx; 1666 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1667 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), 1668 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 1669 } 1670 1671 netif_carrier_off(netdev); 1672 1673 if (!pci_channel_offline(adapter->pdev)) 1674 ixgbevf_reset(adapter); 1675 1676 ixgbevf_clean_all_tx_rings(adapter); 1677 ixgbevf_clean_all_rx_rings(adapter); 1678} 1679 1680void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1681{ 1682 WARN_ON(in_interrupt()); 1683 1684 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1685 msleep(1); 1686 1687 /* 1688 * Check if PF is up before re-init. If not then skip until 1689 * later when the PF is up and ready to service requests from 1690 * the VF via mailbox. If the VF is up and running then the 1691 * watchdog task will continue to schedule reset tasks until 1692 * the PF is up and running. 
1693 */ 1694 ixgbevf_down(adapter); 1695 ixgbevf_up(adapter); 1696 1697 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1698} 1699 1700void ixgbevf_reset(struct ixgbevf_adapter *adapter) 1701{ 1702 struct ixgbe_hw *hw = &adapter->hw; 1703 struct net_device *netdev = adapter->netdev; 1704 1705 spin_lock_bh(&adapter->mbx_lock); 1706 1707 if (hw->mac.ops.reset_hw(hw)) 1708 hw_dbg(hw, "PF still resetting\n"); 1709 else 1710 hw->mac.ops.init_hw(hw); 1711 1712 spin_unlock_bh(&adapter->mbx_lock); 1713 1714 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1715 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1716 netdev->addr_len); 1717 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1718 netdev->addr_len); 1719 } 1720} 1721 1722static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1723 int vectors) 1724{ 1725 int err = 0; 1726 int vector_threshold; 1727 1728 /* We'll want at least 2 (vector_threshold): 1729 * 1) TxQ[0] + RxQ[0] handler 1730 * 2) Other (Link Status Change, etc.) 1731 */ 1732 vector_threshold = MIN_MSIX_COUNT; 1733 1734 /* The more we get, the more we will assign to Tx/Rx Cleanup 1735 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1736 * Right now, we simply care about how many we'll get; we'll 1737 * set them up later while requesting irq's. 1738 */ 1739 while (vectors >= vector_threshold) { 1740 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1741 vectors); 1742 if (!err || err < 0) /* Success or a nasty failure. */ 1743 break; 1744 else /* err == number of vectors we should try again with */ 1745 vectors = err; 1746 } 1747 1748 if (vectors < vector_threshold) 1749 err = -ENOMEM; 1750 1751 if (err) { 1752 dev_err(&adapter->pdev->dev, 1753 "Unable to allocate MSI-X interrupts\n"); 1754 kfree(adapter->msix_entries); 1755 adapter->msix_entries = NULL; 1756 } else { 1757 /* 1758 * Adjust for only the vectors we'll use, which is minimum 1759 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 1760 * vectors we were allocated. 1761 */ 1762 adapter->num_msix_vectors = vectors; 1763 } 1764 return err; 1765} 1766 1767/** 1768 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent 1769 * @adapter: board private structure to initialize 1770 * 1771 * This is the top level queue allocation routine. The order here is very 1772 * important, starting with the "most" number of features turned on at once, 1773 * and ending with the smallest set of features. This way large combinations 1774 * can be allocated if they're turned on, and smaller combinations are the 1775 * fallthrough conditions. 1776 * 1777 **/ 1778static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) 1779{ 1780 /* Start with base case */ 1781 adapter->num_rx_queues = 1; 1782 adapter->num_tx_queues = 1; 1783} 1784 1785/** 1786 * ixgbevf_alloc_queues - Allocate memory for all rings 1787 * @adapter: board private structure to initialize 1788 * 1789 * We allocate one ring per queue at run-time since we don't know the 1790 * number of queues at compile-time. The polling_netdev array is 1791 * intended for Multiqueue, but should work fine with a single queue. 
1792 **/ 1793static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) 1794{ 1795 int i; 1796 1797 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 1798 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1799 if (!adapter->tx_ring) 1800 goto err_tx_ring_allocation; 1801 1802 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 1803 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1804 if (!adapter->rx_ring) 1805 goto err_rx_ring_allocation; 1806 1807 for (i = 0; i < adapter->num_tx_queues; i++) { 1808 adapter->tx_ring[i].count = adapter->tx_ring_count; 1809 adapter->tx_ring[i].queue_index = i; 1810 /* reg_idx may be remapped later by DCB config */ 1811 adapter->tx_ring[i].reg_idx = i; 1812 adapter->tx_ring[i].dev = &adapter->pdev->dev; 1813 adapter->tx_ring[i].netdev = adapter->netdev; 1814 } 1815 1816 for (i = 0; i < adapter->num_rx_queues; i++) { 1817 adapter->rx_ring[i].count = adapter->rx_ring_count; 1818 adapter->rx_ring[i].queue_index = i; 1819 adapter->rx_ring[i].reg_idx = i; 1820 adapter->rx_ring[i].dev = &adapter->pdev->dev; 1821 adapter->rx_ring[i].netdev = adapter->netdev; 1822 } 1823 1824 return 0; 1825 1826err_rx_ring_allocation: 1827 kfree(adapter->tx_ring); 1828err_tx_ring_allocation: 1829 return -ENOMEM; 1830} 1831 1832/** 1833 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported 1834 * @adapter: board private structure to initialize 1835 * 1836 * Attempt to configure the interrupts using the best available 1837 * capabilities of the hardware and the kernel. 1838 **/ 1839static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 1840{ 1841 struct net_device *netdev = adapter->netdev; 1842 int err = 0; 1843 int vector, v_budget; 1844 1845 /* 1846 * It's easy to be greedy for MSI-X vectors, but it really 1847 * doesn't do us much good if we have a lot more vectors 1848 * than CPU's. So let's be conservative and only ask for 1849 * (roughly) the same number of vectors as there are CPU's. 1850 * The default is to use pairs of vectors. 1851 */ 1852 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); 1853 v_budget = min_t(int, v_budget, num_online_cpus()); 1854 v_budget += NON_Q_VECTORS; 1855 1856 /* A failure in MSI-X entry allocation isn't fatal, but it does 1857 * mean we disable MSI-X capabilities of the adapter. */ 1858 adapter->msix_entries = kcalloc(v_budget, 1859 sizeof(struct msix_entry), GFP_KERNEL); 1860 if (!adapter->msix_entries) { 1861 err = -ENOMEM; 1862 goto out; 1863 } 1864 1865 for (vector = 0; vector < v_budget; vector++) 1866 adapter->msix_entries[vector].entry = vector; 1867 1868 err = ixgbevf_acquire_msix_vectors(adapter, v_budget); 1869 if (err) 1870 goto out; 1871 1872 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 1873 if (err) 1874 goto out; 1875 1876 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 1877 1878out: 1879 return err; 1880} 1881 1882/** 1883 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 1884 * @adapter: board private structure to initialize 1885 * 1886 * We allocate one q_vector per queue interrupt. If allocation fails we 1887 * return -ENOMEM. 
1888 **/ 1889static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 1890{ 1891 int q_idx, num_q_vectors; 1892 struct ixgbevf_q_vector *q_vector; 1893 1894 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1895 1896 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1897 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 1898 if (!q_vector) 1899 goto err_out; 1900 q_vector->adapter = adapter; 1901 q_vector->v_idx = q_idx; 1902 netif_napi_add(adapter->netdev, &q_vector->napi, 1903 ixgbevf_poll, 64); 1904 adapter->q_vector[q_idx] = q_vector; 1905 } 1906 1907 return 0; 1908 1909err_out: 1910 while (q_idx) { 1911 q_idx--; 1912 q_vector = adapter->q_vector[q_idx]; 1913 netif_napi_del(&q_vector->napi); 1914 kfree(q_vector); 1915 adapter->q_vector[q_idx] = NULL; 1916 } 1917 return -ENOMEM; 1918} 1919 1920/** 1921 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 1922 * @adapter: board private structure to initialize 1923 * 1924 * This function frees the memory allocated to the q_vectors. In addition if 1925 * NAPI is enabled it will delete any references to the NAPI struct prior 1926 * to freeing the q_vector. 1927 **/ 1928static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 1929{ 1930 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1931 1932 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1933 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 1934 1935 adapter->q_vector[q_idx] = NULL; 1936 netif_napi_del(&q_vector->napi); 1937 kfree(q_vector); 1938 } 1939} 1940 1941/** 1942 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 1943 * @adapter: board private structure 1944 * 1945 **/ 1946static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 1947{ 1948 pci_disable_msix(adapter->pdev); 1949 kfree(adapter->msix_entries); 1950 adapter->msix_entries = NULL; 1951} 1952 1953/** 1954 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 1955 * @adapter: board private structure to initialize 1956 * 1957 **/ 1958static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 1959{ 1960 int err; 1961 1962 /* Number of supported queues */ 1963 ixgbevf_set_num_queues(adapter); 1964 1965 err = ixgbevf_set_interrupt_capability(adapter); 1966 if (err) { 1967 hw_dbg(&adapter->hw, 1968 "Unable to setup interrupt capabilities\n"); 1969 goto err_set_interrupt; 1970 } 1971 1972 err = ixgbevf_alloc_q_vectors(adapter); 1973 if (err) { 1974 hw_dbg(&adapter->hw, "Unable to allocate memory for queue " 1975 "vectors\n"); 1976 goto err_alloc_q_vectors; 1977 } 1978 1979 err = ixgbevf_alloc_queues(adapter); 1980 if (err) { 1981 pr_err("Unable to allocate memory for queues\n"); 1982 goto err_alloc_queues; 1983 } 1984 1985 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " 1986 "Tx Queue count = %u\n", 1987 (adapter->num_rx_queues > 1) ? 
"Enabled" : 1988 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 1989 1990 set_bit(__IXGBEVF_DOWN, &adapter->state); 1991 1992 return 0; 1993err_alloc_queues: 1994 ixgbevf_free_q_vectors(adapter); 1995err_alloc_q_vectors: 1996 ixgbevf_reset_interrupt_capability(adapter); 1997err_set_interrupt: 1998 return err; 1999} 2000 2001/** 2002 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings 2003 * @adapter: board private structure to clear interrupt scheme on 2004 * 2005 * We go through and clear interrupt specific resources and reset the structure 2006 * to pre-load conditions 2007 **/ 2008static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) 2009{ 2010 adapter->num_tx_queues = 0; 2011 adapter->num_rx_queues = 0; 2012 2013 ixgbevf_free_q_vectors(adapter); 2014 ixgbevf_reset_interrupt_capability(adapter); 2015} 2016 2017/** 2018 * ixgbevf_sw_init - Initialize general software structures 2019 * (struct ixgbevf_adapter) 2020 * @adapter: board private structure to initialize 2021 * 2022 * ixgbevf_sw_init initializes the Adapter private data structure. 2023 * Fields are initialized based on PCI device information and 2024 * OS network device settings (MTU size). 2025 **/ 2026static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2027{ 2028 struct ixgbe_hw *hw = &adapter->hw; 2029 struct pci_dev *pdev = adapter->pdev; 2030 int err; 2031 2032 /* PCI config space info */ 2033 2034 hw->vendor_id = pdev->vendor; 2035 hw->device_id = pdev->device; 2036 hw->revision_id = pdev->revision; 2037 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2038 hw->subsystem_device_id = pdev->subsystem_device; 2039 2040 hw->mbx.ops.init_params(hw); 2041 2042 /* assume legacy case in which PF would only give VF 2 queues */ 2043 hw->mac.max_tx_queues = 2; 2044 hw->mac.max_rx_queues = 2; 2045 2046 err = hw->mac.ops.reset_hw(hw); 2047 if (err) { 2048 dev_info(&pdev->dev, 2049 "PF still in reset state, assigning new address\n"); 2050 eth_hw_addr_random(adapter->netdev); 2051 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr, 2052 adapter->netdev->addr_len); 2053 } else { 2054 err = hw->mac.ops.init_hw(hw); 2055 if (err) { 2056 pr_err("init_shared_code failed: %d\n", err); 2057 goto out; 2058 } 2059 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, 2060 adapter->netdev->addr_len); 2061 } 2062 2063 /* lock to protect mailbox accesses */ 2064 spin_lock_init(&adapter->mbx_lock); 2065 2066 /* Enable dynamic interrupt throttling rates */ 2067 adapter->rx_itr_setting = 1; 2068 adapter->tx_itr_setting = 1; 2069 2070 /* set default ring sizes */ 2071 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 2072 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 2073 2074 set_bit(__IXGBEVF_DOWN, &adapter->state); 2075 return 0; 2076 2077out: 2078 return err; 2079} 2080 2081#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2082 { \ 2083 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2084 if (current_counter < last_counter) \ 2085 counter += 0x100000000LL; \ 2086 last_counter = current_counter; \ 2087 counter &= 0xFFFFFFFF00000000LL; \ 2088 counter |= current_counter; \ 2089 } 2090 2091#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2092 { \ 2093 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 2094 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 2095 u64 current_counter = (current_counter_msb << 32) | \ 2096 current_counter_lsb; \ 2097 if (current_counter < last_counter) \ 2098 counter += 0x1000000000LL; \ 
2099 last_counter = current_counter; \ 2100 counter &= 0xFFFFFFF000000000LL; \ 2101 counter |= current_counter; \ 2102 } 2103/** 2104 * ixgbevf_update_stats - Update the board statistics counters. 2105 * @adapter: board private structure 2106 **/ 2107void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2108{ 2109 struct ixgbe_hw *hw = &adapter->hw; 2110 2111 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2112 adapter->stats.vfgprc); 2113 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2114 adapter->stats.vfgptc); 2115 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2116 adapter->stats.last_vfgorc, 2117 adapter->stats.vfgorc); 2118 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2119 adapter->stats.last_vfgotc, 2120 adapter->stats.vfgotc); 2121 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2122 adapter->stats.vfmprc); 2123} 2124 2125/** 2126 * ixgbevf_watchdog - Timer Call-back 2127 * @data: pointer to adapter cast into an unsigned long 2128 **/ 2129static void ixgbevf_watchdog(unsigned long data) 2130{ 2131 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2132 struct ixgbe_hw *hw = &adapter->hw; 2133 u32 eics = 0; 2134 int i; 2135 2136 /* 2137 * Do the watchdog outside of interrupt context due to the lovely 2138 * delays that some of the newer hardware requires 2139 */ 2140 2141 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2142 goto watchdog_short_circuit; 2143 2144 /* get one bit for every active tx/rx interrupt vector */ 2145 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2146 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2147 if (qv->rx.ring || qv->tx.ring) 2148 eics |= 1 << i; 2149 } 2150 2151 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2152 2153watchdog_short_circuit: 2154 schedule_work(&adapter->watchdog_task); 2155} 2156 2157/** 2158 * ixgbevf_tx_timeout - Respond to a Tx Hang 2159 * @netdev: network interface device structure 2160 **/ 2161static void ixgbevf_tx_timeout(struct net_device *netdev) 2162{ 2163 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2164 2165 /* Do the reset outside of interrupt context */ 2166 schedule_work(&adapter->reset_task); 2167} 2168 2169static void ixgbevf_reset_task(struct work_struct *work) 2170{ 2171 struct ixgbevf_adapter *adapter; 2172 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2173 2174 /* If we're already down or resetting, just bail */ 2175 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2176 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2177 return; 2178 2179 adapter->tx_timeout_count++; 2180 2181 ixgbevf_reinit_locked(adapter); 2182} 2183 2184/** 2185 * ixgbevf_watchdog_task - worker thread to bring link up 2186 * @work: pointer to work_struct containing our data 2187 **/ 2188static void ixgbevf_watchdog_task(struct work_struct *work) 2189{ 2190 struct ixgbevf_adapter *adapter = container_of(work, 2191 struct ixgbevf_adapter, 2192 watchdog_task); 2193 struct net_device *netdev = adapter->netdev; 2194 struct ixgbe_hw *hw = &adapter->hw; 2195 u32 link_speed = adapter->link_speed; 2196 bool link_up = adapter->link_up; 2197 s32 need_reset; 2198 2199 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2200 2201 /* 2202 * Always check the link on the watchdog because we have 2203 * no LSC interrupt 2204 */ 2205 2206 spin_lock_bh(&adapter->mbx_lock); 2207 2208 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 2209 2210 spin_unlock_bh(&adapter->mbx_lock); 2211 2212 if 
(need_reset) { 2213 adapter->link_up = link_up; 2214 adapter->link_speed = link_speed; 2215 netif_carrier_off(netdev); 2216 netif_tx_stop_all_queues(netdev); 2217 schedule_work(&adapter->reset_task); 2218 goto pf_has_reset; 2219 } 2220 adapter->link_up = link_up; 2221 adapter->link_speed = link_speed; 2222 2223 if (link_up) { 2224 if (!netif_carrier_ok(netdev)) { 2225 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n", 2226 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2227 10 : 1); 2228 netif_carrier_on(netdev); 2229 netif_tx_wake_all_queues(netdev); 2230 } 2231 } else { 2232 adapter->link_up = false; 2233 adapter->link_speed = 0; 2234 if (netif_carrier_ok(netdev)) { 2235 hw_dbg(&adapter->hw, "NIC Link is Down\n"); 2236 netif_carrier_off(netdev); 2237 netif_tx_stop_all_queues(netdev); 2238 } 2239 } 2240 2241 ixgbevf_update_stats(adapter); 2242 2243pf_has_reset: 2244 /* Reset the timer */ 2245 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2246 mod_timer(&adapter->watchdog_timer, 2247 round_jiffies(jiffies + (2 * HZ))); 2248 2249 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2250} 2251 2252/** 2253 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2254 * @adapter: board private structure 2255 * @tx_ring: Tx descriptor ring for a specific queue 2256 * 2257 * Free all transmit software resources 2258 **/ 2259void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, 2260 struct ixgbevf_ring *tx_ring) 2261{ 2262 struct pci_dev *pdev = adapter->pdev; 2263 2264 ixgbevf_clean_tx_ring(adapter, tx_ring); 2265 2266 vfree(tx_ring->tx_buffer_info); 2267 tx_ring->tx_buffer_info = NULL; 2268 2269 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2270 tx_ring->dma); 2271 2272 tx_ring->desc = NULL; 2273} 2274 2275/** 2276 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2277 * @adapter: board private structure 2278 * 2279 * Free all transmit software resources 2280 **/ 2281static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2282{ 2283 int i; 2284 2285 for (i = 0; i < adapter->num_tx_queues; i++) 2286 if (adapter->tx_ring[i].desc) 2287 ixgbevf_free_tx_resources(adapter, 2288 &adapter->tx_ring[i]); 2289 2290} 2291 2292/** 2293 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2294 * @adapter: board private structure 2295 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2296 * 2297 * Return 0 on success, negative on failure 2298 **/ 2299int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, 2300 struct ixgbevf_ring *tx_ring) 2301{ 2302 struct pci_dev *pdev = adapter->pdev; 2303 int size; 2304 2305 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2306 tx_ring->tx_buffer_info = vzalloc(size); 2307 if (!tx_ring->tx_buffer_info) 2308 goto err; 2309 2310 /* round up to nearest 4K */ 2311 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2312 tx_ring->size = ALIGN(tx_ring->size, 4096); 2313 2314 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 2315 &tx_ring->dma, GFP_KERNEL); 2316 if (!tx_ring->desc) 2317 goto err; 2318 2319 tx_ring->next_to_use = 0; 2320 tx_ring->next_to_clean = 0; 2321 return 0; 2322 2323err: 2324 vfree(tx_ring->tx_buffer_info); 2325 tx_ring->tx_buffer_info = NULL; 2326 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " 2327 "descriptor ring\n"); 2328 return -ENOMEM; 2329} 2330 2331/** 2332 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources 2333 * @adapter: board private structure 2334 * 2335 * If this function returns 
with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto alloc_failed;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for "
		       "the receive descriptor ring\n");
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
2406 * 2407 * Return 0 on success, negative on failure 2408 **/ 2409static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2410{ 2411 int i, err = 0; 2412 2413 for (i = 0; i < adapter->num_rx_queues; i++) { 2414 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2415 if (!err) 2416 continue; 2417 hw_dbg(&adapter->hw, 2418 "Allocation for Rx Queue %u failed\n", i); 2419 break; 2420 } 2421 return err; 2422} 2423 2424/** 2425 * ixgbevf_free_rx_resources - Free Rx Resources 2426 * @adapter: board private structure 2427 * @rx_ring: ring to clean the resources from 2428 * 2429 * Free all receive software resources 2430 **/ 2431void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, 2432 struct ixgbevf_ring *rx_ring) 2433{ 2434 struct pci_dev *pdev = adapter->pdev; 2435 2436 ixgbevf_clean_rx_ring(adapter, rx_ring); 2437 2438 vfree(rx_ring->rx_buffer_info); 2439 rx_ring->rx_buffer_info = NULL; 2440 2441 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2442 rx_ring->dma); 2443 2444 rx_ring->desc = NULL; 2445} 2446 2447/** 2448 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2449 * @adapter: board private structure 2450 * 2451 * Free all receive software resources 2452 **/ 2453static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2454{ 2455 int i; 2456 2457 for (i = 0; i < adapter->num_rx_queues; i++) 2458 if (adapter->rx_ring[i].desc) 2459 ixgbevf_free_rx_resources(adapter, 2460 &adapter->rx_ring[i]); 2461} 2462 2463static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter) 2464{ 2465 struct ixgbe_hw *hw = &adapter->hw; 2466 struct ixgbevf_ring *rx_ring; 2467 unsigned int def_q = 0; 2468 unsigned int num_tcs = 0; 2469 unsigned int num_rx_queues = 1; 2470 int err, i; 2471 2472 spin_lock_bh(&adapter->mbx_lock); 2473 2474 /* fetch queue configuration from the PF */ 2475 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 2476 2477 spin_unlock_bh(&adapter->mbx_lock); 2478 2479 if (err) 2480 return err; 2481 2482 if (num_tcs > 1) { 2483 /* update default Tx ring register index */ 2484 adapter->tx_ring[0].reg_idx = def_q; 2485 2486 /* we need as many queues as traffic classes */ 2487 num_rx_queues = num_tcs; 2488 } 2489 2490 /* nothing to do if we have the correct number of queues */ 2491 if (adapter->num_rx_queues == num_rx_queues) 2492 return 0; 2493 2494 /* allocate new rings */ 2495 rx_ring = kcalloc(num_rx_queues, 2496 sizeof(struct ixgbevf_ring), GFP_KERNEL); 2497 if (!rx_ring) 2498 return -ENOMEM; 2499 2500 /* setup ring fields */ 2501 for (i = 0; i < num_rx_queues; i++) { 2502 rx_ring[i].count = adapter->rx_ring_count; 2503 rx_ring[i].queue_index = i; 2504 rx_ring[i].reg_idx = i; 2505 rx_ring[i].dev = &adapter->pdev->dev; 2506 rx_ring[i].netdev = adapter->netdev; 2507 } 2508 2509 /* free the existing ring and queues */ 2510 adapter->num_rx_queues = 0; 2511 kfree(adapter->rx_ring); 2512 2513 /* move new rings into position on the adapter struct */ 2514 adapter->rx_ring = rx_ring; 2515 adapter->num_rx_queues = num_rx_queues; 2516 2517 return 0; 2518} 2519 2520/** 2521 * ixgbevf_open - Called when a network interface is made active 2522 * @netdev: network interface device structure 2523 * 2524 * Returns 0 on success, negative value on failure 2525 * 2526 * The open entry point is called when a network interface is made 2527 * active by the system (IFF_UP). 
At this point all resources needed 2528 * for transmit and receive operations are allocated, the interrupt 2529 * handler is registered with the OS, the watchdog timer is started, 2530 * and the stack is notified that the interface is ready. 2531 **/ 2532static int ixgbevf_open(struct net_device *netdev) 2533{ 2534 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2535 struct ixgbe_hw *hw = &adapter->hw; 2536 int err; 2537 2538 /* disallow open during test */ 2539 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2540 return -EBUSY; 2541 2542 if (hw->adapter_stopped) { 2543 ixgbevf_reset(adapter); 2544 /* if adapter is still stopped then PF isn't up and 2545 * the vf can't start. */ 2546 if (hw->adapter_stopped) { 2547 err = IXGBE_ERR_MBX; 2548 pr_err("Unable to start - perhaps the PF Driver isn't " 2549 "up yet\n"); 2550 goto err_setup_reset; 2551 } 2552 } 2553 2554 ixgbevf_negotiate_api(adapter); 2555 2556 /* setup queue reg_idx and Rx queue count */ 2557 err = ixgbevf_setup_queues(adapter); 2558 if (err) 2559 goto err_setup_queues; 2560 2561 /* allocate transmit descriptors */ 2562 err = ixgbevf_setup_all_tx_resources(adapter); 2563 if (err) 2564 goto err_setup_tx; 2565 2566 /* allocate receive descriptors */ 2567 err = ixgbevf_setup_all_rx_resources(adapter); 2568 if (err) 2569 goto err_setup_rx; 2570 2571 ixgbevf_configure(adapter); 2572 2573 /* 2574 * Map the Tx/Rx rings to the vectors we were allotted. 2575 * if request_irq will be called in this function map_rings 2576 * must be called *before* up_complete 2577 */ 2578 ixgbevf_map_rings_to_vectors(adapter); 2579 2580 ixgbevf_up_complete(adapter); 2581 2582 /* clear any pending interrupts, may auto mask */ 2583 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2584 err = ixgbevf_request_irq(adapter); 2585 if (err) 2586 goto err_req_irq; 2587 2588 ixgbevf_irq_enable(adapter); 2589 2590 return 0; 2591 2592err_req_irq: 2593 ixgbevf_down(adapter); 2594 ixgbevf_free_irq(adapter); 2595err_setup_rx: 2596 ixgbevf_free_all_rx_resources(adapter); 2597err_setup_tx: 2598 ixgbevf_free_all_tx_resources(adapter); 2599err_setup_queues: 2600 ixgbevf_reset(adapter); 2601 2602err_setup_reset: 2603 2604 return err; 2605} 2606 2607/** 2608 * ixgbevf_close - Disables a network interface 2609 * @netdev: network interface device structure 2610 * 2611 * Returns 0, this is not allowed to fail 2612 * 2613 * The close entry point is called when an interface is de-activated 2614 * by the OS. The hardware is still under the drivers control, but 2615 * needs to be disabled. A global MAC reset is issued to stop the 2616 * hardware, and all transmit and receive resources are freed. 2617 **/ 2618static int ixgbevf_close(struct net_device *netdev) 2619{ 2620 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2621 2622 ixgbevf_down(adapter); 2623 ixgbevf_free_irq(adapter); 2624 2625 ixgbevf_free_all_tx_resources(adapter); 2626 ixgbevf_free_all_rx_resources(adapter); 2627 2628 return 0; 2629} 2630 2631static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, 2632 u32 vlan_macip_lens, u32 type_tucmd, 2633 u32 mss_l4len_idx) 2634{ 2635 struct ixgbe_adv_tx_context_desc *context_desc; 2636 u16 i = tx_ring->next_to_use; 2637 2638 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 2639 2640 i++; 2641 tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* mss_l4len_idx: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}

static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 skb->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}
	}

	/* vlan_macip_lens:
MACLEN, VLAN tag */ 2765 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2766 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2767 2768 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2769 type_tucmd, mss_l4len_idx); 2770 2771 return (skb->ip_summed == CHECKSUM_PARTIAL); 2772} 2773 2774static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2775 struct sk_buff *skb, u32 tx_flags, 2776 unsigned int first) 2777{ 2778 struct ixgbevf_tx_buffer *tx_buffer_info; 2779 unsigned int len; 2780 unsigned int total = skb->len; 2781 unsigned int offset = 0, size; 2782 int count = 0; 2783 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2784 unsigned int f; 2785 int i; 2786 2787 i = tx_ring->next_to_use; 2788 2789 len = min(skb_headlen(skb), total); 2790 while (len) { 2791 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2792 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2793 2794 tx_buffer_info->length = size; 2795 tx_buffer_info->mapped_as_page = false; 2796 tx_buffer_info->dma = dma_map_single(tx_ring->dev, 2797 skb->data + offset, 2798 size, DMA_TO_DEVICE); 2799 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) 2800 goto dma_error; 2801 tx_buffer_info->next_to_watch = i; 2802 2803 len -= size; 2804 total -= size; 2805 offset += size; 2806 count++; 2807 i++; 2808 if (i == tx_ring->count) 2809 i = 0; 2810 } 2811 2812 for (f = 0; f < nr_frags; f++) { 2813 const struct skb_frag_struct *frag; 2814 2815 frag = &skb_shinfo(skb)->frags[f]; 2816 len = min((unsigned int)skb_frag_size(frag), total); 2817 offset = 0; 2818 2819 while (len) { 2820 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2821 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2822 2823 tx_buffer_info->length = size; 2824 tx_buffer_info->dma = 2825 skb_frag_dma_map(tx_ring->dev, frag, 2826 offset, size, DMA_TO_DEVICE); 2827 if (dma_mapping_error(tx_ring->dev, 2828 tx_buffer_info->dma)) 2829 goto dma_error; 2830 tx_buffer_info->mapped_as_page = true; 2831 tx_buffer_info->next_to_watch = i; 2832 2833 len -= size; 2834 total -= size; 2835 offset += size; 2836 count++; 2837 i++; 2838 if (i == tx_ring->count) 2839 i = 0; 2840 } 2841 if (total == 0) 2842 break; 2843 } 2844 2845 if (i == 0) 2846 i = tx_ring->count - 1; 2847 else 2848 i = i - 1; 2849 tx_ring->tx_buffer_info[i].skb = skb; 2850 tx_ring->tx_buffer_info[first].next_to_watch = i; 2851 tx_ring->tx_buffer_info[first].time_stamp = jiffies; 2852 2853 return count; 2854 2855dma_error: 2856 dev_err(tx_ring->dev, "TX DMA map failed\n"); 2857 2858 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2859 tx_buffer_info->dma = 0; 2860 tx_buffer_info->next_to_watch = 0; 2861 count--; 2862 2863 /* clear timestamp and dma mappings for remaining portion of packet */ 2864 while (count >= 0) { 2865 count--; 2866 i--; 2867 if (i < 0) 2868 i += tx_ring->count; 2869 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2870 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 2871 } 2872 2873 return count; 2874} 2875 2876static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, 2877 int count, u32 paylen, u8 hdr_len) 2878{ 2879 union ixgbe_adv_tx_desc *tx_desc = NULL; 2880 struct ixgbevf_tx_buffer *tx_buffer_info; 2881 u32 olinfo_status = 0, cmd_type_len = 0; 2882 unsigned int i; 2883 2884 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 2885 2886 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 2887 2888 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 2889 2890 if (tx_flags & 
IXGBE_TX_FLAGS_VLAN) 2891 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 2892 2893 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 2894 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM; 2895 2896 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 2897 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 2898 2899 /* use index 1 context for tso */ 2900 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2901 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 2902 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM; 2903 2904 } 2905 2906 /* 2907 * Check Context must be set if Tx switch is enabled, which it 2908 * always is for case where virtual functions are running 2909 */ 2910 olinfo_status |= IXGBE_ADVTXD_CC; 2911 2912 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 2913 2914 i = tx_ring->next_to_use; 2915 while (count--) { 2916 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2917 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 2918 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 2919 tx_desc->read.cmd_type_len = 2920 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 2921 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2922 i++; 2923 if (i == tx_ring->count) 2924 i = 0; 2925 } 2926 2927 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 2928 2929 tx_ring->next_to_use = i; 2930} 2931 2932static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2933{ 2934 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); 2935 2936 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 2937 /* Herbert's original patch had: 2938 * smp_mb__after_netif_stop_queue(); 2939 * but since that doesn't exist yet, just open code it. */ 2940 smp_mb(); 2941 2942 /* We need to check again in a case another CPU has just 2943 * made room available. */ 2944 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) 2945 return -EBUSY; 2946 2947 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 2948 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 2949 ++adapter->restart_queue; 2950 return 0; 2951} 2952 2953static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2954{ 2955 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 2956 return 0; 2957 return __ixgbevf_maybe_stop_tx(tx_ring, size); 2958} 2959 2960static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2961{ 2962 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2963 struct ixgbevf_ring *tx_ring; 2964 unsigned int first; 2965 unsigned int tx_flags = 0; 2966 u8 hdr_len = 0; 2967 int r_idx = 0, tso; 2968 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 2969#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2970 unsigned short f; 2971#endif 2972 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); 2973 if (!dst_mac || is_link_local_ether_addr(dst_mac)) { 2974 dev_kfree_skb(skb); 2975 return NETDEV_TX_OK; 2976 } 2977 2978 tx_ring = &adapter->tx_ring[r_idx]; 2979 2980 /* 2981 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 2982 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 2983 * + 2 desc gap to keep tail from touching head, 2984 * + 1 desc for context descriptor, 2985 * otherwise try next time 2986 */ 2987#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2988 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 2989 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 2990#else 2991 count += skb_shinfo(skb)->nr_frags; 2992#endif 2993 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 2994 adapter->tx_busy++; 2995 return NETDEV_TX_BUSY; 2996 } 2997 2998 if (vlan_tx_tag_present(skb)) { 2999 tx_flags |= vlan_tx_tag_get(skb); 3000 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3001 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3002 } 3003 3004 first = tx_ring->next_to_use; 3005 3006 if (skb->protocol == htons(ETH_P_IP)) 3007 tx_flags |= IXGBE_TX_FLAGS_IPV4; 3008 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len); 3009 if (tso < 0) { 3010 dev_kfree_skb_any(skb); 3011 return NETDEV_TX_OK; 3012 } 3013 3014 if (tso) 3015 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; 3016 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags)) 3017 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3018 3019 ixgbevf_tx_queue(tx_ring, tx_flags, 3020 ixgbevf_tx_map(tx_ring, skb, tx_flags, first), 3021 skb->len, hdr_len); 3022 /* 3023 * Force memory writes to complete before letting h/w 3024 * know there are new descriptors to fetch. (Only 3025 * applicable for weak-ordered memory model archs, 3026 * such as IA-64). 
3027 */ 3028 wmb(); 3029 3030 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); 3031 3032 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); 3033 3034 return NETDEV_TX_OK; 3035} 3036 3037/** 3038 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 3039 * @netdev: network interface device structure 3040 * @p: pointer to an address structure 3041 * 3042 * Returns 0 on success, negative on failure 3043 **/ 3044static int ixgbevf_set_mac(struct net_device *netdev, void *p) 3045{ 3046 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3047 struct ixgbe_hw *hw = &adapter->hw; 3048 struct sockaddr *addr = p; 3049 3050 if (!is_valid_ether_addr(addr->sa_data)) 3051 return -EADDRNOTAVAIL; 3052 3053 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3054 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 3055 3056 spin_lock_bh(&adapter->mbx_lock); 3057 3058 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3059 3060 spin_unlock_bh(&adapter->mbx_lock); 3061 3062 return 0; 3063} 3064 3065/** 3066 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 3067 * @netdev: network interface device structure 3068 * @new_mtu: new value for maximum frame size 3069 * 3070 * Returns 0 on success, negative on failure 3071 **/ 3072static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3073{ 3074 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3075 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3076 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 3077 3078 switch (adapter->hw.api_version) { 3079 case ixgbe_mbox_api_11: 3080 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3081 break; 3082 default: 3083 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 3084 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3085 break; 3086 } 3087 3088 /* MTU < 68 is an error and causes problems on some kernels */ 3089 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 3090 return -EINVAL; 3091 3092 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 3093 netdev->mtu, new_mtu); 3094 /* must set new MTU before calling down or up */ 3095 netdev->mtu = new_mtu; 3096 3097 if (netif_running(netdev)) 3098 ixgbevf_reinit_locked(adapter); 3099 3100 return 0; 3101} 3102 3103static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) 3104{ 3105 struct net_device *netdev = pci_get_drvdata(pdev); 3106 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3107#ifdef CONFIG_PM 3108 int retval = 0; 3109#endif 3110 3111 netif_device_detach(netdev); 3112 3113 if (netif_running(netdev)) { 3114 rtnl_lock(); 3115 ixgbevf_down(adapter); 3116 ixgbevf_free_irq(adapter); 3117 ixgbevf_free_all_tx_resources(adapter); 3118 ixgbevf_free_all_rx_resources(adapter); 3119 rtnl_unlock(); 3120 } 3121 3122 ixgbevf_clear_interrupt_scheme(adapter); 3123 3124#ifdef CONFIG_PM 3125 retval = pci_save_state(pdev); 3126 if (retval) 3127 return retval; 3128 3129#endif 3130 pci_disable_device(pdev); 3131 3132 return 0; 3133} 3134 3135#ifdef CONFIG_PM 3136static int ixgbevf_resume(struct pci_dev *pdev) 3137{ 3138 struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev); 3139 struct net_device *netdev = adapter->netdev; 3140 u32 err; 3141 3142 pci_set_power_state(pdev, PCI_D0); 3143 pci_restore_state(pdev); 3144 /* 3145 * pci_restore_state clears dev->state_saved so call 3146 * pci_save_state to restore it. 
3147 */ 3148 pci_save_state(pdev); 3149 3150 err = pci_enable_device_mem(pdev); 3151 if (err) { 3152 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3153 return err; 3154 } 3155 pci_set_master(pdev); 3156 3157 rtnl_lock(); 3158 err = ixgbevf_init_interrupt_scheme(adapter); 3159 rtnl_unlock(); 3160 if (err) { 3161 dev_err(&pdev->dev, "Cannot initialize interrupts\n"); 3162 return err; 3163 } 3164 3165 ixgbevf_reset(adapter); 3166 3167 if (netif_running(netdev)) { 3168 err = ixgbevf_open(netdev); 3169 if (err) 3170 return err; 3171 } 3172 3173 netif_device_attach(netdev); 3174 3175 return err; 3176} 3177 3178#endif /* CONFIG_PM */ 3179static void ixgbevf_shutdown(struct pci_dev *pdev) 3180{ 3181 ixgbevf_suspend(pdev, PMSG_SUSPEND); 3182} 3183 3184static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3185 struct rtnl_link_stats64 *stats) 3186{ 3187 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3188 unsigned int start; 3189 u64 bytes, packets; 3190 const struct ixgbevf_ring *ring; 3191 int i; 3192 3193 ixgbevf_update_stats(adapter); 3194 3195 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3196 3197 for (i = 0; i < adapter->num_rx_queues; i++) { 3198 ring = &adapter->rx_ring[i]; 3199 do { 3200 start = u64_stats_fetch_begin_bh(&ring->syncp); 3201 bytes = ring->total_bytes; 3202 packets = ring->total_packets; 3203 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3204 stats->rx_bytes += bytes; 3205 stats->rx_packets += packets; 3206 } 3207 3208 for (i = 0; i < adapter->num_tx_queues; i++) { 3209 ring = &adapter->tx_ring[i]; 3210 do { 3211 start = u64_stats_fetch_begin_bh(&ring->syncp); 3212 bytes = ring->total_bytes; 3213 packets = ring->total_packets; 3214 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3215 stats->tx_bytes += bytes; 3216 stats->tx_packets += packets; 3217 } 3218 3219 return stats; 3220} 3221 3222static const struct net_device_ops ixgbevf_netdev_ops = { 3223 .ndo_open = ixgbevf_open, 3224 .ndo_stop = ixgbevf_close, 3225 .ndo_start_xmit = ixgbevf_xmit_frame, 3226 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3227 .ndo_get_stats64 = ixgbevf_get_stats, 3228 .ndo_validate_addr = eth_validate_addr, 3229 .ndo_set_mac_address = ixgbevf_set_mac, 3230 .ndo_change_mtu = ixgbevf_change_mtu, 3231 .ndo_tx_timeout = ixgbevf_tx_timeout, 3232 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3233 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3234}; 3235 3236static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3237{ 3238 dev->netdev_ops = &ixgbevf_netdev_ops; 3239 ixgbevf_set_ethtool_ops(dev); 3240 dev->watchdog_timeo = 5 * HZ; 3241} 3242 3243/** 3244 * ixgbevf_probe - Device Initialization Routine 3245 * @pdev: PCI device information struct 3246 * @ent: entry in ixgbevf_pci_tbl 3247 * 3248 * Returns 0 on success, negative on failure 3249 * 3250 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3251 * The OS initialization, configuring of the adapter private structure, 3252 * and a hardware reset occur. 
3253 **/ 3254static int __devinit ixgbevf_probe(struct pci_dev *pdev, 3255 const struct pci_device_id *ent) 3256{ 3257 struct net_device *netdev; 3258 struct ixgbevf_adapter *adapter = NULL; 3259 struct ixgbe_hw *hw = NULL; 3260 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3261 static int cards_found; 3262 int err, pci_using_dac; 3263 3264 err = pci_enable_device(pdev); 3265 if (err) 3266 return err; 3267 3268 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3269 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 3270 pci_using_dac = 1; 3271 } else { 3272 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3273 if (err) { 3274 err = dma_set_coherent_mask(&pdev->dev, 3275 DMA_BIT_MASK(32)); 3276 if (err) { 3277 dev_err(&pdev->dev, "No usable DMA " 3278 "configuration, aborting\n"); 3279 goto err_dma; 3280 } 3281 } 3282 pci_using_dac = 0; 3283 } 3284 3285 err = pci_request_regions(pdev, ixgbevf_driver_name); 3286 if (err) { 3287 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3288 goto err_pci_reg; 3289 } 3290 3291 pci_set_master(pdev); 3292 3293 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3294 MAX_TX_QUEUES); 3295 if (!netdev) { 3296 err = -ENOMEM; 3297 goto err_alloc_etherdev; 3298 } 3299 3300 SET_NETDEV_DEV(netdev, &pdev->dev); 3301 3302 pci_set_drvdata(pdev, netdev); 3303 adapter = netdev_priv(netdev); 3304 3305 adapter->netdev = netdev; 3306 adapter->pdev = pdev; 3307 hw = &adapter->hw; 3308 hw->back = adapter; 3309 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3310 3311 /* 3312 * call save state here in standalone driver because it relies on 3313 * adapter struct to exist, and needs to call netdev_priv 3314 */ 3315 pci_save_state(pdev); 3316 3317 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3318 pci_resource_len(pdev, 0)); 3319 if (!hw->hw_addr) { 3320 err = -EIO; 3321 goto err_ioremap; 3322 } 3323 3324 ixgbevf_assign_netdev_ops(netdev); 3325 3326 adapter->bd_number = cards_found; 3327 3328 /* Setup hw api */ 3329 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3330 hw->mac.type = ii->mac; 3331 3332 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3333 sizeof(struct ixgbe_mbx_operations)); 3334 3335 /* setup the private structure */ 3336 err = ixgbevf_sw_init(adapter); 3337 if (err) 3338 goto err_sw_init; 3339 3340 /* The HW MAC address was set and/or determined in sw_init */ 3341 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 3342 3343 if (!is_valid_ether_addr(netdev->dev_addr)) { 3344 pr_err("invalid MAC address\n"); 3345 err = -EIO; 3346 goto err_sw_init; 3347 } 3348 3349 netdev->hw_features = NETIF_F_SG | 3350 NETIF_F_IP_CSUM | 3351 NETIF_F_IPV6_CSUM | 3352 NETIF_F_TSO | 3353 NETIF_F_TSO6 | 3354 NETIF_F_RXCSUM; 3355 3356 netdev->features = netdev->hw_features | 3357 NETIF_F_HW_VLAN_TX | 3358 NETIF_F_HW_VLAN_RX | 3359 NETIF_F_HW_VLAN_FILTER; 3360 3361 netdev->vlan_features |= NETIF_F_TSO; 3362 netdev->vlan_features |= NETIF_F_TSO6; 3363 netdev->vlan_features |= NETIF_F_IP_CSUM; 3364 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3365 netdev->vlan_features |= NETIF_F_SG; 3366 3367 if (pci_using_dac) 3368 netdev->features |= NETIF_F_HIGHDMA; 3369 3370 netdev->priv_flags |= IFF_UNICAST_FLT; 3371 3372 init_timer(&adapter->watchdog_timer); 3373 adapter->watchdog_timer.function = ixgbevf_watchdog; 3374 adapter->watchdog_timer.data = (unsigned long)adapter; 3375 3376 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3377 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3378 3379 
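	/* Set up queue counts, MSI-X vectors, q_vectors and rings before the
	 * netdev is registered, so the interface is fully wired up by the
	 * time it becomes visible to user space.
	 */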
	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
3491 */ 3492static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) 3493{ 3494 struct net_device *netdev = pci_get_drvdata(pdev); 3495 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3496 3497 if (pci_enable_device_mem(pdev)) { 3498 dev_err(&pdev->dev, 3499 "Cannot re-enable PCI device after reset.\n"); 3500 return PCI_ERS_RESULT_DISCONNECT; 3501 } 3502 3503 pci_set_master(pdev); 3504 3505 ixgbevf_reset(adapter); 3506 3507 return PCI_ERS_RESULT_RECOVERED; 3508} 3509 3510/** 3511 * ixgbevf_io_resume - called when traffic can start flowing again. 3512 * @pdev: Pointer to PCI device 3513 * 3514 * This callback is called when the error recovery driver tells us that 3515 * its OK to resume normal operation. Implementation resembles the 3516 * second-half of the ixgbevf_resume routine. 3517 */ 3518static void ixgbevf_io_resume(struct pci_dev *pdev) 3519{ 3520 struct net_device *netdev = pci_get_drvdata(pdev); 3521 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3522 3523 if (netif_running(netdev)) 3524 ixgbevf_up(adapter); 3525 3526 netif_device_attach(netdev); 3527} 3528 3529/* PCI Error Recovery (ERS) */ 3530static const struct pci_error_handlers ixgbevf_err_handler = { 3531 .error_detected = ixgbevf_io_error_detected, 3532 .slot_reset = ixgbevf_io_slot_reset, 3533 .resume = ixgbevf_io_resume, 3534}; 3535 3536static struct pci_driver ixgbevf_driver = { 3537 .name = ixgbevf_driver_name, 3538 .id_table = ixgbevf_pci_tbl, 3539 .probe = ixgbevf_probe, 3540 .remove = __devexit_p(ixgbevf_remove), 3541#ifdef CONFIG_PM 3542 /* Power Management Hooks */ 3543 .suspend = ixgbevf_suspend, 3544 .resume = ixgbevf_resume, 3545#endif 3546 .shutdown = ixgbevf_shutdown, 3547 .err_handler = &ixgbevf_err_handler 3548}; 3549 3550/** 3551 * ixgbevf_init_module - Driver Registration Routine 3552 * 3553 * ixgbevf_init_module is the first routine called when the driver is 3554 * loaded. All it does is register with the PCI subsystem. 3555 **/ 3556static int __init ixgbevf_init_module(void) 3557{ 3558 int ret; 3559 pr_info("%s - version %s\n", ixgbevf_driver_string, 3560 ixgbevf_driver_version); 3561 3562 pr_info("%s\n", ixgbevf_copyright); 3563 3564 ret = pci_register_driver(&ixgbevf_driver); 3565 return ret; 3566} 3567 3568module_init(ixgbevf_init_module); 3569 3570/** 3571 * ixgbevf_exit_module - Driver Exit Cleanup Routine 3572 * 3573 * ixgbevf_exit_module is called just before the driver is removed 3574 * from memory. 3575 **/ 3576static void __exit ixgbevf_exit_module(void) 3577{ 3578 pci_unregister_driver(&ixgbevf_driver); 3579} 3580 3581#ifdef DEBUG 3582/** 3583 * ixgbevf_get_hw_dev_name - return device name string 3584 * used by hardware layer to print debugging information 3585 **/ 3586char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3587{ 3588 struct ixgbevf_adapter *adapter = hw->back; 3589 return adapter->netdev->name; 3590} 3591 3592#endif 3593module_exit(ixgbevf_exit_module); 3594 3595/* ixgbevf_main.c */ 3596