ixgbevf_main.c revision b876a744a228308ecf75466831453f0863d253db
/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
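/* The "debug" parameter follows the kernel's usual netif message-level
 * convention: the default of -1 asks the driver to fall back to
 * DEFAULT_MSG_ENABLE, which the probe path (not part of this excerpt) is
 * expected to apply via netif_msg_init().
 */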
/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);
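/* Worked example of the worst-case math above: a single Tx descriptor can
 * carry at most IXGBE_MAX_DATA_PER_TXD = 1 << 14 = 16384 bytes, so a
 * 40000-byte buffer costs TXD_USE_COUNT(40000) = DIV_ROUND_UP(40000, 16384)
 * = 3 descriptors.  DESC_NEEDED budgets one descriptor per possible page
 * fragment plus slack (the "+ 4", e.g. for the linear head and a context
 * descriptor); the exact breakdown of the slack is an interpretation, not
 * spelled out in the source.
 */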
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	return count < tx_ring->count;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		ring->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		ring->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	ring->hw_csum_rx_good++;
}
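/* Summary of the ixgbevf_rx_checksum() outcomes above:
 *   IPCS set with IPE set      -> count hw_csum_rx_error, leave CHECKSUM_NONE
 *   L4CS clear                 -> leave CHECKSUM_NONE (no L4 checksum info)
 *   L4CS set with TCPE set     -> count hw_csum_rx_error, leave CHECKSUM_NONE
 *   L4CS set, no error         -> CHECKSUM_UNNECESSARY, count hw_csum_rx_good
 */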
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);

		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;

			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    !(compare_ether_addr(adapter->netdev->dev_addr,
					 eth_hdr(skb)->h_source))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return !!budget;
}
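/* Replenishment in ixgbevf_clean_rx_irq() is deliberately batched: the ring
 * tail (VFRDT) is only bumped after IXGBEVF_RX_BUFFER_WRITE descriptors have
 * been consumed, and once more for any remainder after the loop, so the
 * MMIO write cost is amortized over many descriptors instead of being paid
 * per packet.
 */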
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
						       per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}
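/* Example of the budget split above: with budget = 64 and three Rx rings on
 * one q_vector, per_ring_budget = max(64 / 3, 1) = 21, so each ring may
 * clean at most 21 packets before the vector reports incomplete work and
 * stays in polling mode.
 */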
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}
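/* A note on the cached ITR encoding (assumption based on the 82599/X540
 * register layout): VTEITR's interval field sits above bit 3 and counts in
 * 2 usec units, so the cached q_vector->itr values are kept in register
 * format and q_vector->itr >> 2, as used by ixgbevf_update_itr() below,
 * recovers the interval in whole microseconds.
 */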
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
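/* Worked example: with q_vector->itr = 200 the timeslice is 200 >> 2 =
 * 50 usecs; cleaning 3000 bytes in that interrupt gives bytes_perint = 60,
 * which from low_latency (> 20) moves the ring container to bulk_latency.
 */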
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
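/* The smoothing above moves the ITR gradually rather than jumping: going
 * from q_vector->itr = 200 toward new_itr = 500 yields
 * (10 * 500 * 200) / ((9 * 500) + 200) = 1000000 / 4700 = 212 in integer
 * math, so a traffic shift must persist over several interrupts before the
 * interrupt rate fully converges.
 */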
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msg;
	bool got_ack = false;

	hw->mac.get_link_status = 1;
	if (!hw->mbx.ops.check_for_ack(hw))
		got_ack = true;

	if (!hw->mbx.ops.check_for_msg(hw)) {
		hw->mbx.ops.read(hw, &msg, 1);

		if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 1));
			adapter->link_up = false;
		}

		if (msg & IXGBE_VT_MSGTYPE_NACK)
			dev_info(&pdev->dev,
				 "Last Request of type %2.2x to PF Nacked\n",
				 msg & 0xFF);
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
	}

	/* checking for the ack clears the PFACK bit.  Place
	 * it back in the v2p_mailbox cache so that anyone
	 * polling for an ack will not miss it
	 */
	if (got_ack)
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
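/* Example of the fallback grouping above: with 4 Rx queues and q_vectors = 2,
 * vector 0 takes DIV_ROUND_UP(4, 2) = 2 rings (rxr 0-1) and vector 1 takes
 * DIV_ROUND_UP(2, 1) = 2 rings (rxr 2-3); Tx queues are spread the same way,
 * so the remainder always lands on the later vectors.
 */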
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}
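/* Sizing example for the logic above: at the default 1500-byte MTU,
 * max_frame = 1500 + ETH_HLEN + ETH_FCS_LEN = 1518 and, after the VLAN_HLEN
 * allowance, 1522 bytes, which fits MAXIMUM_ETHERNET_VLAN_SIZE on X540 VFs
 * and the 2K bucket otherwise; a 9000-byte MTU (9022 with overhead) lands
 * in the 10K bucket.  Exact bucket constants live in ixgbevf.h.
 */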
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 rdlen;

	/* PSRTYPE must be initialized in 82599 */
	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);

		ixgbevf_configure_srrctl(adapter, j);
	}
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
				adapter->rx_ring[rxr].count - 1);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
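/* The good-octet counters (VFGORC/VFGOTC) are wider than 32 bits and are
 * exposed as separate LSB/MSB registers, hence the << 32 merge above; the
 * matching UPDATE_VF_COUNTER_36bit() macro later in this file wraps at 2^36
 * (0x1000000000) for the same reason.
 */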
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;

		/* allocate resources on the ring */
		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
		if (err) {
			while (i) {
				i--;
				ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
			}
			kfree(rx_ring);
			return err;
		}
	}

	/* free the existing rings and queues */
	ixgbevf_free_all_rx_resources(adapter);
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	/* reset ring to vector mapping */
	ixgbevf_reset_q_vectors(adapter);
	ixgbevf_map_rings_to_vectors(adapter);

	return 0;
}
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_negotiate_api(adapter);

	ixgbevf_reset_queues(adapter);

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int err = 0;
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err || err < 0) /* Success or a nasty failure. */
			break;
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold)
		err = -ENOMEM;

	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}

	return err;
}
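/* The retry loop above relies on the old pci_enable_msix() contract: a
 * positive return value is the number of vectors the platform could
 * actually provide, so a request for 3 that returns 2 is simply retried as
 * a request for 2.  Anything that bottoms out below MIN_MSIX_COUNT (one
 * queue vector plus the mailbox/other vector) is treated as failure.
 */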
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		/* reg_idx may be remapped later by DCB config */
		adapter->tx_ring[i].reg_idx = i;
		adapter->tx_ring[i].dev = &adapter->pdev->dev;
		adapter->tx_ring[i].netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
		adapter->rx_ring[i].dev = &adapter->pdev->dev;
		adapter->rx_ring[i].netdev = adapter->netdev;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		goto out;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}
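/* Budget example: with the single Rx and single Tx queue set up by
 * ixgbevf_set_num_queues(), v_budget = max(1, 1) = 1 bounded by the CPU
 * count, plus NON_Q_VECTORS for the mailbox/link interrupt -- two MSI-X
 * entries in total on this VF (assuming NON_Q_VECTORS is 1, which the
 * request_irq layout above implies).
 */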
1899 **/
1900static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1901{
1902 int q_idx, num_q_vectors;
1903 struct ixgbevf_q_vector *q_vector;
1904
1905 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1906
1907 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1908 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1909 if (!q_vector)
1910 goto err_out;
1911 q_vector->adapter = adapter;
1912 q_vector->v_idx = q_idx;
1913 netif_napi_add(adapter->netdev, &q_vector->napi,
1914 ixgbevf_poll, 64);
1915 adapter->q_vector[q_idx] = q_vector;
1916 }
1917
1918 return 0;
1919
1920err_out:
1921 while (q_idx) {
1922 q_idx--;
1923 q_vector = adapter->q_vector[q_idx];
1924 netif_napi_del(&q_vector->napi);
1925 kfree(q_vector);
1926 adapter->q_vector[q_idx] = NULL;
1927 }
1928 return -ENOMEM;
1929}
1930
1931/**
1932 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
1933 * @adapter: board private structure
1934 *
1935 * This function frees the memory allocated to the q_vectors. In addition if
1936 * NAPI is enabled it will delete any references to the NAPI struct prior
1937 * to freeing the q_vector.
1938 **/
1939static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1940{
1941 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1942
1943 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1944 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1945
1946 adapter->q_vector[q_idx] = NULL;
1947 netif_napi_del(&q_vector->napi);
1948 kfree(q_vector);
1949 }
1950}
1951
1952/**
1953 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
1954 * @adapter: board private structure
1955 *
1956 **/
1957static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
1958{
1959 pci_disable_msix(adapter->pdev);
1960 kfree(adapter->msix_entries);
1961 adapter->msix_entries = NULL;
1962}
1963
1964/**
1965 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
1966 * @adapter: board private structure to initialize
1967 *
1968 **/
1969static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
1970{
1971 int err;
1972
1973 /* Number of supported queues */
1974 ixgbevf_set_num_queues(adapter);
1975
1976 err = ixgbevf_set_interrupt_capability(adapter);
1977 if (err) {
1978 hw_dbg(&adapter->hw,
1979 "Unable to set up interrupt capabilities\n");
1980 goto err_set_interrupt;
1981 }
1982
1983 err = ixgbevf_alloc_q_vectors(adapter);
1984 if (err) {
1985 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
1986 "vectors\n");
1987 goto err_alloc_q_vectors;
1988 }
1989
1990 err = ixgbevf_alloc_queues(adapter);
1991 if (err) {
1992 pr_err("Unable to allocate memory for queues\n");
1993 goto err_alloc_queues;
1994 }
1995
1996 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
1997 "Tx Queue count = %u\n",
1998 (adapter->num_rx_queues > 1) ? "Enabled" :
1999 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2000
2001 set_bit(__IXGBEVF_DOWN, &adapter->state);
2002
2003 return 0;
2004err_alloc_queues:
2005 ixgbevf_free_q_vectors(adapter);
2006err_alloc_q_vectors:
2007 ixgbevf_reset_interrupt_capability(adapter);
2008err_set_interrupt:
2009 return err;
2010}
2011
2012/**
2013 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2014 * @adapter: board private structure to clear interrupt scheme on
2015 *
2016 * We go through and clear interrupt specific resources and reset the structure
2017 * to pre-load conditions
2018 **/
2019static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2020{
2021 adapter->num_tx_queues = 0;
2022 adapter->num_rx_queues = 0;
2023
2024 ixgbevf_free_q_vectors(adapter);
2025 ixgbevf_reset_interrupt_capability(adapter);
2026}
2027
2028/**
2029 * ixgbevf_sw_init - Initialize general software structures
2030 * (struct ixgbevf_adapter)
2031 * @adapter: board private structure to initialize
2032 *
2033 * ixgbevf_sw_init initializes the Adapter private data structure.
2034 * Fields are initialized based on PCI device information and
2035 * OS network device settings (MTU size).
2036 **/
2037static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2038{
2039 struct ixgbe_hw *hw = &adapter->hw;
2040 struct pci_dev *pdev = adapter->pdev;
2041 int err;
2042
2043 /* PCI config space info */
2044
2045 hw->vendor_id = pdev->vendor;
2046 hw->device_id = pdev->device;
2047 hw->revision_id = pdev->revision;
2048 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2049 hw->subsystem_device_id = pdev->subsystem_device;
2050
2051 hw->mbx.ops.init_params(hw);
2052
2053 /* assume legacy case in which PF would only give VF 2 queues */
2054 hw->mac.max_tx_queues = 2;
2055 hw->mac.max_rx_queues = 2;
2056
2057 err = hw->mac.ops.reset_hw(hw);
2058 if (err) {
2059 dev_info(&pdev->dev,
2060 "PF still in reset state, assigning new address\n");
2061 eth_hw_addr_random(adapter->netdev);
2062 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
2063 adapter->netdev->addr_len);
2064 } else {
2065 err = hw->mac.ops.init_hw(hw);
2066 if (err) {
2067 pr_err("init_hw failed: %d\n", err);
2068 goto out;
2069 }
2070 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
2071 adapter->netdev->addr_len);
2072 }
2073
2074 /* lock to protect mailbox accesses */
2075 spin_lock_init(&adapter->mbx_lock);
2076
2077 /* Enable dynamic interrupt throttling rates */
2078 adapter->rx_itr_setting = 1;
2079 adapter->tx_itr_setting = 1;
2080
2081 /* set default ring sizes */
2082 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2083 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2084
2085 set_bit(__IXGBEVF_DOWN, &adapter->state);
2086 return 0;
2087
2088out:
2089 return err;
2090}
2091
2092#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2093 { \
2094 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2095 if (current_counter < last_counter) \
2096 counter += 0x100000000LL; \
2097 last_counter = current_counter; \
2098 counter &= 0xFFFFFFFF00000000LL; \
2099 counter |= current_counter; \
2100 }
2101
2102#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2103 { \
2104 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2105 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2106 u64 current_counter = (current_counter_msb << 32) | \
2107 current_counter_lsb; \
2108 if (current_counter < last_counter) \
2109 counter += 0x1000000000LL; \
2110
last_counter = current_counter; \ 2111 counter &= 0xFFFFFFF000000000LL; \ 2112 counter |= current_counter; \ 2113 } 2114/** 2115 * ixgbevf_update_stats - Update the board statistics counters. 2116 * @adapter: board private structure 2117 **/ 2118void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2119{ 2120 struct ixgbe_hw *hw = &adapter->hw; 2121 int i; 2122 2123 if (!adapter->link_up) 2124 return; 2125 2126 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2127 adapter->stats.vfgprc); 2128 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2129 adapter->stats.vfgptc); 2130 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2131 adapter->stats.last_vfgorc, 2132 adapter->stats.vfgorc); 2133 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2134 adapter->stats.last_vfgotc, 2135 adapter->stats.vfgotc); 2136 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2137 adapter->stats.vfmprc); 2138 2139 for (i = 0; i < adapter->num_rx_queues; i++) { 2140 adapter->hw_csum_rx_error += 2141 adapter->rx_ring[i].hw_csum_rx_error; 2142 adapter->hw_csum_rx_good += 2143 adapter->rx_ring[i].hw_csum_rx_good; 2144 adapter->rx_ring[i].hw_csum_rx_error = 0; 2145 adapter->rx_ring[i].hw_csum_rx_good = 0; 2146 } 2147} 2148 2149/** 2150 * ixgbevf_watchdog - Timer Call-back 2151 * @data: pointer to adapter cast into an unsigned long 2152 **/ 2153static void ixgbevf_watchdog(unsigned long data) 2154{ 2155 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2156 struct ixgbe_hw *hw = &adapter->hw; 2157 u32 eics = 0; 2158 int i; 2159 2160 /* 2161 * Do the watchdog outside of interrupt context due to the lovely 2162 * delays that some of the newer hardware requires 2163 */ 2164 2165 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2166 goto watchdog_short_circuit; 2167 2168 /* get one bit for every active tx/rx interrupt vector */ 2169 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2170 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2171 if (qv->rx.ring || qv->tx.ring) 2172 eics |= 1 << i; 2173 } 2174 2175 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2176 2177watchdog_short_circuit: 2178 schedule_work(&adapter->watchdog_task); 2179} 2180 2181/** 2182 * ixgbevf_tx_timeout - Respond to a Tx Hang 2183 * @netdev: network interface device structure 2184 **/ 2185static void ixgbevf_tx_timeout(struct net_device *netdev) 2186{ 2187 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2188 2189 /* Do the reset outside of interrupt context */ 2190 schedule_work(&adapter->reset_task); 2191} 2192 2193static void ixgbevf_reset_task(struct work_struct *work) 2194{ 2195 struct ixgbevf_adapter *adapter; 2196 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2197 2198 /* If we're already down or resetting, just bail */ 2199 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2200 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2201 return; 2202 2203 adapter->tx_timeout_count++; 2204 2205 ixgbevf_reinit_locked(adapter); 2206} 2207 2208/** 2209 * ixgbevf_watchdog_task - worker thread to bring link up 2210 * @work: pointer to work_struct containing our data 2211 **/ 2212static void ixgbevf_watchdog_task(struct work_struct *work) 2213{ 2214 struct ixgbevf_adapter *adapter = container_of(work, 2215 struct ixgbevf_adapter, 2216 watchdog_task); 2217 struct net_device *netdev = adapter->netdev; 2218 struct ixgbe_hw *hw = &adapter->hw; 2219 u32 link_speed = adapter->link_speed; 2220 bool link_up = adapter->link_up; 2221 s32 
need_reset; 2222 2223 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2224 2225 /* 2226 * Always check the link on the watchdog because we have 2227 * no LSC interrupt 2228 */ 2229 spin_lock_bh(&adapter->mbx_lock); 2230 2231 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 2232 2233 spin_unlock_bh(&adapter->mbx_lock); 2234 2235 if (need_reset) { 2236 adapter->link_up = link_up; 2237 adapter->link_speed = link_speed; 2238 netif_carrier_off(netdev); 2239 netif_tx_stop_all_queues(netdev); 2240 schedule_work(&adapter->reset_task); 2241 goto pf_has_reset; 2242 } 2243 adapter->link_up = link_up; 2244 adapter->link_speed = link_speed; 2245 2246 if (link_up) { 2247 if (!netif_carrier_ok(netdev)) { 2248 char *link_speed_string; 2249 switch (link_speed) { 2250 case IXGBE_LINK_SPEED_10GB_FULL: 2251 link_speed_string = "10 Gbps"; 2252 break; 2253 case IXGBE_LINK_SPEED_1GB_FULL: 2254 link_speed_string = "1 Gbps"; 2255 break; 2256 case IXGBE_LINK_SPEED_100_FULL: 2257 link_speed_string = "100 Mbps"; 2258 break; 2259 default: 2260 link_speed_string = "unknown speed"; 2261 break; 2262 } 2263 dev_info(&adapter->pdev->dev, 2264 "NIC Link is Up, %s\n", link_speed_string); 2265 netif_carrier_on(netdev); 2266 netif_tx_wake_all_queues(netdev); 2267 } 2268 } else { 2269 adapter->link_up = false; 2270 adapter->link_speed = 0; 2271 if (netif_carrier_ok(netdev)) { 2272 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); 2273 netif_carrier_off(netdev); 2274 netif_tx_stop_all_queues(netdev); 2275 } 2276 } 2277 2278 ixgbevf_update_stats(adapter); 2279 2280pf_has_reset: 2281 /* Reset the timer */ 2282 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2283 mod_timer(&adapter->watchdog_timer, 2284 round_jiffies(jiffies + (2 * HZ))); 2285 2286 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2287} 2288 2289/** 2290 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2291 * @adapter: board private structure 2292 * @tx_ring: Tx descriptor ring for a specific queue 2293 * 2294 * Free all transmit software resources 2295 **/ 2296void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, 2297 struct ixgbevf_ring *tx_ring) 2298{ 2299 struct pci_dev *pdev = adapter->pdev; 2300 2301 ixgbevf_clean_tx_ring(adapter, tx_ring); 2302 2303 vfree(tx_ring->tx_buffer_info); 2304 tx_ring->tx_buffer_info = NULL; 2305 2306 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2307 tx_ring->dma); 2308 2309 tx_ring->desc = NULL; 2310} 2311 2312/** 2313 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2314 * @adapter: board private structure 2315 * 2316 * Free all transmit software resources 2317 **/ 2318static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2319{ 2320 int i; 2321 2322 for (i = 0; i < adapter->num_tx_queues; i++) 2323 if (adapter->tx_ring[i].desc) 2324 ixgbevf_free_tx_resources(adapter, 2325 &adapter->tx_ring[i]); 2326 2327} 2328 2329/** 2330 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2331 * @adapter: board private structure 2332 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2333 * 2334 * Return 0 on success, negative on failure 2335 **/ 2336int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, 2337 struct ixgbevf_ring *tx_ring) 2338{ 2339 struct pci_dev *pdev = adapter->pdev; 2340 int size; 2341 2342 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2343 tx_ring->tx_buffer_info = vzalloc(size); 2344 if (!tx_ring->tx_buffer_info) 2345 goto err; 2346 2347 /* round up to nearest 4K */ 2348 
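 /* With the default ring size (IXGBEVF_DEFAULT_TXD, 1024 at the time
  * of writing) this works out to 1024 * 16 bytes = 16384 bytes, which
  * is already a 4K multiple; the ALIGN() below only pays off for odd
  * user-configured descriptor counts. */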
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2349 tx_ring->size = ALIGN(tx_ring->size, 4096);
2350
2351 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2352 &tx_ring->dma, GFP_KERNEL);
2353 if (!tx_ring->desc)
2354 goto err;
2355
2356 tx_ring->next_to_use = 0;
2357 tx_ring->next_to_clean = 0;
2358 return 0;
2359
2360err:
2361 vfree(tx_ring->tx_buffer_info);
2362 tx_ring->tx_buffer_info = NULL;
2363 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2364 "descriptor ring\n");
2365 return -ENOMEM;
2366}
2367
2368/**
2369 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2370 * @adapter: board private structure
2371 *
2372 * If this function returns with an error, then it's possible one or
2373 * more of the rings is populated (while the rest are not). It is the
2374 * caller's duty to clean those orphaned rings.
2375 *
2376 * Return 0 on success, negative on failure
2377 **/
2378static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2379{
2380 int i, err = 0;
2381
2382 for (i = 0; i < adapter->num_tx_queues; i++) {
2383 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2384 if (!err)
2385 continue;
2386 hw_dbg(&adapter->hw,
2387 "Allocation for Tx Queue %u failed\n", i);
2388 break;
2389 }
2390
2391 return err;
2392}
2393
2394/**
2395 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2396 * @adapter: board private structure
2397 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2398 *
2399 * Returns 0 on success, negative on failure
2400 **/
2401int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2402 struct ixgbevf_ring *rx_ring)
2403{
2404 struct pci_dev *pdev = adapter->pdev;
2405 int size;
2406
2407 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2408 rx_ring->rx_buffer_info = vzalloc(size);
2409 if (!rx_ring->rx_buffer_info)
2410 goto alloc_failed;
2411
2412 /* Round up to nearest 4K */
2413 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2414 rx_ring->size = ALIGN(rx_ring->size, 4096);
2415
2416 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2417 &rx_ring->dma, GFP_KERNEL);
2418
2419 if (!rx_ring->desc) {
2420 hw_dbg(&adapter->hw,
2421 "Unable to allocate memory for "
2422 "the receive descriptor ring\n");
2423 vfree(rx_ring->rx_buffer_info);
2424 rx_ring->rx_buffer_info = NULL;
2425 goto alloc_failed;
2426 }
2427
2428 rx_ring->next_to_clean = 0;
2429 rx_ring->next_to_use = 0;
2430
2431 return 0;
2432alloc_failed:
2433 return -ENOMEM;
2434}
2435
2436/**
2437 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2438 * @adapter: board private structure
2439 *
2440 * If this function returns with an error, then it's possible one or
2441 * more of the rings is populated (while the rest are not). It is the
2442 * caller's duty to clean those orphaned rings.
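 * (Unlike Tx, the Rx rings allocated here still need their receive
 * buffers posted; that happens later from ixgbevf_configure().)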
2443 * 2444 * Return 0 on success, negative on failure 2445 **/ 2446static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2447{ 2448 int i, err = 0; 2449 2450 for (i = 0; i < adapter->num_rx_queues; i++) { 2451 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2452 if (!err) 2453 continue; 2454 hw_dbg(&adapter->hw, 2455 "Allocation for Rx Queue %u failed\n", i); 2456 break; 2457 } 2458 return err; 2459} 2460 2461/** 2462 * ixgbevf_free_rx_resources - Free Rx Resources 2463 * @adapter: board private structure 2464 * @rx_ring: ring to clean the resources from 2465 * 2466 * Free all receive software resources 2467 **/ 2468void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, 2469 struct ixgbevf_ring *rx_ring) 2470{ 2471 struct pci_dev *pdev = adapter->pdev; 2472 2473 ixgbevf_clean_rx_ring(adapter, rx_ring); 2474 2475 vfree(rx_ring->rx_buffer_info); 2476 rx_ring->rx_buffer_info = NULL; 2477 2478 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2479 rx_ring->dma); 2480 2481 rx_ring->desc = NULL; 2482} 2483 2484/** 2485 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2486 * @adapter: board private structure 2487 * 2488 * Free all receive software resources 2489 **/ 2490static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2491{ 2492 int i; 2493 2494 for (i = 0; i < adapter->num_rx_queues; i++) 2495 if (adapter->rx_ring[i].desc) 2496 ixgbevf_free_rx_resources(adapter, 2497 &adapter->rx_ring[i]); 2498} 2499 2500static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter) 2501{ 2502 struct ixgbe_hw *hw = &adapter->hw; 2503 struct ixgbevf_ring *rx_ring; 2504 unsigned int def_q = 0; 2505 unsigned int num_tcs = 0; 2506 unsigned int num_rx_queues = 1; 2507 int err, i; 2508 2509 spin_lock_bh(&adapter->mbx_lock); 2510 2511 /* fetch queue configuration from the PF */ 2512 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 2513 2514 spin_unlock_bh(&adapter->mbx_lock); 2515 2516 if (err) 2517 return err; 2518 2519 if (num_tcs > 1) { 2520 /* update default Tx ring register index */ 2521 adapter->tx_ring[0].reg_idx = def_q; 2522 2523 /* we need as many queues as traffic classes */ 2524 num_rx_queues = num_tcs; 2525 } 2526 2527 /* nothing to do if we have the correct number of queues */ 2528 if (adapter->num_rx_queues == num_rx_queues) 2529 return 0; 2530 2531 /* allocate new rings */ 2532 rx_ring = kcalloc(num_rx_queues, 2533 sizeof(struct ixgbevf_ring), GFP_KERNEL); 2534 if (!rx_ring) 2535 return -ENOMEM; 2536 2537 /* setup ring fields */ 2538 for (i = 0; i < num_rx_queues; i++) { 2539 rx_ring[i].count = adapter->rx_ring_count; 2540 rx_ring[i].queue_index = i; 2541 rx_ring[i].reg_idx = i; 2542 rx_ring[i].dev = &adapter->pdev->dev; 2543 rx_ring[i].netdev = adapter->netdev; 2544 } 2545 2546 /* free the existing ring and queues */ 2547 adapter->num_rx_queues = 0; 2548 kfree(adapter->rx_ring); 2549 2550 /* move new rings into position on the adapter struct */ 2551 adapter->rx_ring = rx_ring; 2552 adapter->num_rx_queues = num_rx_queues; 2553 2554 return 0; 2555} 2556 2557/** 2558 * ixgbevf_open - Called when a network interface is made active 2559 * @netdev: network interface device structure 2560 * 2561 * Returns 0 on success, negative value on failure 2562 * 2563 * The open entry point is called when a network interface is made 2564 * active by the system (IFF_UP). 
At this point all resources needed
2565 * for transmit and receive operations are allocated, the interrupt
2566 * handler is registered with the OS, the watchdog timer is started,
2567 * and the stack is notified that the interface is ready.
2568 **/
2569static int ixgbevf_open(struct net_device *netdev)
2570{
2571 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2572 struct ixgbe_hw *hw = &adapter->hw;
2573 int err;
2574
2575 /* disallow open during test */
2576 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2577 return -EBUSY;
2578
2579 if (hw->adapter_stopped) {
2580 ixgbevf_reset(adapter);
2581 /* if the adapter is still stopped then the PF isn't up and
2582 * the VF can't start. */
2583 if (hw->adapter_stopped) {
2584 err = IXGBE_ERR_MBX;
2585 pr_err("Unable to start - perhaps the PF Driver isn't "
2586 "up yet\n");
2587 goto err_setup_reset;
2588 }
2589 }
2590
2591 ixgbevf_negotiate_api(adapter);
2592
2593 /* setup queue reg_idx and Rx queue count */
2594 err = ixgbevf_setup_queues(adapter);
2595 if (err)
2596 goto err_setup_queues;
2597
2598 /* allocate transmit descriptors */
2599 err = ixgbevf_setup_all_tx_resources(adapter);
2600 if (err)
2601 goto err_setup_tx;
2602
2603 /* allocate receive descriptors */
2604 err = ixgbevf_setup_all_rx_resources(adapter);
2605 if (err)
2606 goto err_setup_rx;
2607
2608 ixgbevf_configure(adapter);
2609
2610 /*
2611 * Map the Tx/Rx rings to the vectors we were allotted.
2612 * Since request_irq is called from this function, the rings
2613 * must be mapped to vectors *before* up_complete.
2614 */
2615 ixgbevf_map_rings_to_vectors(adapter);
2616
2617 ixgbevf_up_complete(adapter);
2618
2619 /* clear any pending interrupts, may auto mask */
2620 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2621 err = ixgbevf_request_irq(adapter);
2622 if (err)
2623 goto err_req_irq;
2624
2625 ixgbevf_irq_enable(adapter);
2626
2627 return 0;
2628
2629err_req_irq:
2630 ixgbevf_down(adapter);
2631 ixgbevf_free_irq(adapter);
2632err_setup_rx:
2633 ixgbevf_free_all_rx_resources(adapter);
2634err_setup_tx:
2635 ixgbevf_free_all_tx_resources(adapter);
2636err_setup_queues:
2637 ixgbevf_reset(adapter);
2638
2639err_setup_reset:
2640
2641 return err;
2642}
2643
2644/**
2645 * ixgbevf_close - Disables a network interface
2646 * @netdev: network interface device structure
2647 *
2648 * Returns 0, this is not allowed to fail
2649 *
2650 * The close entry point is called when an interface is de-activated
2651 * by the OS. The hardware is still under the driver's control, but
2652 * needs to be disabled. A global MAC reset is issued to stop the
2653 * hardware, and all transmit and receive resources are freed.
2654 **/
2655static int ixgbevf_close(struct net_device *netdev)
2656{
2657 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2658
2659 ixgbevf_down(adapter);
2660 ixgbevf_free_irq(adapter);
2661
2662 ixgbevf_free_all_tx_resources(adapter);
2663 ixgbevf_free_all_rx_resources(adapter);
2664
2665 return 0;
2666}
2667
2668static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2669 u32 vlan_macip_lens, u32 type_tucmd,
2670 u32 mss_l4len_idx)
2671{
2672 struct ixgbe_adv_tx_context_desc *context_desc;
2673 u16 i = tx_ring->next_to_use;
2674
2675 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2676
2677 i++;
2678 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2679
2680 /* set bits to identify this as an advanced context descriptor */
2681 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2682
2683 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2684 context_desc->seqnum_seed = 0;
2685 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2686 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2687}
2688
2689static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2690 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2691{
2692 u32 vlan_macip_lens, type_tucmd;
2693 u32 mss_l4len_idx, l4len;
2694
2695 if (!skb_is_gso(skb))
2696 return 0;
2697
2698 if (skb_header_cloned(skb)) {
2699 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2700 if (err)
2701 return err;
2702 }
2703
2704 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2705 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2706
2707 if (skb->protocol == htons(ETH_P_IP)) {
2708 struct iphdr *iph = ip_hdr(skb);
2709 iph->tot_len = 0;
2710 iph->check = 0;
2711 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2712 iph->daddr, 0,
2713 IPPROTO_TCP,
2714 0);
2715 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2716 } else if (skb_is_gso_v6(skb)) {
2717 ipv6_hdr(skb)->payload_len = 0;
2718 tcp_hdr(skb)->check =
2719 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2720 &ipv6_hdr(skb)->daddr,
2721 0, IPPROTO_TCP, 0);
2722 }
2723
2724 /* compute header lengths */
2725 l4len = tcp_hdrlen(skb);
2727 *hdr_len = skb_transport_offset(skb) + l4len;
2728
2729 /* mss_l4len_idx: use 1 as index for TSO */
2730 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2731 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2732 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2733
2734 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2735 vlan_macip_lens = skb_network_header_len(skb);
2736 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2737 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2738
2739 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2740 type_tucmd, mss_l4len_idx);
2741
2742 return 1;
2743}
2744
2745static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2746 struct sk_buff *skb, u32 tx_flags)
2747{
2748 u32 vlan_macip_lens = 0;
2749 u32 mss_l4len_idx = 0;
2750 u32 type_tucmd = 0;
2751
2752 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2753 u8 l4_hdr = 0;
2754 switch (skb->protocol) {
2755 case __constant_htons(ETH_P_IP):
2756 vlan_macip_lens |= skb_network_header_len(skb);
2757 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2758 l4_hdr = ip_hdr(skb)->protocol;
2759 break;
2760 case __constant_htons(ETH_P_IPV6):
2761 vlan_macip_lens |= skb_network_header_len(skb);
2762 l4_hdr = ipv6_hdr(skb)->nexthdr;
2763 break;
2764 default:
2765 if (unlikely(net_ratelimit())) {
2766 dev_warn(tx_ring->dev,
2767 "partial checksum but proto=%x!\n",
2768 skb->protocol);
2769 }
2770 break;
2771 }
2772
2773 switch (l4_hdr) {
2774 case IPPROTO_TCP:
2775 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2776 mss_l4len_idx = tcp_hdrlen(skb) <<
2777 IXGBE_ADVTXD_L4LEN_SHIFT;
2778 break;
2779 case IPPROTO_SCTP:
2780 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2781 mss_l4len_idx = sizeof(struct sctphdr) <<
2782 IXGBE_ADVTXD_L4LEN_SHIFT;
2783 break;
2784 case IPPROTO_UDP:
2785 mss_l4len_idx = sizeof(struct udphdr) <<
2786 IXGBE_ADVTXD_L4LEN_SHIFT;
2787 break;
2788 default:
2789 if (unlikely(net_ratelimit())) {
2790 dev_warn(tx_ring->dev,
2791 "partial checksum but l4 proto=%x!\n",
2792 l4_hdr);
2793 }
2794 break;
2795 }
2796 }
2797
2798 /* vlan_macip_lens: MACLEN, VLAN tag */
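 /* Per the 82599 datasheet the context descriptor packs this word as:
  * IPLEN in bits 8:0 (filled in above for the IP cases), MACLEN in
  * bits 15:9 (hence IXGBE_ADVTXD_MACLEN_SHIFT) and the 802.1Q tag in
  * bits 31:16, the part IXGBE_TX_FLAGS_VLAN_MASK preserves. */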
2799 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2800 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2801 2802 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2803 type_tucmd, mss_l4len_idx); 2804 2805 return (skb->ip_summed == CHECKSUM_PARTIAL); 2806} 2807 2808static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2809 struct sk_buff *skb, u32 tx_flags, 2810 unsigned int first) 2811{ 2812 struct ixgbevf_tx_buffer *tx_buffer_info; 2813 unsigned int len; 2814 unsigned int total = skb->len; 2815 unsigned int offset = 0, size; 2816 int count = 0; 2817 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2818 unsigned int f; 2819 int i; 2820 2821 i = tx_ring->next_to_use; 2822 2823 len = min(skb_headlen(skb), total); 2824 while (len) { 2825 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2826 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2827 2828 tx_buffer_info->length = size; 2829 tx_buffer_info->mapped_as_page = false; 2830 tx_buffer_info->dma = dma_map_single(tx_ring->dev, 2831 skb->data + offset, 2832 size, DMA_TO_DEVICE); 2833 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) 2834 goto dma_error; 2835 tx_buffer_info->next_to_watch = i; 2836 2837 len -= size; 2838 total -= size; 2839 offset += size; 2840 count++; 2841 i++; 2842 if (i == tx_ring->count) 2843 i = 0; 2844 } 2845 2846 for (f = 0; f < nr_frags; f++) { 2847 const struct skb_frag_struct *frag; 2848 2849 frag = &skb_shinfo(skb)->frags[f]; 2850 len = min((unsigned int)skb_frag_size(frag), total); 2851 offset = 0; 2852 2853 while (len) { 2854 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2855 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2856 2857 tx_buffer_info->length = size; 2858 tx_buffer_info->dma = 2859 skb_frag_dma_map(tx_ring->dev, frag, 2860 offset, size, DMA_TO_DEVICE); 2861 if (dma_mapping_error(tx_ring->dev, 2862 tx_buffer_info->dma)) 2863 goto dma_error; 2864 tx_buffer_info->mapped_as_page = true; 2865 tx_buffer_info->next_to_watch = i; 2866 2867 len -= size; 2868 total -= size; 2869 offset += size; 2870 count++; 2871 i++; 2872 if (i == tx_ring->count) 2873 i = 0; 2874 } 2875 if (total == 0) 2876 break; 2877 } 2878 2879 if (i == 0) 2880 i = tx_ring->count - 1; 2881 else 2882 i = i - 1; 2883 tx_ring->tx_buffer_info[i].skb = skb; 2884 tx_ring->tx_buffer_info[first].next_to_watch = i; 2885 tx_ring->tx_buffer_info[first].time_stamp = jiffies; 2886 2887 return count; 2888 2889dma_error: 2890 dev_err(tx_ring->dev, "TX DMA map failed\n"); 2891 2892 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2893 tx_buffer_info->dma = 0; 2894 tx_buffer_info->next_to_watch = 0; 2895 count--; 2896 2897 /* clear timestamp and dma mappings for remaining portion of packet */ 2898 while (count >= 0) { 2899 count--; 2900 i--; 2901 if (i < 0) 2902 i += tx_ring->count; 2903 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2904 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 2905 } 2906 2907 return count; 2908} 2909 2910static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, 2911 int count, u32 paylen, u8 hdr_len) 2912{ 2913 union ixgbe_adv_tx_desc *tx_desc = NULL; 2914 struct ixgbevf_tx_buffer *tx_buffer_info; 2915 u32 olinfo_status = 0, cmd_type_len = 0; 2916 unsigned int i; 2917 2918 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 2919 2920 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 2921 2922 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 2923 2924 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2925 cmd_type_len 
|= IXGBE_ADVTXD_DCMD_VLE; 2926 2927 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 2928 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM; 2929 2930 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 2931 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 2932 2933 /* use index 1 context for tso */ 2934 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2935 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 2936 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM; 2937 } 2938 2939 /* 2940 * Check Context must be set if Tx switch is enabled, which it 2941 * always is for case where virtual functions are running 2942 */ 2943 olinfo_status |= IXGBE_ADVTXD_CC; 2944 2945 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 2946 2947 i = tx_ring->next_to_use; 2948 while (count--) { 2949 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2950 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 2951 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 2952 tx_desc->read.cmd_type_len = 2953 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 2954 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2955 i++; 2956 if (i == tx_ring->count) 2957 i = 0; 2958 } 2959 2960 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 2961 2962 tx_ring->next_to_use = i; 2963} 2964 2965static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2966{ 2967 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); 2968 2969 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 2970 /* Herbert's original patch had: 2971 * smp_mb__after_netif_stop_queue(); 2972 * but since that doesn't exist yet, just open code it. */ 2973 smp_mb(); 2974 2975 /* We need to check again in a case another CPU has just 2976 * made room available. */ 2977 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) 2978 return -EBUSY; 2979 2980 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 2981 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 2982 ++adapter->restart_queue; 2983 return 0; 2984} 2985 2986static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2987{ 2988 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 2989 return 0; 2990 return __ixgbevf_maybe_stop_tx(tx_ring, size); 2991} 2992 2993static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2994{ 2995 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2996 struct ixgbevf_ring *tx_ring; 2997 unsigned int first; 2998 unsigned int tx_flags = 0; 2999 u8 hdr_len = 0; 3000 int r_idx = 0, tso; 3001 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 3002#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 3003 unsigned short f; 3004#endif 3005 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); 3006 if (!dst_mac || is_link_local_ether_addr(dst_mac)) { 3007 dev_kfree_skb(skb); 3008 return NETDEV_TX_OK; 3009 } 3010 3011 tx_ring = &adapter->tx_ring[r_idx]; 3012 3013 /* 3014 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 3015 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 3016 * + 2 desc gap to keep tail from touching head, 3017 * + 1 desc for context descriptor, 3018 * otherwise try next time 3019 */ 3020#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 3021 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 3022 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3023#else 3024 count += skb_shinfo(skb)->nr_frags; 3025#endif 3026 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 3027 adapter->tx_busy++; 3028 return NETDEV_TX_BUSY; 3029 } 3030 3031 if (vlan_tx_tag_present(skb)) { 3032 tx_flags |= vlan_tx_tag_get(skb); 3033 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3034 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3035 } 3036 3037 first = tx_ring->next_to_use; 3038 3039 if (skb->protocol == htons(ETH_P_IP)) 3040 tx_flags |= IXGBE_TX_FLAGS_IPV4; 3041 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len); 3042 if (tso < 0) { 3043 dev_kfree_skb_any(skb); 3044 return NETDEV_TX_OK; 3045 } 3046 3047 if (tso) 3048 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; 3049 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags)) 3050 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3051 3052 ixgbevf_tx_queue(tx_ring, tx_flags, 3053 ixgbevf_tx_map(tx_ring, skb, tx_flags, first), 3054 skb->len, hdr_len); 3055 /* 3056 * Force memory writes to complete before letting h/w 3057 * know there are new descriptors to fetch. (Only 3058 * applicable for weak-ordered memory model archs, 3059 * such as IA-64). 
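 * The writel() of next_to_use below publishes the new tail; the device
 * fetches descriptors up to, but not including, the tail, so it must
 * never observe the tail move before the descriptor contents are
 * globally visible.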
3060 */
3061 wmb();
3062
3063 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
3064
3065 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3066
3067 return NETDEV_TX_OK;
3068}
3069
3070/**
3071 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3072 * @netdev: network interface device structure
3073 * @p: pointer to an address structure
3074 *
3075 * Returns 0 on success, negative on failure
3076 **/
3077static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3078{
3079 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3080 struct ixgbe_hw *hw = &adapter->hw;
3081 struct sockaddr *addr = p;
3082
3083 if (!is_valid_ether_addr(addr->sa_data))
3084 return -EADDRNOTAVAIL;
3085
3086 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3087 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3088
3089 spin_lock_bh(&adapter->mbx_lock);
3090
3091 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3092
3093 spin_unlock_bh(&adapter->mbx_lock);
3094
3095 return 0;
3096}
3097
3098/**
3099 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3100 * @netdev: network interface device structure
3101 * @new_mtu: new value for maximum frame size
3102 *
3103 * Returns 0 on success, negative on failure
3104 **/
3105static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3106{
3107 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3108 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3109 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3110
3111 switch (adapter->hw.api_version) {
3112 case ixgbe_mbox_api_11:
3113 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3114 break;
3115 default:
3116 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3117 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3118 break;
3119 }
3120
3121 /* MTU < 68 is an error and causes problems on some kernels */
3122 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3123 return -EINVAL;
3124
3125 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3126 netdev->mtu, new_mtu);
3127 /* must set new MTU before calling down or up */
3128 netdev->mtu = new_mtu;
3129
3130 if (netif_running(netdev))
3131 ixgbevf_reinit_locked(adapter);
3132
3133 return 0;
3134}
3135
3136static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3137{
3138 struct net_device *netdev = pci_get_drvdata(pdev);
3139 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3140#ifdef CONFIG_PM
3141 int retval = 0;
3142#endif
3143
3144 netif_device_detach(netdev);
3145
3146 if (netif_running(netdev)) {
3147 rtnl_lock();
3148 ixgbevf_down(adapter);
3149 ixgbevf_free_irq(adapter);
3150 ixgbevf_free_all_tx_resources(adapter);
3151 ixgbevf_free_all_rx_resources(adapter);
3152 rtnl_unlock();
3153 }
3154
3155 ixgbevf_clear_interrupt_scheme(adapter);
3156
3157#ifdef CONFIG_PM
3158 retval = pci_save_state(pdev);
3159 if (retval)
3160 return retval;
3161
3162#endif
3163 pci_disable_device(pdev);
3164
3165 return 0;
3166}
3167
3168#ifdef CONFIG_PM
3169static int ixgbevf_resume(struct pci_dev *pdev)
3170{
3171 struct net_device *netdev = pci_get_drvdata(pdev);
3172 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3173 int err;
3174
3175 pci_set_power_state(pdev, PCI_D0);
3176 pci_restore_state(pdev);
3177 /*
3178 * pci_restore_state clears dev->state_saved so call
3179 * pci_save_state to restore it.
3180 */ 3181 pci_save_state(pdev); 3182 3183 err = pci_enable_device_mem(pdev); 3184 if (err) { 3185 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3186 return err; 3187 } 3188 pci_set_master(pdev); 3189 3190 rtnl_lock(); 3191 err = ixgbevf_init_interrupt_scheme(adapter); 3192 rtnl_unlock(); 3193 if (err) { 3194 dev_err(&pdev->dev, "Cannot initialize interrupts\n"); 3195 return err; 3196 } 3197 3198 ixgbevf_reset(adapter); 3199 3200 if (netif_running(netdev)) { 3201 err = ixgbevf_open(netdev); 3202 if (err) 3203 return err; 3204 } 3205 3206 netif_device_attach(netdev); 3207 3208 return err; 3209} 3210 3211#endif /* CONFIG_PM */ 3212static void ixgbevf_shutdown(struct pci_dev *pdev) 3213{ 3214 ixgbevf_suspend(pdev, PMSG_SUSPEND); 3215} 3216 3217static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3218 struct rtnl_link_stats64 *stats) 3219{ 3220 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3221 unsigned int start; 3222 u64 bytes, packets; 3223 const struct ixgbevf_ring *ring; 3224 int i; 3225 3226 ixgbevf_update_stats(adapter); 3227 3228 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3229 3230 for (i = 0; i < adapter->num_rx_queues; i++) { 3231 ring = &adapter->rx_ring[i]; 3232 do { 3233 start = u64_stats_fetch_begin_bh(&ring->syncp); 3234 bytes = ring->total_bytes; 3235 packets = ring->total_packets; 3236 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3237 stats->rx_bytes += bytes; 3238 stats->rx_packets += packets; 3239 } 3240 3241 for (i = 0; i < adapter->num_tx_queues; i++) { 3242 ring = &adapter->tx_ring[i]; 3243 do { 3244 start = u64_stats_fetch_begin_bh(&ring->syncp); 3245 bytes = ring->total_bytes; 3246 packets = ring->total_packets; 3247 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3248 stats->tx_bytes += bytes; 3249 stats->tx_packets += packets; 3250 } 3251 3252 return stats; 3253} 3254 3255static const struct net_device_ops ixgbevf_netdev_ops = { 3256 .ndo_open = ixgbevf_open, 3257 .ndo_stop = ixgbevf_close, 3258 .ndo_start_xmit = ixgbevf_xmit_frame, 3259 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3260 .ndo_get_stats64 = ixgbevf_get_stats, 3261 .ndo_validate_addr = eth_validate_addr, 3262 .ndo_set_mac_address = ixgbevf_set_mac, 3263 .ndo_change_mtu = ixgbevf_change_mtu, 3264 .ndo_tx_timeout = ixgbevf_tx_timeout, 3265 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3266 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3267}; 3268 3269static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3270{ 3271 dev->netdev_ops = &ixgbevf_netdev_ops; 3272 ixgbevf_set_ethtool_ops(dev); 3273 dev->watchdog_timeo = 5 * HZ; 3274} 3275 3276/** 3277 * ixgbevf_probe - Device Initialization Routine 3278 * @pdev: PCI device information struct 3279 * @ent: entry in ixgbevf_pci_tbl 3280 * 3281 * Returns 0 on success, negative on failure 3282 * 3283 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3284 * The OS initialization, configuring of the adapter private structure, 3285 * and a hardware reset occur. 
3286 **/ 3287static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3288{ 3289 struct net_device *netdev; 3290 struct ixgbevf_adapter *adapter = NULL; 3291 struct ixgbe_hw *hw = NULL; 3292 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3293 static int cards_found; 3294 int err, pci_using_dac; 3295 3296 err = pci_enable_device(pdev); 3297 if (err) 3298 return err; 3299 3300 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3301 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 3302 pci_using_dac = 1; 3303 } else { 3304 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3305 if (err) { 3306 err = dma_set_coherent_mask(&pdev->dev, 3307 DMA_BIT_MASK(32)); 3308 if (err) { 3309 dev_err(&pdev->dev, "No usable DMA " 3310 "configuration, aborting\n"); 3311 goto err_dma; 3312 } 3313 } 3314 pci_using_dac = 0; 3315 } 3316 3317 err = pci_request_regions(pdev, ixgbevf_driver_name); 3318 if (err) { 3319 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3320 goto err_pci_reg; 3321 } 3322 3323 pci_set_master(pdev); 3324 3325 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3326 MAX_TX_QUEUES); 3327 if (!netdev) { 3328 err = -ENOMEM; 3329 goto err_alloc_etherdev; 3330 } 3331 3332 SET_NETDEV_DEV(netdev, &pdev->dev); 3333 3334 pci_set_drvdata(pdev, netdev); 3335 adapter = netdev_priv(netdev); 3336 3337 adapter->netdev = netdev; 3338 adapter->pdev = pdev; 3339 hw = &adapter->hw; 3340 hw->back = adapter; 3341 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3342 3343 /* 3344 * call save state here in standalone driver because it relies on 3345 * adapter struct to exist, and needs to call netdev_priv 3346 */ 3347 pci_save_state(pdev); 3348 3349 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3350 pci_resource_len(pdev, 0)); 3351 if (!hw->hw_addr) { 3352 err = -EIO; 3353 goto err_ioremap; 3354 } 3355 3356 ixgbevf_assign_netdev_ops(netdev); 3357 3358 adapter->bd_number = cards_found; 3359 3360 /* Setup hw api */ 3361 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3362 hw->mac.type = ii->mac; 3363 3364 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3365 sizeof(struct ixgbe_mbx_operations)); 3366 3367 /* setup the private structure */ 3368 err = ixgbevf_sw_init(adapter); 3369 if (err) 3370 goto err_sw_init; 3371 3372 /* The HW MAC address was set and/or determined in sw_init */ 3373 if (!is_valid_ether_addr(netdev->dev_addr)) { 3374 pr_err("invalid MAC address\n"); 3375 err = -EIO; 3376 goto err_sw_init; 3377 } 3378 3379 netdev->hw_features = NETIF_F_SG | 3380 NETIF_F_IP_CSUM | 3381 NETIF_F_IPV6_CSUM | 3382 NETIF_F_TSO | 3383 NETIF_F_TSO6 | 3384 NETIF_F_RXCSUM; 3385 3386 netdev->features = netdev->hw_features | 3387 NETIF_F_HW_VLAN_TX | 3388 NETIF_F_HW_VLAN_RX | 3389 NETIF_F_HW_VLAN_FILTER; 3390 3391 netdev->vlan_features |= NETIF_F_TSO; 3392 netdev->vlan_features |= NETIF_F_TSO6; 3393 netdev->vlan_features |= NETIF_F_IP_CSUM; 3394 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3395 netdev->vlan_features |= NETIF_F_SG; 3396 3397 if (pci_using_dac) 3398 netdev->features |= NETIF_F_HIGHDMA; 3399 3400 netdev->priv_flags |= IFF_UNICAST_FLT; 3401 3402 init_timer(&adapter->watchdog_timer); 3403 adapter->watchdog_timer.function = ixgbevf_watchdog; 3404 adapter->watchdog_timer.data = (unsigned long)adapter; 3405 3406 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3407 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3408 3409 err = ixgbevf_init_interrupt_scheme(adapter); 3410 if (err) 3411 goto err_sw_init; 3412 3413 
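 /* "eth%d" is a template rather than a literal name; register_netdev()
  * below resolves %d to the first unused ethN unit number. */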
strcpy(netdev->name, "eth%d");
3414
3415 err = register_netdev(netdev);
3416 if (err)
3417 goto err_register;
3418
3419 netif_carrier_off(netdev);
3420
3421 ixgbevf_init_last_counter_stats(adapter);
3422
3423 /* print the MAC address */
3424 hw_dbg(hw, "%pM\n", netdev->dev_addr);
3425
3426 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3427
3428 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3429 cards_found++;
3430 return 0;
3431
3432err_register:
3433 ixgbevf_clear_interrupt_scheme(adapter);
3434err_sw_init:
3435 ixgbevf_reset_interrupt_capability(adapter);
3436 iounmap(hw->hw_addr);
3437err_ioremap:
3438 free_netdev(netdev);
3439err_alloc_etherdev:
3440 pci_release_regions(pdev);
3441err_pci_reg:
3442err_dma:
3443 pci_disable_device(pdev);
3444 return err;
3445}
3446
3447/**
3448 * ixgbevf_remove - Device Removal Routine
3449 * @pdev: PCI device information struct
3450 *
3451 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3452 * that it should release a PCI device. This could be caused by a
3453 * Hot-Plug event, or because the driver is going to be removed from
3454 * memory.
3455 **/
3456static void ixgbevf_remove(struct pci_dev *pdev)
3457{
3458 struct net_device *netdev = pci_get_drvdata(pdev);
3459 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3460
3461 set_bit(__IXGBEVF_DOWN, &adapter->state);
3462
3463 del_timer_sync(&adapter->watchdog_timer);
3464
3465 cancel_work_sync(&adapter->reset_task);
3466 cancel_work_sync(&adapter->watchdog_task);
3467
3468 if (netdev->reg_state == NETREG_REGISTERED)
3469 unregister_netdev(netdev);
3470
3471 ixgbevf_clear_interrupt_scheme(adapter);
3473
3474 iounmap(adapter->hw.hw_addr);
3475 pci_release_regions(pdev);
3476
3477 hw_dbg(&adapter->hw, "Remove complete\n");
3478
3479 kfree(adapter->tx_ring);
3480 kfree(adapter->rx_ring);
3481
3482 free_netdev(netdev);
3483
3484 pci_disable_device(pdev);
3485}
3486
3487/**
3488 * ixgbevf_io_error_detected - called when PCI error is detected
3489 * @pdev: Pointer to PCI device
3490 * @state: The current pci connection state
3491 *
3492 * This function is called after a PCI bus error affecting
3493 * this device has been detected.
3494 */
3495static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3496 pci_channel_state_t state)
3497{
3498 struct net_device *netdev = pci_get_drvdata(pdev);
3499 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3500
3501 netif_device_detach(netdev);
3502
3503 if (state == pci_channel_io_perm_failure)
3504 return PCI_ERS_RESULT_DISCONNECT;
3505
3506 if (netif_running(netdev))
3507 ixgbevf_down(adapter);
3508
3509 pci_disable_device(pdev);
3510
3511 /* Request a slot reset. */
3512 return PCI_ERS_RESULT_NEED_RESET;
3513}
3514
3515/**
3516 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3517 * @pdev: Pointer to PCI device
3518 *
3519 * Restart the card from scratch, as if from a cold-boot. Implementation
3520 * resembles the first-half of the ixgbevf_resume routine.
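 * Only the PCI-level re-enable and a VF reset are done here; rings and
 * interrupts are restored by ixgbevf_io_resume() once the PCI error
 * recovery core says traffic may flow again.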
3521 */
3522static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3523{
3524 struct net_device *netdev = pci_get_drvdata(pdev);
3525 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3526
3527 if (pci_enable_device_mem(pdev)) {
3528 dev_err(&pdev->dev,
3529 "Cannot re-enable PCI device after reset.\n");
3530 return PCI_ERS_RESULT_DISCONNECT;
3531 }
3532
3533 pci_set_master(pdev);
3534
3535 ixgbevf_reset(adapter);
3536
3537 return PCI_ERS_RESULT_RECOVERED;
3538}
3539
3540/**
3541 * ixgbevf_io_resume - called when traffic can start flowing again.
3542 * @pdev: Pointer to PCI device
3543 *
3544 * This callback is called when the error recovery driver tells us that
3545 * it's OK to resume normal operation. Implementation resembles the
3546 * second-half of the ixgbevf_resume routine.
3547 */
3548static void ixgbevf_io_resume(struct pci_dev *pdev)
3549{
3550 struct net_device *netdev = pci_get_drvdata(pdev);
3551 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3552
3553 if (netif_running(netdev))
3554 ixgbevf_up(adapter);
3555
3556 netif_device_attach(netdev);
3557}
3558
3559/* PCI Error Recovery (ERS) */
3560static const struct pci_error_handlers ixgbevf_err_handler = {
3561 .error_detected = ixgbevf_io_error_detected,
3562 .slot_reset = ixgbevf_io_slot_reset,
3563 .resume = ixgbevf_io_resume,
3564};
3565
3566static struct pci_driver ixgbevf_driver = {
3567 .name = ixgbevf_driver_name,
3568 .id_table = ixgbevf_pci_tbl,
3569 .probe = ixgbevf_probe,
3570 .remove = ixgbevf_remove,
3571#ifdef CONFIG_PM
3572 /* Power Management Hooks */
3573 .suspend = ixgbevf_suspend,
3574 .resume = ixgbevf_resume,
3575#endif
3576 .shutdown = ixgbevf_shutdown,
3577 .err_handler = &ixgbevf_err_handler
3578};
3579
3580/**
3581 * ixgbevf_init_module - Driver Registration Routine
3582 *
3583 * ixgbevf_init_module is the first routine called when the driver is
3584 * loaded. All it does is register with the PCI subsystem.
3585 **/
3586static int __init ixgbevf_init_module(void)
3587{
3588 int ret;
3589 pr_info("%s - version %s\n", ixgbevf_driver_string,
3590 ixgbevf_driver_version);
3591
3592 pr_info("%s\n", ixgbevf_copyright);
3593
3594 ret = pci_register_driver(&ixgbevf_driver);
3595 return ret;
3596}
3597
3598module_init(ixgbevf_init_module);
3599
3600/**
3601 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3602 *
3603 * ixgbevf_exit_module is called just before the driver is removed
3604 * from memory.
3605 **/
3606static void __exit ixgbevf_exit_module(void)
3607{
3608 pci_unregister_driver(&ixgbevf_driver);
3609}
3610
3611#ifdef DEBUG
3612/**
3613 * ixgbevf_get_hw_dev_name - return device name string
3614 * used by hardware layer to print debugging information
3615 **/
3616char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3617{
3618 struct ixgbevf_adapter *adapter = hw->back;
3619 return adapter->netdev->name;
3620}
3621
3622#endif
3623module_exit(ixgbevf_exit_module);
3624
3625/* ixgbevf_main.c */
3626