ixgbevf_main.c revision 6fe59675500760dd21ef8a339fa129f7adb596c8
1/******************************************************************************* 2 3 Intel 82599 Virtual Function driver 4 Copyright(c) 1999 - 2012 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 26*******************************************************************************/ 27 28 29/****************************************************************************** 30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code 31******************************************************************************/ 32 33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34 35#include <linux/types.h> 36#include <linux/bitops.h> 37#include <linux/module.h> 38#include <linux/pci.h> 39#include <linux/netdevice.h> 40#include <linux/vmalloc.h> 41#include <linux/string.h> 42#include <linux/in.h> 43#include <linux/ip.h> 44#include <linux/tcp.h> 45#include <linux/sctp.h> 46#include <linux/ipv6.h> 47#include <linux/slab.h> 48#include <net/checksum.h> 49#include <net/ip6_checksum.h> 50#include <linux/ethtool.h> 51#include <linux/if.h> 52#include <linux/if_vlan.h> 53#include <linux/prefetch.h> 54 55#include "ixgbevf.h" 56 57const char ixgbevf_driver_name[] = "ixgbevf"; 58static const char ixgbevf_driver_string[] = 59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 60 61#define DRV_VERSION "2.7.12-k" 62const char ixgbevf_driver_version[] = DRV_VERSION; 63static char ixgbevf_copyright[] = 64 "Copyright (c) 2009 - 2012 Intel Corporation."; 65 66static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 67 [board_82599_vf] = &ixgbevf_82599_vf_info, 68 [board_X540_vf] = &ixgbevf_X540_vf_info, 69}; 70 71/* ixgbevf_pci_tbl - PCI Device ID Table 72 * 73 * Wildcard entries (PCI_ANY_ID) should come last 74 * Last entry must be all 0s 75 * 76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 77 * Class, Class Mask, private data (not used) } 78 */ 79static struct pci_device_id ixgbevf_pci_tbl[] = { 80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), 81 board_82599_vf}, 82 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), 83 board_X540_vf}, 84 85 /* required last entry */ 86 {0, } 87}; 88MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 89 90MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 91MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); 92MODULE_LICENSE("GPL"); 93MODULE_VERSION(DRV_VERSION); 94 95#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) 96static int debug = -1; 97module_param(debug, int, 0); 98MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 99 100/* forward decls */ 101static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 102static void 
ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); 103 104static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, 105 struct ixgbevf_ring *rx_ring, 106 u32 val) 107{ 108 /* 109 * Force memory writes to complete before letting h/w 110 * know there are new descriptors to fetch. (Only 111 * applicable for weak-ordered memory model archs, 112 * such as IA-64). 113 */ 114 wmb(); 115 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); 116} 117 118/** 119 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 120 * @adapter: pointer to adapter struct 121 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 122 * @queue: queue to map the corresponding interrupt to 123 * @msix_vector: the vector to map to the corresponding queue 124 */ 125static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, 126 u8 queue, u8 msix_vector) 127{ 128 u32 ivar, index; 129 struct ixgbe_hw *hw = &adapter->hw; 130 if (direction == -1) { 131 /* other causes */ 132 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 133 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 134 ivar &= ~0xFF; 135 ivar |= msix_vector; 136 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 137 } else { 138 /* tx or rx causes */ 139 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 140 index = ((16 * (queue & 1)) + (8 * direction)); 141 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 142 ivar &= ~(0xFF << index); 143 ivar |= (msix_vector << index); 144 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); 145 } 146} 147 148static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, 149 struct ixgbevf_tx_buffer 150 *tx_buffer_info) 151{ 152 if (tx_buffer_info->dma) { 153 if (tx_buffer_info->mapped_as_page) 154 dma_unmap_page(tx_ring->dev, 155 tx_buffer_info->dma, 156 tx_buffer_info->length, 157 DMA_TO_DEVICE); 158 else 159 dma_unmap_single(tx_ring->dev, 160 tx_buffer_info->dma, 161 tx_buffer_info->length, 162 DMA_TO_DEVICE); 163 tx_buffer_info->dma = 0; 164 } 165 if (tx_buffer_info->skb) { 166 dev_kfree_skb_any(tx_buffer_info->skb); 167 tx_buffer_info->skb = NULL; 168 } 169 tx_buffer_info->time_stamp = 0; 170 /* tx_buffer_info must be completely set up in the transmit path */ 171} 172 173#define IXGBE_MAX_TXD_PWR 14 174#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 175 176/* Tx Descriptors needed, worst case */ 177#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) 178#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 179 180static void ixgbevf_tx_timeout(struct net_device *netdev); 181 182/** 183 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 184 * @q_vector: board private structure 185 * @tx_ring: tx ring to clean 186 **/ 187static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, 188 struct ixgbevf_ring *tx_ring) 189{ 190 struct ixgbevf_adapter *adapter = q_vector->adapter; 191 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 192 struct ixgbevf_tx_buffer *tx_buffer_info; 193 unsigned int i, eop, count = 0; 194 unsigned int total_bytes = 0, total_packets = 0; 195 196 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 197 return true; 198 199 i = tx_ring->next_to_clean; 200 eop = tx_ring->tx_buffer_info[i].next_to_watch; 201 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); 202 203 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 204 (count < tx_ring->count)) { 205 bool cleaned = false; 206 rmb(); /* read buffer_info after eop_desc */ 207 /* eop could change between read and DD-check */ 208 if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch)) 
209 goto cont_loop; 210 for ( ; !cleaned; count++) { 211 struct sk_buff *skb; 212 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 213 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 214 cleaned = (i == eop); 215 skb = tx_buffer_info->skb; 216 217 if (cleaned && skb) { 218 unsigned int segs, bytecount; 219 220 /* gso_segs is currently only valid for tcp */ 221 segs = skb_shinfo(skb)->gso_segs ?: 1; 222 /* multiply data chunks by size of headers */ 223 bytecount = ((segs - 1) * skb_headlen(skb)) + 224 skb->len; 225 total_packets += segs; 226 total_bytes += bytecount; 227 } 228 229 ixgbevf_unmap_and_free_tx_resource(tx_ring, 230 tx_buffer_info); 231 232 tx_desc->wb.status = 0; 233 234 i++; 235 if (i == tx_ring->count) 236 i = 0; 237 } 238 239cont_loop: 240 eop = tx_ring->tx_buffer_info[i].next_to_watch; 241 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); 242 } 243 244 tx_ring->next_to_clean = i; 245 246#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 247 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && 248 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 249 /* Make sure that anybody stopping the queue after this 250 * sees the new next_to_clean. 251 */ 252 smp_mb(); 253 if (__netif_subqueue_stopped(tx_ring->netdev, 254 tx_ring->queue_index) && 255 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 256 netif_wake_subqueue(tx_ring->netdev, 257 tx_ring->queue_index); 258 ++adapter->restart_queue; 259 } 260 } 261 262 u64_stats_update_begin(&tx_ring->syncp); 263 tx_ring->total_bytes += total_bytes; 264 tx_ring->total_packets += total_packets; 265 u64_stats_update_end(&tx_ring->syncp); 266 q_vector->tx.total_bytes += total_bytes; 267 q_vector->tx.total_packets += total_packets; 268 269 return count < tx_ring->count; 270} 271 272/** 273 * ixgbevf_receive_skb - Send a completed packet up the stack 274 * @q_vector: structure containing interrupt and ring information 275 * @skb: packet to send up 276 * @status: hardware indication of status of receive 277 * @rx_desc: rx descriptor 278 **/ 279static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, 280 struct sk_buff *skb, u8 status, 281 union ixgbe_adv_rx_desc *rx_desc) 282{ 283 struct ixgbevf_adapter *adapter = q_vector->adapter; 284 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 285 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 286 287 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) 288 __vlan_hwaccel_put_tag(skb, tag); 289 290 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 291 napi_gro_receive(&q_vector->napi, skb); 292 else 293 netif_rx(skb); 294} 295 296/** 297 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 298 * @ring: pointer to Rx descriptor ring structure 299 * @status_err: hardware indication of status of receive 300 * @skb: skb currently being received and modified 301 **/ 302static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, 303 u32 status_err, struct sk_buff *skb) 304{ 305 skb_checksum_none_assert(skb); 306 307 /* Rx csum disabled */ 308 if (!(ring->netdev->features & NETIF_F_RXCSUM)) 309 return; 310 311 /* if IP and error */ 312 if ((status_err & IXGBE_RXD_STAT_IPCS) && 313 (status_err & IXGBE_RXDADV_ERR_IPE)) { 314 ring->hw_csum_rx_error++; 315 return; 316 } 317 318 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 319 return; 320 321 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 322 ring->hw_csum_rx_error++; 323 return; 324 } 325 326 /* It must be a TCP or UDP packet with a valid checksum */ 327 skb->ip_summed = CHECKSUM_UNNECESSARY; 328 ring->hw_csum_rx_good++; 329} 330 331/** 332 * 
ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 333 * @adapter: address of board private structure 334 **/ 335static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, 336 struct ixgbevf_ring *rx_ring, 337 int cleaned_count) 338{ 339 struct pci_dev *pdev = adapter->pdev; 340 union ixgbe_adv_rx_desc *rx_desc; 341 struct ixgbevf_rx_buffer *bi; 342 unsigned int i = rx_ring->next_to_use; 343 344 bi = &rx_ring->rx_buffer_info[i]; 345 346 while (cleaned_count--) { 347 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 348 349 if (!bi->skb) { 350 struct sk_buff *skb; 351 352 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 353 rx_ring->rx_buf_len); 354 if (!skb) { 355 adapter->alloc_rx_buff_failed++; 356 goto no_buffers; 357 } 358 bi->skb = skb; 359 360 bi->dma = dma_map_single(&pdev->dev, skb->data, 361 rx_ring->rx_buf_len, 362 DMA_FROM_DEVICE); 363 if (dma_mapping_error(&pdev->dev, bi->dma)) { 364 dev_kfree_skb(skb); 365 bi->skb = NULL; 366 dev_err(&pdev->dev, "RX DMA map failed\n"); 367 break; 368 } 369 } 370 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 371 372 i++; 373 if (i == rx_ring->count) 374 i = 0; 375 bi = &rx_ring->rx_buffer_info[i]; 376 } 377 378no_buffers: 379 if (rx_ring->next_to_use != i) { 380 rx_ring->next_to_use = i; 381 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); 382 } 383} 384 385static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 386 u32 qmask) 387{ 388 struct ixgbe_hw *hw = &adapter->hw; 389 390 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); 391} 392 393static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 394 struct ixgbevf_ring *rx_ring, 395 int budget) 396{ 397 struct ixgbevf_adapter *adapter = q_vector->adapter; 398 struct pci_dev *pdev = adapter->pdev; 399 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 400 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; 401 struct sk_buff *skb; 402 unsigned int i; 403 u32 len, staterr; 404 int cleaned_count = 0; 405 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 406 407 i = rx_ring->next_to_clean; 408 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 409 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 410 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 411 412 while (staterr & IXGBE_RXD_STAT_DD) { 413 if (!budget) 414 break; 415 budget--; 416 417 rmb(); /* read descriptor and rx_buffer_info after status DD */ 418 len = le16_to_cpu(rx_desc->wb.upper.length); 419 skb = rx_buffer_info->skb; 420 prefetch(skb->data - NET_IP_ALIGN); 421 rx_buffer_info->skb = NULL; 422 423 if (rx_buffer_info->dma) { 424 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 425 rx_ring->rx_buf_len, 426 DMA_FROM_DEVICE); 427 rx_buffer_info->dma = 0; 428 skb_put(skb, len); 429 } 430 431 i++; 432 if (i == rx_ring->count) 433 i = 0; 434 435 next_rxd = IXGBEVF_RX_DESC(rx_ring, i); 436 prefetch(next_rxd); 437 cleaned_count++; 438 439 next_buffer = &rx_ring->rx_buffer_info[i]; 440 441 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 442 skb->next = next_buffer->skb; 443 IXGBE_CB(skb->next)->prev = skb; 444 adapter->non_eop_descs++; 445 goto next_desc; 446 } 447 448 /* we should not be chaining buffers, if we did drop the skb */ 449 if (IXGBE_CB(skb)->prev) { 450 do { 451 struct sk_buff *this = skb; 452 skb = IXGBE_CB(skb)->prev; 453 dev_kfree_skb(this); 454 } while (skb); 455 goto next_desc; 456 } 457 458 /* ERR_MASK will only have valid bits if EOP set */ 459 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 460 dev_kfree_skb_irq(skb); 461 goto next_desc; 462 } 463 464 ixgbevf_rx_checksum(rx_ring, 
staterr, skb); 465 466 /* probably a little skewed due to removing CRC */ 467 total_rx_bytes += skb->len; 468 total_rx_packets++; 469 470 /* 471 * Work around issue of some types of VM to VM loop back 472 * packets not getting split correctly 473 */ 474 if (staterr & IXGBE_RXD_STAT_LB) { 475 u32 header_fixup_len = skb_headlen(skb); 476 if (header_fixup_len < 14) 477 skb_push(skb, header_fixup_len); 478 } 479 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 480 481 /* Workaround hardware that can't do proper VEPA multicast 482 * source pruning. 483 */ 484 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && 485 !(compare_ether_addr(adapter->netdev->dev_addr, 486 eth_hdr(skb)->h_source))) { 487 dev_kfree_skb_irq(skb); 488 goto next_desc; 489 } 490 491 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc); 492 493next_desc: 494 rx_desc->wb.upper.status_error = 0; 495 496 /* return some buffers to hardware, one at a time is too slow */ 497 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { 498 ixgbevf_alloc_rx_buffers(adapter, rx_ring, 499 cleaned_count); 500 cleaned_count = 0; 501 } 502 503 /* use prefetched values */ 504 rx_desc = next_rxd; 505 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 506 507 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 508 } 509 510 rx_ring->next_to_clean = i; 511 cleaned_count = IXGBE_DESC_UNUSED(rx_ring); 512 513 if (cleaned_count) 514 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 515 516 u64_stats_update_begin(&rx_ring->syncp); 517 rx_ring->total_packets += total_rx_packets; 518 rx_ring->total_bytes += total_rx_bytes; 519 u64_stats_update_end(&rx_ring->syncp); 520 q_vector->rx.total_packets += total_rx_packets; 521 q_vector->rx.total_bytes += total_rx_bytes; 522 523 return !!budget; 524} 525 526/** 527 * ixgbevf_poll - NAPI polling calback 528 * @napi: napi struct with our devices info in it 529 * @budget: amount of work driver is allowed to do this pass, in packets 530 * 531 * This function will clean more than one or more rings associated with a 532 * q_vector. 
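 *
 * The Rx budget is divided across the Rx rings attached to this vector;
 * for example, with a budget of 64 and two Rx rings each ring is polled
 * with per_ring_budget = max(64 / 2, 1) = 32, while Tx rings are always
 * cleaned in full.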
533 **/ 534static int ixgbevf_poll(struct napi_struct *napi, int budget) 535{ 536 struct ixgbevf_q_vector *q_vector = 537 container_of(napi, struct ixgbevf_q_vector, napi); 538 struct ixgbevf_adapter *adapter = q_vector->adapter; 539 struct ixgbevf_ring *ring; 540 int per_ring_budget; 541 bool clean_complete = true; 542 543 ixgbevf_for_each_ring(ring, q_vector->tx) 544 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); 545 546 /* attempt to distribute budget to each queue fairly, but don't allow 547 * the budget to go below 1 because we'll exit polling */ 548 if (q_vector->rx.count > 1) 549 per_ring_budget = max(budget/q_vector->rx.count, 1); 550 else 551 per_ring_budget = budget; 552 553 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 554 ixgbevf_for_each_ring(ring, q_vector->rx) 555 clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring, 556 per_ring_budget); 557 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 558 559 /* If all work not completed, return budget and keep polling */ 560 if (!clean_complete) 561 return budget; 562 /* all work done, exit the polling mode */ 563 napi_complete(napi); 564 if (adapter->rx_itr_setting & 1) 565 ixgbevf_set_itr(q_vector); 566 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 567 ixgbevf_irq_enable_queues(adapter, 568 1 << q_vector->v_idx); 569 570 return 0; 571} 572 573/** 574 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 575 * @q_vector: structure containing interrupt and ring information 576 */ 577static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) 578{ 579 struct ixgbevf_adapter *adapter = q_vector->adapter; 580 struct ixgbe_hw *hw = &adapter->hw; 581 int v_idx = q_vector->v_idx; 582 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; 583 584 /* 585 * set the WDIS bit to not clear the timer bits and cause an 586 * immediate assertion of the interrupt 587 */ 588 itr_reg |= IXGBE_EITR_CNT_WDIS; 589 590 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 591} 592 593/** 594 * ixgbevf_configure_msix - Configure MSI-X hardware 595 * @adapter: board private structure 596 * 597 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X 598 * interrupts. 599 **/ 600static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 601{ 602 struct ixgbevf_q_vector *q_vector; 603 int q_vectors, v_idx; 604 605 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 606 adapter->eims_enable_mask = 0; 607 608 /* 609 * Populate the IVAR table and set the ITR values to the 610 * corresponding register. 
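	 * For example, a vector v_idx that services Rx queue 0 and Tx queue 0
	 * is programmed with ixgbevf_set_ivar(adapter, 0, 0, v_idx) and
	 * ixgbevf_set_ivar(adapter, 1, 0, v_idx); each IVAR field holds the
	 * 8-bit vector index with IXGBE_IVAR_ALLOC_VAL set.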
611 */ 612 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 613 struct ixgbevf_ring *ring; 614 q_vector = adapter->q_vector[v_idx]; 615 616 ixgbevf_for_each_ring(ring, q_vector->rx) 617 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); 618 619 ixgbevf_for_each_ring(ring, q_vector->tx) 620 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); 621 622 if (q_vector->tx.ring && !q_vector->rx.ring) { 623 /* tx only vector */ 624 if (adapter->tx_itr_setting == 1) 625 q_vector->itr = IXGBE_10K_ITR; 626 else 627 q_vector->itr = adapter->tx_itr_setting; 628 } else { 629 /* rx or rx/tx vector */ 630 if (adapter->rx_itr_setting == 1) 631 q_vector->itr = IXGBE_20K_ITR; 632 else 633 q_vector->itr = adapter->rx_itr_setting; 634 } 635 636 /* add q_vector eims value to global eims_enable_mask */ 637 adapter->eims_enable_mask |= 1 << v_idx; 638 639 ixgbevf_write_eitr(q_vector); 640 } 641 642 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 643 /* setup eims_other and add value to global eims_enable_mask */ 644 adapter->eims_other = 1 << v_idx; 645 adapter->eims_enable_mask |= adapter->eims_other; 646} 647 648enum latency_range { 649 lowest_latency = 0, 650 low_latency = 1, 651 bulk_latency = 2, 652 latency_invalid = 255 653}; 654 655/** 656 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 657 * @q_vector: structure containing interrupt and ring information 658 * @ring_container: structure containing ring performance data 659 * 660 * Stores a new ITR value based on packets and byte 661 * counts during the last interrupt. The advantage of per interrupt 662 * computation is faster updates and more accurate ITR for the current 663 * traffic pattern. Constants in this function were computed 664 * based on theoretical maximum wire speed and thresholds were set based 665 * on testing data as well as attempting to minimize response time 666 * while increasing bulk throughput. 667 **/ 668static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, 669 struct ixgbevf_ring_container *ring_container) 670{ 671 int bytes = ring_container->total_bytes; 672 int packets = ring_container->total_packets; 673 u32 timepassed_us; 674 u64 bytes_perint; 675 u8 itr_setting = ring_container->itr; 676 677 if (packets == 0) 678 return; 679 680 /* simple throttlerate management 681 * 0-20MB/s lowest (100000 ints/s) 682 * 20-100MB/s low (20000 ints/s) 683 * 100-1249MB/s bulk (8000 ints/s) 684 */ 685 /* what was last interrupt timeslice? 
*/ 686 timepassed_us = q_vector->itr >> 2; 687 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 688 689 switch (itr_setting) { 690 case lowest_latency: 691 if (bytes_perint > 10) 692 itr_setting = low_latency; 693 break; 694 case low_latency: 695 if (bytes_perint > 20) 696 itr_setting = bulk_latency; 697 else if (bytes_perint <= 10) 698 itr_setting = lowest_latency; 699 break; 700 case bulk_latency: 701 if (bytes_perint <= 20) 702 itr_setting = low_latency; 703 break; 704 } 705 706 /* clear work counters since we have the values we need */ 707 ring_container->total_bytes = 0; 708 ring_container->total_packets = 0; 709 710 /* write updated itr to ring container */ 711 ring_container->itr = itr_setting; 712} 713 714static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) 715{ 716 u32 new_itr = q_vector->itr; 717 u8 current_itr; 718 719 ixgbevf_update_itr(q_vector, &q_vector->tx); 720 ixgbevf_update_itr(q_vector, &q_vector->rx); 721 722 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 723 724 switch (current_itr) { 725 /* counts and packets in update_itr are dependent on these numbers */ 726 case lowest_latency: 727 new_itr = IXGBE_100K_ITR; 728 break; 729 case low_latency: 730 new_itr = IXGBE_20K_ITR; 731 break; 732 case bulk_latency: 733 default: 734 new_itr = IXGBE_8K_ITR; 735 break; 736 } 737 738 if (new_itr != q_vector->itr) { 739 /* do an exponential smoothing */ 740 new_itr = (10 * new_itr * q_vector->itr) / 741 ((9 * new_itr) + q_vector->itr); 742 743 /* save the algorithm value here */ 744 q_vector->itr = new_itr; 745 746 ixgbevf_write_eitr(q_vector); 747 } 748} 749 750static irqreturn_t ixgbevf_msix_other(int irq, void *data) 751{ 752 struct ixgbevf_adapter *adapter = data; 753 struct pci_dev *pdev = adapter->pdev; 754 struct ixgbe_hw *hw = &adapter->hw; 755 u32 msg; 756 bool got_ack = false; 757 758 hw->mac.get_link_status = 1; 759 if (!hw->mbx.ops.check_for_ack(hw)) 760 got_ack = true; 761 762 if (!hw->mbx.ops.check_for_msg(hw)) { 763 hw->mbx.ops.read(hw, &msg, 1); 764 765 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) 766 mod_timer(&adapter->watchdog_timer, 767 round_jiffies(jiffies + 1)); 768 769 if (msg & IXGBE_VT_MSGTYPE_NACK) 770 dev_info(&pdev->dev, 771 "Last Request of type %2.2x to PF Nacked\n", 772 msg & 0xFF); 773 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS; 774 } 775 776 /* checking for the ack clears the PFACK bit. 
Place 777 * it back in the v2p_mailbox cache so that anyone 778 * polling for an ack will not miss it 779 */ 780 if (got_ack) 781 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; 782 783 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 784 785 return IRQ_HANDLED; 786} 787 788/** 789 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) 790 * @irq: unused 791 * @data: pointer to our q_vector struct for this interrupt vector 792 **/ 793static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) 794{ 795 struct ixgbevf_q_vector *q_vector = data; 796 797 /* EIAM disabled interrupts (on this vector) for us */ 798 if (q_vector->rx.ring || q_vector->tx.ring) 799 napi_schedule(&q_vector->napi); 800 801 return IRQ_HANDLED; 802} 803 804static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, 805 int r_idx) 806{ 807 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 808 809 a->rx_ring[r_idx].next = q_vector->rx.ring; 810 q_vector->rx.ring = &a->rx_ring[r_idx]; 811 q_vector->rx.count++; 812} 813 814static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, 815 int t_idx) 816{ 817 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 818 819 a->tx_ring[t_idx].next = q_vector->tx.ring; 820 q_vector->tx.ring = &a->tx_ring[t_idx]; 821 q_vector->tx.count++; 822} 823 824/** 825 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors 826 * @adapter: board private structure to initialize 827 * 828 * This function maps descriptor rings to the queue-specific vectors 829 * we were allotted through the MSI-X enabling code. Ideally, we'd have 830 * one vector per ring/queue, but on a constrained vector budget, we 831 * group the rings as "efficiently" as possible. You would add new 832 * mapping configurations in here. 833 **/ 834static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) 835{ 836 int q_vectors; 837 int v_start = 0; 838 int rxr_idx = 0, txr_idx = 0; 839 int rxr_remaining = adapter->num_rx_queues; 840 int txr_remaining = adapter->num_tx_queues; 841 int i, j; 842 int rqpv, tqpv; 843 int err = 0; 844 845 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 846 847 /* 848 * The ideal configuration... 849 * We have enough vectors to map one per queue. 850 */ 851 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 852 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 853 map_vector_to_rxq(adapter, v_start, rxr_idx); 854 855 for (; txr_idx < txr_remaining; v_start++, txr_idx++) 856 map_vector_to_txq(adapter, v_start, txr_idx); 857 goto out; 858 } 859 860 /* 861 * If we don't have enough vectors for a 1-to-1 862 * mapping, we'll have to group them so there are 863 * multiple queues per vector. 864 */ 865 /* Re-adjusting *qpv takes care of the remainder. */ 866 for (i = v_start; i < q_vectors; i++) { 867 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); 868 for (j = 0; j < rqpv; j++) { 869 map_vector_to_rxq(adapter, i, rxr_idx); 870 rxr_idx++; 871 rxr_remaining--; 872 } 873 } 874 for (i = v_start; i < q_vectors; i++) { 875 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); 876 for (j = 0; j < tqpv; j++) { 877 map_vector_to_txq(adapter, i, txr_idx); 878 txr_idx++; 879 txr_remaining--; 880 } 881 } 882 883out: 884 return err; 885} 886 887/** 888 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts 889 * @adapter: board private structure 890 * 891 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests 892 * interrupts from the kernel. 
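 *
 * Each queue vector is named "<netdev>-TxRx-<n>", "<netdev>-rx-<n>" or
 * "<netdev>-tx-<n>" (e.g. "eth0-TxRx-0" for a combined Tx/Rx vector), and
 * the final vector is requested for the mailbox/other cause using the
 * netdev name alone.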
893 **/ 894static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 895{ 896 struct net_device *netdev = adapter->netdev; 897 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 898 int vector, err; 899 int ri = 0, ti = 0; 900 901 for (vector = 0; vector < q_vectors; vector++) { 902 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; 903 struct msix_entry *entry = &adapter->msix_entries[vector]; 904 905 if (q_vector->tx.ring && q_vector->rx.ring) { 906 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 907 "%s-%s-%d", netdev->name, "TxRx", ri++); 908 ti++; 909 } else if (q_vector->rx.ring) { 910 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 911 "%s-%s-%d", netdev->name, "rx", ri++); 912 } else if (q_vector->tx.ring) { 913 snprintf(q_vector->name, sizeof(q_vector->name) - 1, 914 "%s-%s-%d", netdev->name, "tx", ti++); 915 } else { 916 /* skip this unused q_vector */ 917 continue; 918 } 919 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, 920 q_vector->name, q_vector); 921 if (err) { 922 hw_dbg(&adapter->hw, 923 "request_irq failed for MSIX interrupt " 924 "Error: %d\n", err); 925 goto free_queue_irqs; 926 } 927 } 928 929 err = request_irq(adapter->msix_entries[vector].vector, 930 &ixgbevf_msix_other, 0, netdev->name, adapter); 931 if (err) { 932 hw_dbg(&adapter->hw, 933 "request_irq for msix_other failed: %d\n", err); 934 goto free_queue_irqs; 935 } 936 937 return 0; 938 939free_queue_irqs: 940 while (vector) { 941 vector--; 942 free_irq(adapter->msix_entries[vector].vector, 943 adapter->q_vector[vector]); 944 } 945 pci_disable_msix(adapter->pdev); 946 kfree(adapter->msix_entries); 947 adapter->msix_entries = NULL; 948 return err; 949} 950 951static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) 952{ 953 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 954 955 for (i = 0; i < q_vectors; i++) { 956 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; 957 q_vector->rx.ring = NULL; 958 q_vector->tx.ring = NULL; 959 q_vector->rx.count = 0; 960 q_vector->tx.count = 0; 961 } 962} 963 964/** 965 * ixgbevf_request_irq - initialize interrupts 966 * @adapter: board private structure 967 * 968 * Attempts to configure interrupts using the best available 969 * capabilities of the hardware and kernel. 
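 *
 * The VF only supports MSI-X, so this simply calls
 * ixgbevf_request_msix_irqs() and logs any failure.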
970 **/ 971static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) 972{ 973 int err = 0; 974 975 err = ixgbevf_request_msix_irqs(adapter); 976 977 if (err) 978 hw_dbg(&adapter->hw, 979 "request_irq failed, Error %d\n", err); 980 981 return err; 982} 983 984static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 985{ 986 int i, q_vectors; 987 988 q_vectors = adapter->num_msix_vectors; 989 i = q_vectors - 1; 990 991 free_irq(adapter->msix_entries[i].vector, adapter); 992 i--; 993 994 for (; i >= 0; i--) { 995 /* free only the irqs that were actually requested */ 996 if (!adapter->q_vector[i]->rx.ring && 997 !adapter->q_vector[i]->tx.ring) 998 continue; 999 1000 free_irq(adapter->msix_entries[i].vector, 1001 adapter->q_vector[i]); 1002 } 1003 1004 ixgbevf_reset_q_vectors(adapter); 1005} 1006 1007/** 1008 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC 1009 * @adapter: board private structure 1010 **/ 1011static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 1012{ 1013 struct ixgbe_hw *hw = &adapter->hw; 1014 int i; 1015 1016 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); 1017 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 1018 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); 1019 1020 IXGBE_WRITE_FLUSH(hw); 1021 1022 for (i = 0; i < adapter->num_msix_vectors; i++) 1023 synchronize_irq(adapter->msix_entries[i].vector); 1024} 1025 1026/** 1027 * ixgbevf_irq_enable - Enable default interrupt generation settings 1028 * @adapter: board private structure 1029 **/ 1030static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) 1031{ 1032 struct ixgbe_hw *hw = &adapter->hw; 1033 1034 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); 1035 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); 1036 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); 1037} 1038 1039/** 1040 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset 1041 * @adapter: board private structure 1042 * 1043 * Configure the Tx unit of the MAC after a reset. 1044 **/ 1045static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) 1046{ 1047 u64 tdba; 1048 struct ixgbe_hw *hw = &adapter->hw; 1049 u32 i, j, tdlen, txctrl; 1050 1051 /* Setup the HW Tx Head and Tail descriptor pointers */ 1052 for (i = 0; i < adapter->num_tx_queues; i++) { 1053 struct ixgbevf_ring *ring = &adapter->tx_ring[i]; 1054 j = ring->reg_idx; 1055 tdba = ring->dma; 1056 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 1057 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), 1058 (tdba & DMA_BIT_MASK(32))); 1059 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); 1060 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen); 1061 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0); 1062 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0); 1063 adapter->tx_ring[i].head = IXGBE_VFTDH(j); 1064 adapter->tx_ring[i].tail = IXGBE_VFTDT(j); 1065 /* Disable Tx Head Writeback RO bit, since this hoses 1066 * bookkeeping if things aren't delivered in order. 
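		 * The read-modify-write below clears only the
		 * IXGBE_DCA_TXCTRL_TX_WB_RO_EN bit in VFDCA_TXCTRL and leaves
		 * the rest of the register untouched.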
1067 */ 1068 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); 1069 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1070 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); 1071 } 1072} 1073 1074#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1075 1076static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) 1077{ 1078 struct ixgbevf_ring *rx_ring; 1079 struct ixgbe_hw *hw = &adapter->hw; 1080 u32 srrctl; 1081 1082 rx_ring = &adapter->rx_ring[index]; 1083 1084 srrctl = IXGBE_SRRCTL_DROP_EN; 1085 1086 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1087 1088 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> 1089 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1090 1091 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1092} 1093 1094static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) 1095{ 1096 struct ixgbe_hw *hw = &adapter->hw; 1097 struct net_device *netdev = adapter->netdev; 1098 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1099 int i; 1100 u16 rx_buf_len; 1101 1102 /* notify the PF of our intent to use this size of frame */ 1103 ixgbevf_rlpml_set_vf(hw, max_frame); 1104 1105 /* PF will allow an extra 4 bytes past for vlan tagged frames */ 1106 max_frame += VLAN_HLEN; 1107 1108 /* 1109 * Allocate buffer sizes that fit well into 32K and 1110 * take into account max frame size of 9.5K 1111 */ 1112 if ((hw->mac.type == ixgbe_mac_X540_vf) && 1113 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) 1114 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1115 else if (max_frame <= IXGBEVF_RXBUFFER_2K) 1116 rx_buf_len = IXGBEVF_RXBUFFER_2K; 1117 else if (max_frame <= IXGBEVF_RXBUFFER_4K) 1118 rx_buf_len = IXGBEVF_RXBUFFER_4K; 1119 else if (max_frame <= IXGBEVF_RXBUFFER_8K) 1120 rx_buf_len = IXGBEVF_RXBUFFER_8K; 1121 else 1122 rx_buf_len = IXGBEVF_RXBUFFER_10K; 1123 1124 for (i = 0; i < adapter->num_rx_queues; i++) 1125 adapter->rx_ring[i].rx_buf_len = rx_buf_len; 1126} 1127 1128/** 1129 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1130 * @adapter: board private structure 1131 * 1132 * Configure the Rx unit of the MAC after a reset. 
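 *
 * The Rx buffer size is negotiated with the PF first (see
 * ixgbevf_set_rx_buffer_len), then each ring's base address, length and
 * head/tail registers are programmed and SRRCTL is configured per queue.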
1133 **/ 1134static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) 1135{ 1136 u64 rdba; 1137 struct ixgbe_hw *hw = &adapter->hw; 1138 int i, j; 1139 u32 rdlen; 1140 1141 /* PSRTYPE must be initialized in 82599 */ 1142 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); 1143 1144 /* set_rx_buffer_len must be called before ring initialization */ 1145 ixgbevf_set_rx_buffer_len(adapter); 1146 1147 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1148 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1149 * the Base and Length of the Rx Descriptor Ring */ 1150 for (i = 0; i < adapter->num_rx_queues; i++) { 1151 rdba = adapter->rx_ring[i].dma; 1152 j = adapter->rx_ring[i].reg_idx; 1153 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1154 (rdba & DMA_BIT_MASK(32))); 1155 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1156 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen); 1157 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0); 1158 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); 1159 adapter->rx_ring[i].head = IXGBE_VFRDH(j); 1160 adapter->rx_ring[i].tail = IXGBE_VFRDT(j); 1161 1162 ixgbevf_configure_srrctl(adapter, j); 1163 } 1164} 1165 1166static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1167{ 1168 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1169 struct ixgbe_hw *hw = &adapter->hw; 1170 int err; 1171 1172 spin_lock_bh(&adapter->mbx_lock); 1173 1174 /* add VID to filter table */ 1175 err = hw->mac.ops.set_vfta(hw, vid, 0, true); 1176 1177 spin_unlock_bh(&adapter->mbx_lock); 1178 1179 /* translate error return types so error makes sense */ 1180 if (err == IXGBE_ERR_MBX) 1181 return -EIO; 1182 1183 if (err == IXGBE_ERR_INVALID_ARGUMENT) 1184 return -EACCES; 1185 1186 set_bit(vid, adapter->active_vlans); 1187 1188 return err; 1189} 1190 1191static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1192{ 1193 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1194 struct ixgbe_hw *hw = &adapter->hw; 1195 int err = -EOPNOTSUPP; 1196 1197 spin_lock_bh(&adapter->mbx_lock); 1198 1199 /* remove VID from filter table */ 1200 err = hw->mac.ops.set_vfta(hw, vid, 0, false); 1201 1202 spin_unlock_bh(&adapter->mbx_lock); 1203 1204 clear_bit(vid, adapter->active_vlans); 1205 1206 return err; 1207} 1208 1209static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 1210{ 1211 u16 vid; 1212 1213 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1214 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); 1215} 1216 1217static int ixgbevf_write_uc_addr_list(struct net_device *netdev) 1218{ 1219 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1220 struct ixgbe_hw *hw = &adapter->hw; 1221 int count = 0; 1222 1223 if ((netdev_uc_count(netdev)) > 10) { 1224 pr_err("Too many unicast filters - No Space\n"); 1225 return -ENOSPC; 1226 } 1227 1228 if (!netdev_uc_empty(netdev)) { 1229 struct netdev_hw_addr *ha; 1230 netdev_for_each_uc_addr(ha, netdev) { 1231 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); 1232 udelay(200); 1233 } 1234 } else { 1235 /* 1236 * If the list is empty then send message to PF driver to 1237 * clear all macvlans on this VF. 1238 */ 1239 hw->mac.ops.set_uc_addr(hw, 0, NULL); 1240 } 1241 1242 return count; 1243} 1244 1245/** 1246 * ixgbevf_set_rx_mode - Multicast and unicast set 1247 * @netdev: network interface device structure 1248 * 1249 * The set_rx_method entry point is called whenever the multicast address 1250 * list, unicast address list or the network interface flags are updated. 
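 * (In this driver that entry point is the ndo_set_rx_mode callback,
 * ixgbevf_set_rx_mode.)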
1251 * This routine is responsible for configuring the hardware for proper 1252 * multicast mode and configuring requested unicast filters. 1253 **/ 1254static void ixgbevf_set_rx_mode(struct net_device *netdev) 1255{ 1256 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1257 struct ixgbe_hw *hw = &adapter->hw; 1258 1259 spin_lock_bh(&adapter->mbx_lock); 1260 1261 /* reprogram multicast list */ 1262 hw->mac.ops.update_mc_addr_list(hw, netdev); 1263 1264 ixgbevf_write_uc_addr_list(netdev); 1265 1266 spin_unlock_bh(&adapter->mbx_lock); 1267} 1268 1269static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1270{ 1271 int q_idx; 1272 struct ixgbevf_q_vector *q_vector; 1273 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1274 1275 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1276 q_vector = adapter->q_vector[q_idx]; 1277 napi_enable(&q_vector->napi); 1278 } 1279} 1280 1281static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) 1282{ 1283 int q_idx; 1284 struct ixgbevf_q_vector *q_vector; 1285 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1286 1287 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1288 q_vector = adapter->q_vector[q_idx]; 1289 napi_disable(&q_vector->napi); 1290 } 1291} 1292 1293static void ixgbevf_configure(struct ixgbevf_adapter *adapter) 1294{ 1295 struct net_device *netdev = adapter->netdev; 1296 int i; 1297 1298 ixgbevf_set_rx_mode(netdev); 1299 1300 ixgbevf_restore_vlan(adapter); 1301 1302 ixgbevf_configure_tx(adapter); 1303 ixgbevf_configure_rx(adapter); 1304 for (i = 0; i < adapter->num_rx_queues; i++) { 1305 struct ixgbevf_ring *ring = &adapter->rx_ring[i]; 1306 ixgbevf_alloc_rx_buffers(adapter, ring, 1307 IXGBE_DESC_UNUSED(ring)); 1308 } 1309} 1310 1311#define IXGBE_MAX_RX_DESC_POLL 10 1312static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, 1313 int rxr) 1314{ 1315 struct ixgbe_hw *hw = &adapter->hw; 1316 int j = adapter->rx_ring[rxr].reg_idx; 1317 int k; 1318 1319 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { 1320 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) 1321 break; 1322 else 1323 msleep(1); 1324 } 1325 if (k >= IXGBE_MAX_RX_DESC_POLL) { 1326 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d " 1327 "not set within the polling period\n", rxr); 1328 } 1329 1330 ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr], 1331 adapter->rx_ring[rxr].count - 1); 1332} 1333 1334static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) 1335{ 1336 /* Only save pre-reset stats if there are some */ 1337 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { 1338 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - 1339 adapter->stats.base_vfgprc; 1340 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - 1341 adapter->stats.base_vfgptc; 1342 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - 1343 adapter->stats.base_vfgorc; 1344 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - 1345 adapter->stats.base_vfgotc; 1346 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - 1347 adapter->stats.base_vfmprc; 1348 } 1349} 1350 1351static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) 1352{ 1353 struct ixgbe_hw *hw = &adapter->hw; 1354 1355 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 1356 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 1357 adapter->stats.last_vfgorc |= 1358 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 1359 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, 
IXGBE_VFGPTC); 1360 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 1361 adapter->stats.last_vfgotc |= 1362 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 1363 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 1364 1365 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; 1366 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; 1367 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; 1368 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; 1369 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1370} 1371 1372static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) 1373{ 1374 struct ixgbe_hw *hw = &adapter->hw; 1375 int api[] = { ixgbe_mbox_api_11, 1376 ixgbe_mbox_api_10, 1377 ixgbe_mbox_api_unknown }; 1378 int err = 0, idx = 0; 1379 1380 spin_lock_bh(&adapter->mbx_lock); 1381 1382 while (api[idx] != ixgbe_mbox_api_unknown) { 1383 err = ixgbevf_negotiate_api_version(hw, api[idx]); 1384 if (!err) 1385 break; 1386 idx++; 1387 } 1388 1389 spin_unlock_bh(&adapter->mbx_lock); 1390} 1391 1392static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1393{ 1394 struct net_device *netdev = adapter->netdev; 1395 struct ixgbe_hw *hw = &adapter->hw; 1396 int i, j = 0; 1397 int num_rx_rings = adapter->num_rx_queues; 1398 u32 txdctl, rxdctl; 1399 1400 for (i = 0; i < adapter->num_tx_queues; i++) { 1401 j = adapter->tx_ring[i].reg_idx; 1402 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1403 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 1404 txdctl |= (8 << 16); 1405 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1406 } 1407 1408 for (i = 0; i < adapter->num_tx_queues; i++) { 1409 j = adapter->tx_ring[i].reg_idx; 1410 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1411 txdctl |= IXGBE_TXDCTL_ENABLE; 1412 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1413 } 1414 1415 for (i = 0; i < num_rx_rings; i++) { 1416 j = adapter->rx_ring[i].reg_idx; 1417 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1418 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1419 if (hw->mac.type == ixgbe_mac_X540_vf) { 1420 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; 1421 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) | 1422 IXGBE_RXDCTL_RLPML_EN); 1423 } 1424 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1425 ixgbevf_rx_desc_queue_enable(adapter, i); 1426 } 1427 1428 ixgbevf_configure_msix(adapter); 1429 1430 spin_lock_bh(&adapter->mbx_lock); 1431 1432 if (is_valid_ether_addr(hw->mac.addr)) 1433 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 1434 else 1435 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1436 1437 spin_unlock_bh(&adapter->mbx_lock); 1438 1439 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1440 ixgbevf_napi_enable_all(adapter); 1441 1442 /* enable transmits */ 1443 netif_tx_start_all_queues(netdev); 1444 1445 ixgbevf_save_reset_stats(adapter); 1446 ixgbevf_init_last_counter_stats(adapter); 1447 1448 hw->mac.get_link_status = 1; 1449 mod_timer(&adapter->watchdog_timer, jiffies); 1450} 1451 1452static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter) 1453{ 1454 struct ixgbe_hw *hw = &adapter->hw; 1455 struct ixgbevf_ring *rx_ring; 1456 unsigned int def_q = 0; 1457 unsigned int num_tcs = 0; 1458 unsigned int num_rx_queues = 1; 1459 int err, i; 1460 1461 spin_lock_bh(&adapter->mbx_lock); 1462 1463 /* fetch queue configuration from the PF */ 1464 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 1465 1466 spin_unlock_bh(&adapter->mbx_lock); 1467 1468 if (err) 1469 return err; 1470 1471 if (num_tcs > 1) { 1472 /* update 
default Tx ring register index */ 1473 adapter->tx_ring[0].reg_idx = def_q; 1474 1475 /* we need as many queues as traffic classes */ 1476 num_rx_queues = num_tcs; 1477 } 1478 1479 /* nothing to do if we have the correct number of queues */ 1480 if (adapter->num_rx_queues == num_rx_queues) 1481 return 0; 1482 1483 /* allocate new rings */ 1484 rx_ring = kcalloc(num_rx_queues, 1485 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1486 if (!rx_ring) 1487 return -ENOMEM; 1488 1489 /* setup ring fields */ 1490 for (i = 0; i < num_rx_queues; i++) { 1491 rx_ring[i].count = adapter->rx_ring_count; 1492 rx_ring[i].queue_index = i; 1493 rx_ring[i].reg_idx = i; 1494 rx_ring[i].dev = &adapter->pdev->dev; 1495 rx_ring[i].netdev = adapter->netdev; 1496 1497 /* allocate resources on the ring */ 1498 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); 1499 if (err) { 1500 while (i) { 1501 i--; 1502 ixgbevf_free_rx_resources(adapter, &rx_ring[i]); 1503 } 1504 kfree(rx_ring); 1505 return err; 1506 } 1507 } 1508 1509 /* free the existing rings and queues */ 1510 ixgbevf_free_all_rx_resources(adapter); 1511 adapter->num_rx_queues = 0; 1512 kfree(adapter->rx_ring); 1513 1514 /* move new rings into position on the adapter struct */ 1515 adapter->rx_ring = rx_ring; 1516 adapter->num_rx_queues = num_rx_queues; 1517 1518 /* reset ring to vector mapping */ 1519 ixgbevf_reset_q_vectors(adapter); 1520 ixgbevf_map_rings_to_vectors(adapter); 1521 1522 return 0; 1523} 1524 1525void ixgbevf_up(struct ixgbevf_adapter *adapter) 1526{ 1527 struct ixgbe_hw *hw = &adapter->hw; 1528 1529 ixgbevf_negotiate_api(adapter); 1530 1531 ixgbevf_reset_queues(adapter); 1532 1533 ixgbevf_configure(adapter); 1534 1535 ixgbevf_up_complete(adapter); 1536 1537 /* clear any pending interrupts, may auto mask */ 1538 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1539 1540 ixgbevf_irq_enable(adapter); 1541} 1542 1543/** 1544 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 1545 * @adapter: board private structure 1546 * @rx_ring: ring to free buffers from 1547 **/ 1548static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, 1549 struct ixgbevf_ring *rx_ring) 1550{ 1551 struct pci_dev *pdev = adapter->pdev; 1552 unsigned long size; 1553 unsigned int i; 1554 1555 if (!rx_ring->rx_buffer_info) 1556 return; 1557 1558 /* Free all the Rx ring sk_buffs */ 1559 for (i = 0; i < rx_ring->count; i++) { 1560 struct ixgbevf_rx_buffer *rx_buffer_info; 1561 1562 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1563 if (rx_buffer_info->dma) { 1564 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 1565 rx_ring->rx_buf_len, 1566 DMA_FROM_DEVICE); 1567 rx_buffer_info->dma = 0; 1568 } 1569 if (rx_buffer_info->skb) { 1570 struct sk_buff *skb = rx_buffer_info->skb; 1571 rx_buffer_info->skb = NULL; 1572 do { 1573 struct sk_buff *this = skb; 1574 skb = IXGBE_CB(skb)->prev; 1575 dev_kfree_skb(this); 1576 } while (skb); 1577 } 1578 } 1579 1580 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 1581 memset(rx_ring->rx_buffer_info, 0, size); 1582 1583 /* Zero out the descriptor ring */ 1584 memset(rx_ring->desc, 0, rx_ring->size); 1585 1586 rx_ring->next_to_clean = 0; 1587 rx_ring->next_to_use = 0; 1588 1589 if (rx_ring->head) 1590 writel(0, adapter->hw.hw_addr + rx_ring->head); 1591 if (rx_ring->tail) 1592 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1593} 1594 1595/** 1596 * ixgbevf_clean_tx_ring - Free Tx Buffers 1597 * @adapter: board private structure 1598 * @tx_ring: ring to be cleaned 1599 **/ 1600static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter 
*adapter, 1601 struct ixgbevf_ring *tx_ring) 1602{ 1603 struct ixgbevf_tx_buffer *tx_buffer_info; 1604 unsigned long size; 1605 unsigned int i; 1606 1607 if (!tx_ring->tx_buffer_info) 1608 return; 1609 1610 /* Free all the Tx ring sk_buffs */ 1611 for (i = 0; i < tx_ring->count; i++) { 1612 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1613 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1614 } 1615 1616 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 1617 memset(tx_ring->tx_buffer_info, 0, size); 1618 1619 memset(tx_ring->desc, 0, tx_ring->size); 1620 1621 tx_ring->next_to_use = 0; 1622 tx_ring->next_to_clean = 0; 1623 1624 if (tx_ring->head) 1625 writel(0, adapter->hw.hw_addr + tx_ring->head); 1626 if (tx_ring->tail) 1627 writel(0, adapter->hw.hw_addr + tx_ring->tail); 1628} 1629 1630/** 1631 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues 1632 * @adapter: board private structure 1633 **/ 1634static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) 1635{ 1636 int i; 1637 1638 for (i = 0; i < adapter->num_rx_queues; i++) 1639 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]); 1640} 1641 1642/** 1643 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues 1644 * @adapter: board private structure 1645 **/ 1646static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) 1647{ 1648 int i; 1649 1650 for (i = 0; i < adapter->num_tx_queues; i++) 1651 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1652} 1653 1654void ixgbevf_down(struct ixgbevf_adapter *adapter) 1655{ 1656 struct net_device *netdev = adapter->netdev; 1657 struct ixgbe_hw *hw = &adapter->hw; 1658 u32 txdctl; 1659 int i, j; 1660 1661 /* signal that we are down to the interrupt handler */ 1662 set_bit(__IXGBEVF_DOWN, &adapter->state); 1663 /* disable receives */ 1664 1665 netif_tx_disable(netdev); 1666 1667 msleep(10); 1668 1669 netif_tx_stop_all_queues(netdev); 1670 1671 ixgbevf_irq_disable(adapter); 1672 1673 ixgbevf_napi_disable_all(adapter); 1674 1675 del_timer_sync(&adapter->watchdog_timer); 1676 /* can't call flush scheduled work here because it can deadlock 1677 * if linkwatch_event tries to acquire the rtnl_lock which we are 1678 * holding */ 1679 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) 1680 msleep(1); 1681 1682 /* disable transmits in the hardware now that interrupts are off */ 1683 for (i = 0; i < adapter->num_tx_queues; i++) { 1684 j = adapter->tx_ring[i].reg_idx; 1685 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1686 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), 1687 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 1688 } 1689 1690 netif_carrier_off(netdev); 1691 1692 if (!pci_channel_offline(adapter->pdev)) 1693 ixgbevf_reset(adapter); 1694 1695 ixgbevf_clean_all_tx_rings(adapter); 1696 ixgbevf_clean_all_rx_rings(adapter); 1697} 1698 1699void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1700{ 1701 WARN_ON(in_interrupt()); 1702 1703 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1704 msleep(1); 1705 1706 ixgbevf_down(adapter); 1707 ixgbevf_up(adapter); 1708 1709 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1710} 1711 1712void ixgbevf_reset(struct ixgbevf_adapter *adapter) 1713{ 1714 struct ixgbe_hw *hw = &adapter->hw; 1715 struct net_device *netdev = adapter->netdev; 1716 1717 if (hw->mac.ops.reset_hw(hw)) 1718 hw_dbg(hw, "PF still resetting\n"); 1719 else 1720 hw->mac.ops.init_hw(hw); 1721 1722 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1723 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1724 netdev->addr_len); 
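		/* keep the reported permanent address in sync as well */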
1725 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1726 netdev->addr_len); 1727 } 1728} 1729 1730static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1731 int vectors) 1732{ 1733 int err = 0; 1734 int vector_threshold; 1735 1736 /* We'll want at least 2 (vector_threshold): 1737 * 1) TxQ[0] + RxQ[0] handler 1738 * 2) Other (Link Status Change, etc.) 1739 */ 1740 vector_threshold = MIN_MSIX_COUNT; 1741 1742 /* The more we get, the more we will assign to Tx/Rx Cleanup 1743 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1744 * Right now, we simply care about how many we'll get; we'll 1745 * set them up later while requesting irq's. 1746 */ 1747 while (vectors >= vector_threshold) { 1748 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1749 vectors); 1750 if (!err || err < 0) /* Success or a nasty failure. */ 1751 break; 1752 else /* err == number of vectors we should try again with */ 1753 vectors = err; 1754 } 1755 1756 if (vectors < vector_threshold) 1757 err = -ENOMEM; 1758 1759 if (err) { 1760 dev_err(&adapter->pdev->dev, 1761 "Unable to allocate MSI-X interrupts\n"); 1762 kfree(adapter->msix_entries); 1763 adapter->msix_entries = NULL; 1764 } else { 1765 /* 1766 * Adjust for only the vectors we'll use, which is minimum 1767 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 1768 * vectors we were allocated. 1769 */ 1770 adapter->num_msix_vectors = vectors; 1771 } 1772 1773 return err; 1774} 1775 1776/** 1777 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent 1778 * @adapter: board private structure to initialize 1779 * 1780 * This is the top level queue allocation routine. The order here is very 1781 * important, starting with the "most" number of features turned on at once, 1782 * and ending with the smallest set of features. This way large combinations 1783 * can be allocated if they're turned on, and smaller combinations are the 1784 * fallthrough conditions. 1785 * 1786 **/ 1787static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) 1788{ 1789 /* Start with base case */ 1790 adapter->num_rx_queues = 1; 1791 adapter->num_tx_queues = 1; 1792} 1793 1794/** 1795 * ixgbevf_alloc_queues - Allocate memory for all rings 1796 * @adapter: board private structure to initialize 1797 * 1798 * We allocate one ring per queue at run-time since we don't know the 1799 * number of queues at compile-time. The polling_netdev array is 1800 * intended for Multiqueue, but should work fine with a single queue. 
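 *
 * Both ring arrays are allocated with kcalloc(); if the Rx array cannot
 * be allocated the Tx array is freed again and -ENOMEM is returned.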
1801 **/ 1802static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) 1803{ 1804 int i; 1805 1806 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 1807 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1808 if (!adapter->tx_ring) 1809 goto err_tx_ring_allocation; 1810 1811 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 1812 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1813 if (!adapter->rx_ring) 1814 goto err_rx_ring_allocation; 1815 1816 for (i = 0; i < adapter->num_tx_queues; i++) { 1817 adapter->tx_ring[i].count = adapter->tx_ring_count; 1818 adapter->tx_ring[i].queue_index = i; 1819 /* reg_idx may be remapped later by DCB config */ 1820 adapter->tx_ring[i].reg_idx = i; 1821 adapter->tx_ring[i].dev = &adapter->pdev->dev; 1822 adapter->tx_ring[i].netdev = adapter->netdev; 1823 } 1824 1825 for (i = 0; i < adapter->num_rx_queues; i++) { 1826 adapter->rx_ring[i].count = adapter->rx_ring_count; 1827 adapter->rx_ring[i].queue_index = i; 1828 adapter->rx_ring[i].reg_idx = i; 1829 adapter->rx_ring[i].dev = &adapter->pdev->dev; 1830 adapter->rx_ring[i].netdev = adapter->netdev; 1831 } 1832 1833 return 0; 1834 1835err_rx_ring_allocation: 1836 kfree(adapter->tx_ring); 1837err_tx_ring_allocation: 1838 return -ENOMEM; 1839} 1840 1841/** 1842 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported 1843 * @adapter: board private structure to initialize 1844 * 1845 * Attempt to configure the interrupts using the best available 1846 * capabilities of the hardware and the kernel. 1847 **/ 1848static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 1849{ 1850 struct net_device *netdev = adapter->netdev; 1851 int err = 0; 1852 int vector, v_budget; 1853 1854 /* 1855 * It's easy to be greedy for MSI-X vectors, but it really 1856 * doesn't do us much good if we have a lot more vectors 1857 * than CPU's. So let's be conservative and only ask for 1858 * (roughly) the same number of vectors as there are CPU's. 1859 * The default is to use pairs of vectors. 1860 */ 1861 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); 1862 v_budget = min_t(int, v_budget, num_online_cpus()); 1863 v_budget += NON_Q_VECTORS; 1864 1865 /* A failure in MSI-X entry allocation isn't fatal, but it does 1866 * mean we disable MSI-X capabilities of the adapter. */ 1867 adapter->msix_entries = kcalloc(v_budget, 1868 sizeof(struct msix_entry), GFP_KERNEL); 1869 if (!adapter->msix_entries) { 1870 err = -ENOMEM; 1871 goto out; 1872 } 1873 1874 for (vector = 0; vector < v_budget; vector++) 1875 adapter->msix_entries[vector].entry = vector; 1876 1877 err = ixgbevf_acquire_msix_vectors(adapter, v_budget); 1878 if (err) 1879 goto out; 1880 1881 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 1882 if (err) 1883 goto out; 1884 1885 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); 1886 1887out: 1888 return err; 1889} 1890 1891/** 1892 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 1893 * @adapter: board private structure to initialize 1894 * 1895 * We allocate one q_vector per queue interrupt. If allocation fails we 1896 * return -ENOMEM. 
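 *
 * Each q_vector is registered with NAPI via netif_napi_add() using
 * ixgbevf_poll and a weight of 64; on a failed allocation the vectors
 * created so far are removed from NAPI and freed before returning.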
1897 **/ 1898static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 1899{ 1900 int q_idx, num_q_vectors; 1901 struct ixgbevf_q_vector *q_vector; 1902 1903 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1904 1905 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1906 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 1907 if (!q_vector) 1908 goto err_out; 1909 q_vector->adapter = adapter; 1910 q_vector->v_idx = q_idx; 1911 netif_napi_add(adapter->netdev, &q_vector->napi, 1912 ixgbevf_poll, 64); 1913 adapter->q_vector[q_idx] = q_vector; 1914 } 1915 1916 return 0; 1917 1918err_out: 1919 while (q_idx) { 1920 q_idx--; 1921 q_vector = adapter->q_vector[q_idx]; 1922 netif_napi_del(&q_vector->napi); 1923 kfree(q_vector); 1924 adapter->q_vector[q_idx] = NULL; 1925 } 1926 return -ENOMEM; 1927} 1928 1929/** 1930 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 1931 * @adapter: board private structure to initialize 1932 * 1933 * This function frees the memory allocated to the q_vectors. In addition if 1934 * NAPI is enabled it will delete any references to the NAPI struct prior 1935 * to freeing the q_vector. 1936 **/ 1937static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 1938{ 1939 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1940 1941 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1942 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 1943 1944 adapter->q_vector[q_idx] = NULL; 1945 netif_napi_del(&q_vector->napi); 1946 kfree(q_vector); 1947 } 1948} 1949 1950/** 1951 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 1952 * @adapter: board private structure 1953 * 1954 **/ 1955static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 1956{ 1957 pci_disable_msix(adapter->pdev); 1958 kfree(adapter->msix_entries); 1959 adapter->msix_entries = NULL; 1960} 1961 1962/** 1963 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 1964 * @adapter: board private structure to initialize 1965 * 1966 **/ 1967static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 1968{ 1969 int err; 1970 1971 /* Number of supported queues */ 1972 ixgbevf_set_num_queues(adapter); 1973 1974 err = ixgbevf_set_interrupt_capability(adapter); 1975 if (err) { 1976 hw_dbg(&adapter->hw, 1977 "Unable to setup interrupt capabilities\n"); 1978 goto err_set_interrupt; 1979 } 1980 1981 err = ixgbevf_alloc_q_vectors(adapter); 1982 if (err) { 1983 hw_dbg(&adapter->hw, "Unable to allocate memory for queue " 1984 "vectors\n"); 1985 goto err_alloc_q_vectors; 1986 } 1987 1988 err = ixgbevf_alloc_queues(adapter); 1989 if (err) { 1990 pr_err("Unable to allocate memory for queues\n"); 1991 goto err_alloc_queues; 1992 } 1993 1994 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " 1995 "Tx Queue count = %u\n", 1996 (adapter->num_rx_queues > 1) ? 
"Enabled" : 1997 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 1998 1999 set_bit(__IXGBEVF_DOWN, &adapter->state); 2000 2001 return 0; 2002err_alloc_queues: 2003 ixgbevf_free_q_vectors(adapter); 2004err_alloc_q_vectors: 2005 ixgbevf_reset_interrupt_capability(adapter); 2006err_set_interrupt: 2007 return err; 2008} 2009 2010/** 2011 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings 2012 * @adapter: board private structure to clear interrupt scheme on 2013 * 2014 * We go through and clear interrupt specific resources and reset the structure 2015 * to pre-load conditions 2016 **/ 2017static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) 2018{ 2019 adapter->num_tx_queues = 0; 2020 adapter->num_rx_queues = 0; 2021 2022 ixgbevf_free_q_vectors(adapter); 2023 ixgbevf_reset_interrupt_capability(adapter); 2024} 2025 2026/** 2027 * ixgbevf_sw_init - Initialize general software structures 2028 * (struct ixgbevf_adapter) 2029 * @adapter: board private structure to initialize 2030 * 2031 * ixgbevf_sw_init initializes the Adapter private data structure. 2032 * Fields are initialized based on PCI device information and 2033 * OS network device settings (MTU size). 2034 **/ 2035static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2036{ 2037 struct ixgbe_hw *hw = &adapter->hw; 2038 struct pci_dev *pdev = adapter->pdev; 2039 int err; 2040 2041 /* PCI config space info */ 2042 2043 hw->vendor_id = pdev->vendor; 2044 hw->device_id = pdev->device; 2045 hw->revision_id = pdev->revision; 2046 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2047 hw->subsystem_device_id = pdev->subsystem_device; 2048 2049 hw->mbx.ops.init_params(hw); 2050 2051 /* assume legacy case in which PF would only give VF 2 queues */ 2052 hw->mac.max_tx_queues = 2; 2053 hw->mac.max_rx_queues = 2; 2054 2055 err = hw->mac.ops.reset_hw(hw); 2056 if (err) { 2057 dev_info(&pdev->dev, 2058 "PF still in reset state, assigning new address\n"); 2059 eth_hw_addr_random(adapter->netdev); 2060 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr, 2061 adapter->netdev->addr_len); 2062 } else { 2063 err = hw->mac.ops.init_hw(hw); 2064 if (err) { 2065 pr_err("init_shared_code failed: %d\n", err); 2066 goto out; 2067 } 2068 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, 2069 adapter->netdev->addr_len); 2070 } 2071 2072 /* lock to protect mailbox accesses */ 2073 spin_lock_init(&adapter->mbx_lock); 2074 2075 /* Enable dynamic interrupt throttling rates */ 2076 adapter->rx_itr_setting = 1; 2077 adapter->tx_itr_setting = 1; 2078 2079 /* set default ring sizes */ 2080 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 2081 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 2082 2083 set_bit(__IXGBEVF_DOWN, &adapter->state); 2084 return 0; 2085 2086out: 2087 return err; 2088} 2089 2090#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2091 { \ 2092 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2093 if (current_counter < last_counter) \ 2094 counter += 0x100000000LL; \ 2095 last_counter = current_counter; \ 2096 counter &= 0xFFFFFFFF00000000LL; \ 2097 counter |= current_counter; \ 2098 } 2099 2100#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2101 { \ 2102 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 2103 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 2104 u64 current_counter = (current_counter_msb << 32) | \ 2105 current_counter_lsb; \ 2106 if (current_counter < last_counter) \ 2107 counter += 0x1000000000LL; \ 2108 
last_counter = current_counter; \ 2109 counter &= 0xFFFFFFF000000000LL; \ 2110 counter |= current_counter; \ 2111 } 2112/** 2113 * ixgbevf_update_stats - Update the board statistics counters. 2114 * @adapter: board private structure 2115 **/ 2116void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2117{ 2118 struct ixgbe_hw *hw = &adapter->hw; 2119 int i; 2120 2121 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2122 adapter->stats.vfgprc); 2123 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2124 adapter->stats.vfgptc); 2125 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2126 adapter->stats.last_vfgorc, 2127 adapter->stats.vfgorc); 2128 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2129 adapter->stats.last_vfgotc, 2130 adapter->stats.vfgotc); 2131 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2132 adapter->stats.vfmprc); 2133 2134 for (i = 0; i < adapter->num_rx_queues; i++) { 2135 adapter->hw_csum_rx_error += 2136 adapter->rx_ring[i].hw_csum_rx_error; 2137 adapter->hw_csum_rx_good += 2138 adapter->rx_ring[i].hw_csum_rx_good; 2139 adapter->rx_ring[i].hw_csum_rx_error = 0; 2140 adapter->rx_ring[i].hw_csum_rx_good = 0; 2141 } 2142} 2143 2144/** 2145 * ixgbevf_watchdog - Timer Call-back 2146 * @data: pointer to adapter cast into an unsigned long 2147 **/ 2148static void ixgbevf_watchdog(unsigned long data) 2149{ 2150 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2151 struct ixgbe_hw *hw = &adapter->hw; 2152 u32 eics = 0; 2153 int i; 2154 2155 /* 2156 * Do the watchdog outside of interrupt context due to the lovely 2157 * delays that some of the newer hardware requires 2158 */ 2159 2160 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2161 goto watchdog_short_circuit; 2162 2163 /* get one bit for every active tx/rx interrupt vector */ 2164 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2165 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2166 if (qv->rx.ring || qv->tx.ring) 2167 eics |= 1 << i; 2168 } 2169 2170 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); 2171 2172watchdog_short_circuit: 2173 schedule_work(&adapter->watchdog_task); 2174} 2175 2176/** 2177 * ixgbevf_tx_timeout - Respond to a Tx Hang 2178 * @netdev: network interface device structure 2179 **/ 2180static void ixgbevf_tx_timeout(struct net_device *netdev) 2181{ 2182 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2183 2184 /* Do the reset outside of interrupt context */ 2185 schedule_work(&adapter->reset_task); 2186} 2187 2188static void ixgbevf_reset_task(struct work_struct *work) 2189{ 2190 struct ixgbevf_adapter *adapter; 2191 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2192 2193 /* If we're already down or resetting, just bail */ 2194 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2195 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2196 return; 2197 2198 adapter->tx_timeout_count++; 2199 2200 ixgbevf_reinit_locked(adapter); 2201} 2202 2203/** 2204 * ixgbevf_watchdog_task - worker thread to bring link up 2205 * @work: pointer to work_struct containing our data 2206 **/ 2207static void ixgbevf_watchdog_task(struct work_struct *work) 2208{ 2209 struct ixgbevf_adapter *adapter = container_of(work, 2210 struct ixgbevf_adapter, 2211 watchdog_task); 2212 struct net_device *netdev = adapter->netdev; 2213 struct ixgbe_hw *hw = &adapter->hw; 2214 u32 link_speed = adapter->link_speed; 2215 bool link_up = adapter->link_up; 2216 s32 need_reset; 2217 2218 adapter->flags |= 
IXGBE_FLAG_IN_WATCHDOG_TASK; 2219 2220 /* 2221 * Always check the link on the watchdog because we have 2222 * no LSC interrupt 2223 */ 2224 spin_lock_bh(&adapter->mbx_lock); 2225 2226 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 2227 2228 spin_unlock_bh(&adapter->mbx_lock); 2229 2230 if (need_reset) { 2231 adapter->link_up = link_up; 2232 adapter->link_speed = link_speed; 2233 netif_carrier_off(netdev); 2234 netif_tx_stop_all_queues(netdev); 2235 schedule_work(&adapter->reset_task); 2236 goto pf_has_reset; 2237 } 2238 adapter->link_up = link_up; 2239 adapter->link_speed = link_speed; 2240 2241 if (link_up) { 2242 if (!netif_carrier_ok(netdev)) { 2243 dev_info(&adapter->pdev->dev, 2244 "NIC Link is Up, %u Gbps\n", 2245 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2246 10 : 1); 2247 netif_carrier_on(netdev); 2248 netif_tx_wake_all_queues(netdev); 2249 } 2250 } else { 2251 adapter->link_up = false; 2252 adapter->link_speed = 0; 2253 if (netif_carrier_ok(netdev)) { 2254 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); 2255 netif_carrier_off(netdev); 2256 netif_tx_stop_all_queues(netdev); 2257 } 2258 } 2259 2260 ixgbevf_update_stats(adapter); 2261 2262pf_has_reset: 2263 /* Reset the timer */ 2264 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2265 mod_timer(&adapter->watchdog_timer, 2266 round_jiffies(jiffies + (2 * HZ))); 2267 2268 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2269} 2270 2271/** 2272 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2273 * @adapter: board private structure 2274 * @tx_ring: Tx descriptor ring for a specific queue 2275 * 2276 * Free all transmit software resources 2277 **/ 2278void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, 2279 struct ixgbevf_ring *tx_ring) 2280{ 2281 struct pci_dev *pdev = adapter->pdev; 2282 2283 ixgbevf_clean_tx_ring(adapter, tx_ring); 2284 2285 vfree(tx_ring->tx_buffer_info); 2286 tx_ring->tx_buffer_info = NULL; 2287 2288 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2289 tx_ring->dma); 2290 2291 tx_ring->desc = NULL; 2292} 2293 2294/** 2295 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2296 * @adapter: board private structure 2297 * 2298 * Free all transmit software resources 2299 **/ 2300static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2301{ 2302 int i; 2303 2304 for (i = 0; i < adapter->num_tx_queues; i++) 2305 if (adapter->tx_ring[i].desc) 2306 ixgbevf_free_tx_resources(adapter, 2307 &adapter->tx_ring[i]); 2308 2309} 2310 2311/** 2312 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2313 * @adapter: board private structure 2314 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2315 * 2316 * Return 0 on success, negative on failure 2317 **/ 2318int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, 2319 struct ixgbevf_ring *tx_ring) 2320{ 2321 struct pci_dev *pdev = adapter->pdev; 2322 int size; 2323 2324 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2325 tx_ring->tx_buffer_info = vzalloc(size); 2326 if (!tx_ring->tx_buffer_info) 2327 goto err; 2328 2329 /* round up to nearest 4K */ 2330 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2331 tx_ring->size = ALIGN(tx_ring->size, 4096); 2332 2333 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 2334 &tx_ring->dma, GFP_KERNEL); 2335 if (!tx_ring->desc) 2336 goto err; 2337 2338 tx_ring->next_to_use = 0; 2339 tx_ring->next_to_clean = 0; 2340 return 0; 2341 2342err: 2343 
vfree(tx_ring->tx_buffer_info); 2344 tx_ring->tx_buffer_info = NULL; 2345 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " 2346 "descriptor ring\n"); 2347 return -ENOMEM; 2348} 2349 2350/** 2351 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources 2352 * @adapter: board private structure 2353 * 2354 * If this function returns with an error, then it's possible one or 2355 * more of the rings is populated (while the rest are not). It is the 2356 * callers duty to clean those orphaned rings. 2357 * 2358 * Return 0 on success, negative on failure 2359 **/ 2360static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) 2361{ 2362 int i, err = 0; 2363 2364 for (i = 0; i < adapter->num_tx_queues; i++) { 2365 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2366 if (!err) 2367 continue; 2368 hw_dbg(&adapter->hw, 2369 "Allocation for Tx Queue %u failed\n", i); 2370 break; 2371 } 2372 2373 return err; 2374} 2375 2376/** 2377 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) 2378 * @adapter: board private structure 2379 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2380 * 2381 * Returns 0 on success, negative on failure 2382 **/ 2383int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, 2384 struct ixgbevf_ring *rx_ring) 2385{ 2386 struct pci_dev *pdev = adapter->pdev; 2387 int size; 2388 2389 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2390 rx_ring->rx_buffer_info = vzalloc(size); 2391 if (!rx_ring->rx_buffer_info) 2392 goto alloc_failed; 2393 2394 /* Round up to nearest 4K */ 2395 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2396 rx_ring->size = ALIGN(rx_ring->size, 4096); 2397 2398 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 2399 &rx_ring->dma, GFP_KERNEL); 2400 2401 if (!rx_ring->desc) { 2402 hw_dbg(&adapter->hw, 2403 "Unable to allocate memory for " 2404 "the receive descriptor ring\n"); 2405 vfree(rx_ring->rx_buffer_info); 2406 rx_ring->rx_buffer_info = NULL; 2407 goto alloc_failed; 2408 } 2409 2410 rx_ring->next_to_clean = 0; 2411 rx_ring->next_to_use = 0; 2412 2413 return 0; 2414alloc_failed: 2415 return -ENOMEM; 2416} 2417 2418/** 2419 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources 2420 * @adapter: board private structure 2421 * 2422 * If this function returns with an error, then it's possible one or 2423 * more of the rings is populated (while the rest are not). It is the 2424 * callers duty to clean those orphaned rings. 
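 * (ixgbevf_open() does this by jumping to its err_setup_rx label, which
 * frees every ring whose descriptor memory was allocated before falling
 * through to the Tx cleanup.)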
2425 * 2426 * Return 0 on success, negative on failure 2427 **/ 2428static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2429{ 2430 int i, err = 0; 2431 2432 for (i = 0; i < adapter->num_rx_queues; i++) { 2433 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2434 if (!err) 2435 continue; 2436 hw_dbg(&adapter->hw, 2437 "Allocation for Rx Queue %u failed\n", i); 2438 break; 2439 } 2440 return err; 2441} 2442 2443/** 2444 * ixgbevf_free_rx_resources - Free Rx Resources 2445 * @adapter: board private structure 2446 * @rx_ring: ring to clean the resources from 2447 * 2448 * Free all receive software resources 2449 **/ 2450void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, 2451 struct ixgbevf_ring *rx_ring) 2452{ 2453 struct pci_dev *pdev = adapter->pdev; 2454 2455 ixgbevf_clean_rx_ring(adapter, rx_ring); 2456 2457 vfree(rx_ring->rx_buffer_info); 2458 rx_ring->rx_buffer_info = NULL; 2459 2460 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2461 rx_ring->dma); 2462 2463 rx_ring->desc = NULL; 2464} 2465 2466/** 2467 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2468 * @adapter: board private structure 2469 * 2470 * Free all receive software resources 2471 **/ 2472static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2473{ 2474 int i; 2475 2476 for (i = 0; i < adapter->num_rx_queues; i++) 2477 if (adapter->rx_ring[i].desc) 2478 ixgbevf_free_rx_resources(adapter, 2479 &adapter->rx_ring[i]); 2480} 2481 2482static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter) 2483{ 2484 struct ixgbe_hw *hw = &adapter->hw; 2485 struct ixgbevf_ring *rx_ring; 2486 unsigned int def_q = 0; 2487 unsigned int num_tcs = 0; 2488 unsigned int num_rx_queues = 1; 2489 int err, i; 2490 2491 spin_lock_bh(&adapter->mbx_lock); 2492 2493 /* fetch queue configuration from the PF */ 2494 err = ixgbevf_get_queues(hw, &num_tcs, &def_q); 2495 2496 spin_unlock_bh(&adapter->mbx_lock); 2497 2498 if (err) 2499 return err; 2500 2501 if (num_tcs > 1) { 2502 /* update default Tx ring register index */ 2503 adapter->tx_ring[0].reg_idx = def_q; 2504 2505 /* we need as many queues as traffic classes */ 2506 num_rx_queues = num_tcs; 2507 } 2508 2509 /* nothing to do if we have the correct number of queues */ 2510 if (adapter->num_rx_queues == num_rx_queues) 2511 return 0; 2512 2513 /* allocate new rings */ 2514 rx_ring = kcalloc(num_rx_queues, 2515 sizeof(struct ixgbevf_ring), GFP_KERNEL); 2516 if (!rx_ring) 2517 return -ENOMEM; 2518 2519 /* setup ring fields */ 2520 for (i = 0; i < num_rx_queues; i++) { 2521 rx_ring[i].count = adapter->rx_ring_count; 2522 rx_ring[i].queue_index = i; 2523 rx_ring[i].reg_idx = i; 2524 rx_ring[i].dev = &adapter->pdev->dev; 2525 rx_ring[i].netdev = adapter->netdev; 2526 } 2527 2528 /* free the existing ring and queues */ 2529 adapter->num_rx_queues = 0; 2530 kfree(adapter->rx_ring); 2531 2532 /* move new rings into position on the adapter struct */ 2533 adapter->rx_ring = rx_ring; 2534 adapter->num_rx_queues = num_rx_queues; 2535 2536 return 0; 2537} 2538 2539/** 2540 * ixgbevf_open - Called when a network interface is made active 2541 * @netdev: network interface device structure 2542 * 2543 * Returns 0 on success, negative value on failure 2544 * 2545 * The open entry point is called when a network interface is made 2546 * active by the system (IFF_UP). 
At this point all resources needed 2547 * for transmit and receive operations are allocated, the interrupt 2548 * handler is registered with the OS, the watchdog timer is started, 2549 * and the stack is notified that the interface is ready. 2550 **/ 2551static int ixgbevf_open(struct net_device *netdev) 2552{ 2553 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2554 struct ixgbe_hw *hw = &adapter->hw; 2555 int err; 2556 2557 /* disallow open during test */ 2558 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2559 return -EBUSY; 2560 2561 if (hw->adapter_stopped) { 2562 ixgbevf_reset(adapter); 2563 /* if adapter is still stopped then PF isn't up and 2564 * the vf can't start. */ 2565 if (hw->adapter_stopped) { 2566 err = IXGBE_ERR_MBX; 2567 pr_err("Unable to start - perhaps the PF Driver isn't " 2568 "up yet\n"); 2569 goto err_setup_reset; 2570 } 2571 } 2572 2573 ixgbevf_negotiate_api(adapter); 2574 2575 /* setup queue reg_idx and Rx queue count */ 2576 err = ixgbevf_setup_queues(adapter); 2577 if (err) 2578 goto err_setup_queues; 2579 2580 /* allocate transmit descriptors */ 2581 err = ixgbevf_setup_all_tx_resources(adapter); 2582 if (err) 2583 goto err_setup_tx; 2584 2585 /* allocate receive descriptors */ 2586 err = ixgbevf_setup_all_rx_resources(adapter); 2587 if (err) 2588 goto err_setup_rx; 2589 2590 ixgbevf_configure(adapter); 2591 2592 /* 2593 * Map the Tx/Rx rings to the vectors we were allotted. 2594 * if request_irq will be called in this function map_rings 2595 * must be called *before* up_complete 2596 */ 2597 ixgbevf_map_rings_to_vectors(adapter); 2598 2599 ixgbevf_up_complete(adapter); 2600 2601 /* clear any pending interrupts, may auto mask */ 2602 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2603 err = ixgbevf_request_irq(adapter); 2604 if (err) 2605 goto err_req_irq; 2606 2607 ixgbevf_irq_enable(adapter); 2608 2609 return 0; 2610 2611err_req_irq: 2612 ixgbevf_down(adapter); 2613 ixgbevf_free_irq(adapter); 2614err_setup_rx: 2615 ixgbevf_free_all_rx_resources(adapter); 2616err_setup_tx: 2617 ixgbevf_free_all_tx_resources(adapter); 2618err_setup_queues: 2619 ixgbevf_reset(adapter); 2620 2621err_setup_reset: 2622 2623 return err; 2624} 2625 2626/** 2627 * ixgbevf_close - Disables a network interface 2628 * @netdev: network interface device structure 2629 * 2630 * Returns 0, this is not allowed to fail 2631 * 2632 * The close entry point is called when an interface is de-activated 2633 * by the OS. The hardware is still under the drivers control, but 2634 * needs to be disabled. A global MAC reset is issued to stop the 2635 * hardware, and all transmit and receive resources are freed. 2636 **/ 2637static int ixgbevf_close(struct net_device *netdev) 2638{ 2639 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2640 2641 ixgbevf_down(adapter); 2642 ixgbevf_free_irq(adapter); 2643 2644 ixgbevf_free_all_tx_resources(adapter); 2645 ixgbevf_free_all_rx_resources(adapter); 2646 2647 return 0; 2648} 2649 2650static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, 2651 u32 vlan_macip_lens, u32 type_tucmd, 2652 u32 mss_l4len_idx) 2653{ 2654 struct ixgbe_adv_tx_context_desc *context_desc; 2655 u16 i = tx_ring->next_to_use; 2656 2657 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); 2658 2659 i++; 2660 tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; 2661 2662 /* set bits to identify this as an advanced context descriptor */ 2663 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; 2664 2665 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2666 context_desc->seqnum_seed = 0; 2667 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); 2668 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2669} 2670 2671static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, 2672 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 2673{ 2674 u32 vlan_macip_lens, type_tucmd; 2675 u32 mss_l4len_idx, l4len; 2676 2677 if (!skb_is_gso(skb)) 2678 return 0; 2679 2680 if (skb_header_cloned(skb)) { 2681 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2682 if (err) 2683 return err; 2684 } 2685 2686 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2687 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; 2688 2689 if (skb->protocol == htons(ETH_P_IP)) { 2690 struct iphdr *iph = ip_hdr(skb); 2691 iph->tot_len = 0; 2692 iph->check = 0; 2693 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2694 iph->daddr, 0, 2695 IPPROTO_TCP, 2696 0); 2697 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2698 } else if (skb_is_gso_v6(skb)) { 2699 ipv6_hdr(skb)->payload_len = 0; 2700 tcp_hdr(skb)->check = 2701 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2702 &ipv6_hdr(skb)->daddr, 2703 0, IPPROTO_TCP, 0); 2704 } 2705 2706 /* compute header lengths */ 2707 l4len = tcp_hdrlen(skb); 2708 *hdr_len += l4len; 2709 *hdr_len = skb_transport_offset(skb) + l4len; 2710 2711 /* mss_l4len_id: use 1 as index for TSO */ 2712 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; 2713 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; 2714 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; 2715 2716 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ 2717 vlan_macip_lens = skb_network_header_len(skb); 2718 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2719 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2720 2721 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2722 type_tucmd, mss_l4len_idx); 2723 2724 return 1; 2725} 2726 2727static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, 2728 struct sk_buff *skb, u32 tx_flags) 2729{ 2730 u32 vlan_macip_lens = 0; 2731 u32 mss_l4len_idx = 0; 2732 u32 type_tucmd = 0; 2733 2734 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2735 u8 l4_hdr = 0; 2736 switch (skb->protocol) { 2737 case __constant_htons(ETH_P_IP): 2738 vlan_macip_lens |= skb_network_header_len(skb); 2739 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2740 l4_hdr = ip_hdr(skb)->protocol; 2741 break; 2742 case __constant_htons(ETH_P_IPV6): 2743 vlan_macip_lens |= skb_network_header_len(skb); 2744 l4_hdr = ipv6_hdr(skb)->nexthdr; 2745 break; 2746 default: 2747 if (unlikely(net_ratelimit())) { 2748 dev_warn(tx_ring->dev, 2749 "partial checksum but proto=%x!\n", 2750 skb->protocol); 2751 } 2752 break; 2753 } 2754 2755 switch (l4_hdr) { 2756 case IPPROTO_TCP: 2757 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 2758 mss_l4len_idx = tcp_hdrlen(skb) << 2759 IXGBE_ADVTXD_L4LEN_SHIFT; 2760 break; 2761 case IPPROTO_SCTP: 2762 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; 2763 mss_l4len_idx = sizeof(struct sctphdr) << 2764 IXGBE_ADVTXD_L4LEN_SHIFT; 2765 break; 2766 case IPPROTO_UDP: 2767 mss_l4len_idx = sizeof(struct udphdr) << 2768 IXGBE_ADVTXD_L4LEN_SHIFT; 2769 break; 2770 default: 2771 if (unlikely(net_ratelimit())) { 2772 dev_warn(tx_ring->dev, 2773 "partial checksum but l4 proto=%x!\n", 2774 l4_hdr); 2775 } 2776 break; 2777 } 2778 } 2779 2780 /* vlan_macip_lens: MACLEN, VLAN tag */ 
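	/* skb_network_offset() supplies the MAC header length for the MACLEN
	 * field; the VLAN tag was already shifted into the upper 16 bits of
	 * tx_flags and drops straight into the top of this field */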
2781 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; 2782 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; 2783 2784 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, 2785 type_tucmd, mss_l4len_idx); 2786 2787 return (skb->ip_summed == CHECKSUM_PARTIAL); 2788} 2789 2790static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2791 struct sk_buff *skb, u32 tx_flags, 2792 unsigned int first) 2793{ 2794 struct ixgbevf_tx_buffer *tx_buffer_info; 2795 unsigned int len; 2796 unsigned int total = skb->len; 2797 unsigned int offset = 0, size; 2798 int count = 0; 2799 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2800 unsigned int f; 2801 int i; 2802 2803 i = tx_ring->next_to_use; 2804 2805 len = min(skb_headlen(skb), total); 2806 while (len) { 2807 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2808 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2809 2810 tx_buffer_info->length = size; 2811 tx_buffer_info->mapped_as_page = false; 2812 tx_buffer_info->dma = dma_map_single(tx_ring->dev, 2813 skb->data + offset, 2814 size, DMA_TO_DEVICE); 2815 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) 2816 goto dma_error; 2817 tx_buffer_info->next_to_watch = i; 2818 2819 len -= size; 2820 total -= size; 2821 offset += size; 2822 count++; 2823 i++; 2824 if (i == tx_ring->count) 2825 i = 0; 2826 } 2827 2828 for (f = 0; f < nr_frags; f++) { 2829 const struct skb_frag_struct *frag; 2830 2831 frag = &skb_shinfo(skb)->frags[f]; 2832 len = min((unsigned int)skb_frag_size(frag), total); 2833 offset = 0; 2834 2835 while (len) { 2836 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2837 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2838 2839 tx_buffer_info->length = size; 2840 tx_buffer_info->dma = 2841 skb_frag_dma_map(tx_ring->dev, frag, 2842 offset, size, DMA_TO_DEVICE); 2843 if (dma_mapping_error(tx_ring->dev, 2844 tx_buffer_info->dma)) 2845 goto dma_error; 2846 tx_buffer_info->mapped_as_page = true; 2847 tx_buffer_info->next_to_watch = i; 2848 2849 len -= size; 2850 total -= size; 2851 offset += size; 2852 count++; 2853 i++; 2854 if (i == tx_ring->count) 2855 i = 0; 2856 } 2857 if (total == 0) 2858 break; 2859 } 2860 2861 if (i == 0) 2862 i = tx_ring->count - 1; 2863 else 2864 i = i - 1; 2865 tx_ring->tx_buffer_info[i].skb = skb; 2866 tx_ring->tx_buffer_info[first].next_to_watch = i; 2867 tx_ring->tx_buffer_info[first].time_stamp = jiffies; 2868 2869 return count; 2870 2871dma_error: 2872 dev_err(tx_ring->dev, "TX DMA map failed\n"); 2873 2874 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2875 tx_buffer_info->dma = 0; 2876 tx_buffer_info->next_to_watch = 0; 2877 count--; 2878 2879 /* clear timestamp and dma mappings for remaining portion of packet */ 2880 while (count >= 0) { 2881 count--; 2882 i--; 2883 if (i < 0) 2884 i += tx_ring->count; 2885 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2886 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 2887 } 2888 2889 return count; 2890} 2891 2892static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, 2893 int count, u32 paylen, u8 hdr_len) 2894{ 2895 union ixgbe_adv_tx_desc *tx_desc = NULL; 2896 struct ixgbevf_tx_buffer *tx_buffer_info; 2897 u32 olinfo_status = 0, cmd_type_len = 0; 2898 unsigned int i; 2899 2900 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 2901 2902 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 2903 2904 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 2905 2906 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2907 cmd_type_len 
|= IXGBE_ADVTXD_DCMD_VLE; 2908 2909 if (tx_flags & IXGBE_TX_FLAGS_CSUM) 2910 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM; 2911 2912 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 2913 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 2914 2915 /* use index 1 context for tso */ 2916 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2917 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 2918 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM; 2919 } 2920 2921 /* 2922 * Check Context must be set if Tx switch is enabled, which it 2923 * always is for case where virtual functions are running 2924 */ 2925 olinfo_status |= IXGBE_ADVTXD_CC; 2926 2927 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 2928 2929 i = tx_ring->next_to_use; 2930 while (count--) { 2931 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2932 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 2933 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 2934 tx_desc->read.cmd_type_len = 2935 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 2936 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2937 i++; 2938 if (i == tx_ring->count) 2939 i = 0; 2940 } 2941 2942 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 2943 2944 tx_ring->next_to_use = i; 2945} 2946 2947static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2948{ 2949 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); 2950 2951 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 2952 /* Herbert's original patch had: 2953 * smp_mb__after_netif_stop_queue(); 2954 * but since that doesn't exist yet, just open code it. */ 2955 smp_mb(); 2956 2957 /* We need to check again in a case another CPU has just 2958 * made room available. */ 2959 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) 2960 return -EBUSY; 2961 2962 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 2963 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); 2964 ++adapter->restart_queue; 2965 return 0; 2966} 2967 2968static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) 2969{ 2970 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 2971 return 0; 2972 return __ixgbevf_maybe_stop_tx(tx_ring, size); 2973} 2974 2975static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2976{ 2977 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2978 struct ixgbevf_ring *tx_ring; 2979 unsigned int first; 2980 unsigned int tx_flags = 0; 2981 u8 hdr_len = 0; 2982 int r_idx = 0, tso; 2983 u16 count = TXD_USE_COUNT(skb_headlen(skb)); 2984#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2985 unsigned short f; 2986#endif 2987 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); 2988 if (!dst_mac || is_link_local_ether_addr(dst_mac)) { 2989 dev_kfree_skb(skb); 2990 return NETDEV_TX_OK; 2991 } 2992 2993 tx_ring = &adapter->tx_ring[r_idx]; 2994 2995 /* 2996 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, 2997 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, 2998 * + 2 desc gap to keep tail from touching head, 2999 * + 1 desc for context descriptor, 3000 * otherwise try next time 3001 */ 3002#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 3003 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 3004 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3005#else 3006 count += skb_shinfo(skb)->nr_frags; 3007#endif 3008 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { 3009 adapter->tx_busy++; 3010 return NETDEV_TX_BUSY; 3011 } 3012 3013 if (vlan_tx_tag_present(skb)) { 3014 tx_flags |= vlan_tx_tag_get(skb); 3015 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3016 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3017 } 3018 3019 first = tx_ring->next_to_use; 3020 3021 if (skb->protocol == htons(ETH_P_IP)) 3022 tx_flags |= IXGBE_TX_FLAGS_IPV4; 3023 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len); 3024 if (tso < 0) { 3025 dev_kfree_skb_any(skb); 3026 return NETDEV_TX_OK; 3027 } 3028 3029 if (tso) 3030 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; 3031 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags)) 3032 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3033 3034 ixgbevf_tx_queue(tx_ring, tx_flags, 3035 ixgbevf_tx_map(tx_ring, skb, tx_flags, first), 3036 skb->len, hdr_len); 3037 /* 3038 * Force memory writes to complete before letting h/w 3039 * know there are new descriptors to fetch. (Only 3040 * applicable for weak-ordered memory model archs, 3041 * such as IA-64). 
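	 * The barrier pairs with the tail register write below: all
	 * descriptor writes must be visible before the device is told
	 * to fetch them.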
3042 */
3043 wmb();
3044
3045 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
3046
3047 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3048
3049 return NETDEV_TX_OK;
3050}
3051
3052/**
3053 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3054 * @netdev: network interface device structure
3055 * @p: pointer to an address structure
3056 *
3057 * Returns 0 on success, negative on failure
3058 **/
3059static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3060{
3061 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3062 struct ixgbe_hw *hw = &adapter->hw;
3063 struct sockaddr *addr = p;
3064
3065 if (!is_valid_ether_addr(addr->sa_data))
3066 return -EADDRNOTAVAIL;
3067
3068 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3069 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3070
3071 spin_lock_bh(&adapter->mbx_lock);
3072
3073 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3074
3075 spin_unlock_bh(&adapter->mbx_lock);
3076
3077 return 0;
3078}
3079
3080/**
3081 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3082 * @netdev: network interface device structure
3083 * @new_mtu: new value for maximum frame size
3084 *
3085 * Returns 0 on success, negative on failure
3086 **/
3087static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3088{
3089 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3090 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3091 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3092
3093 switch (adapter->hw.api_version) {
3094 case ixgbe_mbox_api_11:
3095 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3096 break;
3097 default:
3098 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3099 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3100 break;
3101 }
3102
3103 /* MTU < 68 is an error and causes problems on some kernels */
3104 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3105 return -EINVAL;
3106
3107 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3108 netdev->mtu, new_mtu);
3109 /* must set new MTU before calling down or up */
3110 netdev->mtu = new_mtu;
3111
3112 if (netif_running(netdev))
3113 ixgbevf_reinit_locked(adapter);
3114
3115 return 0;
3116}
3117
3118static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3119{
3120 struct net_device *netdev = pci_get_drvdata(pdev);
3121 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3122#ifdef CONFIG_PM
3123 int retval = 0;
3124#endif
3125
3126 netif_device_detach(netdev);
3127
3128 if (netif_running(netdev)) {
3129 rtnl_lock();
3130 ixgbevf_down(adapter);
3131 ixgbevf_free_irq(adapter);
3132 ixgbevf_free_all_tx_resources(adapter);
3133 ixgbevf_free_all_rx_resources(adapter);
3134 rtnl_unlock();
3135 }
3136
3137 ixgbevf_clear_interrupt_scheme(adapter);
3138
3139#ifdef CONFIG_PM
3140 retval = pci_save_state(pdev);
3141 if (retval)
3142 return retval;
3143
3144#endif
3145 pci_disable_device(pdev);
3146
3147 return 0;
3148}
3149
3150#ifdef CONFIG_PM
3151static int ixgbevf_resume(struct pci_dev *pdev)
3152{
3153 struct net_device *netdev = pci_get_drvdata(pdev);
3154 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3155 u32 err;
3156
3157 pci_set_power_state(pdev, PCI_D0);
3158 pci_restore_state(pdev);
3159 /*
3160 * pci_restore_state clears dev->state_saved so call
3161 * pci_save_state to restore it.
3162 */ 3163 pci_save_state(pdev); 3164 3165 err = pci_enable_device_mem(pdev); 3166 if (err) { 3167 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3168 return err; 3169 } 3170 pci_set_master(pdev); 3171 3172 rtnl_lock(); 3173 err = ixgbevf_init_interrupt_scheme(adapter); 3174 rtnl_unlock(); 3175 if (err) { 3176 dev_err(&pdev->dev, "Cannot initialize interrupts\n"); 3177 return err; 3178 } 3179 3180 ixgbevf_reset(adapter); 3181 3182 if (netif_running(netdev)) { 3183 err = ixgbevf_open(netdev); 3184 if (err) 3185 return err; 3186 } 3187 3188 netif_device_attach(netdev); 3189 3190 return err; 3191} 3192 3193#endif /* CONFIG_PM */ 3194static void ixgbevf_shutdown(struct pci_dev *pdev) 3195{ 3196 ixgbevf_suspend(pdev, PMSG_SUSPEND); 3197} 3198 3199static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3200 struct rtnl_link_stats64 *stats) 3201{ 3202 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3203 unsigned int start; 3204 u64 bytes, packets; 3205 const struct ixgbevf_ring *ring; 3206 int i; 3207 3208 ixgbevf_update_stats(adapter); 3209 3210 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3211 3212 for (i = 0; i < adapter->num_rx_queues; i++) { 3213 ring = &adapter->rx_ring[i]; 3214 do { 3215 start = u64_stats_fetch_begin_bh(&ring->syncp); 3216 bytes = ring->total_bytes; 3217 packets = ring->total_packets; 3218 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3219 stats->rx_bytes += bytes; 3220 stats->rx_packets += packets; 3221 } 3222 3223 for (i = 0; i < adapter->num_tx_queues; i++) { 3224 ring = &adapter->tx_ring[i]; 3225 do { 3226 start = u64_stats_fetch_begin_bh(&ring->syncp); 3227 bytes = ring->total_bytes; 3228 packets = ring->total_packets; 3229 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3230 stats->tx_bytes += bytes; 3231 stats->tx_packets += packets; 3232 } 3233 3234 return stats; 3235} 3236 3237static const struct net_device_ops ixgbevf_netdev_ops = { 3238 .ndo_open = ixgbevf_open, 3239 .ndo_stop = ixgbevf_close, 3240 .ndo_start_xmit = ixgbevf_xmit_frame, 3241 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3242 .ndo_get_stats64 = ixgbevf_get_stats, 3243 .ndo_validate_addr = eth_validate_addr, 3244 .ndo_set_mac_address = ixgbevf_set_mac, 3245 .ndo_change_mtu = ixgbevf_change_mtu, 3246 .ndo_tx_timeout = ixgbevf_tx_timeout, 3247 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3248 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3249}; 3250 3251static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3252{ 3253 dev->netdev_ops = &ixgbevf_netdev_ops; 3254 ixgbevf_set_ethtool_ops(dev); 3255 dev->watchdog_timeo = 5 * HZ; 3256} 3257 3258/** 3259 * ixgbevf_probe - Device Initialization Routine 3260 * @pdev: PCI device information struct 3261 * @ent: entry in ixgbevf_pci_tbl 3262 * 3263 * Returns 0 on success, negative on failure 3264 * 3265 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3266 * The OS initialization, configuring of the adapter private structure, 3267 * and a hardware reset occur. 
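 * (ixgbevf_sw_init() either takes the MAC address the PF hands back
 * during reset_hw, or assigns a random one when the PF is still in
 * reset.)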
3268 **/ 3269static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3270{ 3271 struct net_device *netdev; 3272 struct ixgbevf_adapter *adapter = NULL; 3273 struct ixgbe_hw *hw = NULL; 3274 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3275 static int cards_found; 3276 int err, pci_using_dac; 3277 3278 err = pci_enable_device(pdev); 3279 if (err) 3280 return err; 3281 3282 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3283 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 3284 pci_using_dac = 1; 3285 } else { 3286 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3287 if (err) { 3288 err = dma_set_coherent_mask(&pdev->dev, 3289 DMA_BIT_MASK(32)); 3290 if (err) { 3291 dev_err(&pdev->dev, "No usable DMA " 3292 "configuration, aborting\n"); 3293 goto err_dma; 3294 } 3295 } 3296 pci_using_dac = 0; 3297 } 3298 3299 err = pci_request_regions(pdev, ixgbevf_driver_name); 3300 if (err) { 3301 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3302 goto err_pci_reg; 3303 } 3304 3305 pci_set_master(pdev); 3306 3307 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3308 MAX_TX_QUEUES); 3309 if (!netdev) { 3310 err = -ENOMEM; 3311 goto err_alloc_etherdev; 3312 } 3313 3314 SET_NETDEV_DEV(netdev, &pdev->dev); 3315 3316 pci_set_drvdata(pdev, netdev); 3317 adapter = netdev_priv(netdev); 3318 3319 adapter->netdev = netdev; 3320 adapter->pdev = pdev; 3321 hw = &adapter->hw; 3322 hw->back = adapter; 3323 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3324 3325 /* 3326 * call save state here in standalone driver because it relies on 3327 * adapter struct to exist, and needs to call netdev_priv 3328 */ 3329 pci_save_state(pdev); 3330 3331 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3332 pci_resource_len(pdev, 0)); 3333 if (!hw->hw_addr) { 3334 err = -EIO; 3335 goto err_ioremap; 3336 } 3337 3338 ixgbevf_assign_netdev_ops(netdev); 3339 3340 adapter->bd_number = cards_found; 3341 3342 /* Setup hw api */ 3343 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3344 hw->mac.type = ii->mac; 3345 3346 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3347 sizeof(struct ixgbe_mbx_operations)); 3348 3349 /* setup the private structure */ 3350 err = ixgbevf_sw_init(adapter); 3351 if (err) 3352 goto err_sw_init; 3353 3354 /* The HW MAC address was set and/or determined in sw_init */ 3355 if (!is_valid_ether_addr(netdev->dev_addr)) { 3356 pr_err("invalid MAC address\n"); 3357 err = -EIO; 3358 goto err_sw_init; 3359 } 3360 3361 netdev->hw_features = NETIF_F_SG | 3362 NETIF_F_IP_CSUM | 3363 NETIF_F_IPV6_CSUM | 3364 NETIF_F_TSO | 3365 NETIF_F_TSO6 | 3366 NETIF_F_RXCSUM; 3367 3368 netdev->features = netdev->hw_features | 3369 NETIF_F_HW_VLAN_TX | 3370 NETIF_F_HW_VLAN_RX | 3371 NETIF_F_HW_VLAN_FILTER; 3372 3373 netdev->vlan_features |= NETIF_F_TSO; 3374 netdev->vlan_features |= NETIF_F_TSO6; 3375 netdev->vlan_features |= NETIF_F_IP_CSUM; 3376 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3377 netdev->vlan_features |= NETIF_F_SG; 3378 3379 if (pci_using_dac) 3380 netdev->features |= NETIF_F_HIGHDMA; 3381 3382 netdev->priv_flags |= IFF_UNICAST_FLT; 3383 3384 init_timer(&adapter->watchdog_timer); 3385 adapter->watchdog_timer.function = ixgbevf_watchdog; 3386 adapter->watchdog_timer.data = (unsigned long)adapter; 3387 3388 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3389 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3390 3391 err = ixgbevf_init_interrupt_scheme(adapter); 3392 if (err) 3393 goto err_sw_init; 3394 3395 
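	/* "eth%d" is only a name template; register_netdev() below replaces
	 * the %d with the first free interface index */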
strcpy(netdev->name, "eth%d");
3396
3397 err = register_netdev(netdev);
3398 if (err)
3399 goto err_register;
3400
3401 netif_carrier_off(netdev);
3402
3403 ixgbevf_init_last_counter_stats(adapter);
3404
3405 /* print the MAC address */
3406 hw_dbg(hw, "%pM\n", netdev->dev_addr);
3407
3408 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3409
3410 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3411 cards_found++;
3412 return 0;
3413
3414err_register:
3415 ixgbevf_clear_interrupt_scheme(adapter);
3416err_sw_init:
3417 ixgbevf_reset_interrupt_capability(adapter);
3418 iounmap(hw->hw_addr);
3419err_ioremap:
3420 free_netdev(netdev);
3421err_alloc_etherdev:
3422 pci_release_regions(pdev);
3423err_pci_reg:
3424err_dma:
3425 pci_disable_device(pdev);
3426 return err;
3427}
3428
3429/**
3430 * ixgbevf_remove - Device Removal Routine
3431 * @pdev: PCI device information struct
3432 *
3433 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3434 * that it should release a PCI device. This could be caused by a
3435 * Hot-Plug event, or because the driver is going to be removed from
3436 * memory.
3437 **/
3438static void ixgbevf_remove(struct pci_dev *pdev)
3439{
3440 struct net_device *netdev = pci_get_drvdata(pdev);
3441 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3442
3443 set_bit(__IXGBEVF_DOWN, &adapter->state);
3444
3445 del_timer_sync(&adapter->watchdog_timer);
3446
3447 cancel_work_sync(&adapter->reset_task);
3448 cancel_work_sync(&adapter->watchdog_task);
3449
3450 if (netdev->reg_state == NETREG_REGISTERED)
3451 unregister_netdev(netdev);
3452
3453 ixgbevf_clear_interrupt_scheme(adapter);
3454 ixgbevf_reset_interrupt_capability(adapter);
3455
3456 iounmap(adapter->hw.hw_addr);
3457 pci_release_regions(pdev);
3458
3459 hw_dbg(&adapter->hw, "Remove complete\n");
3460
3461 kfree(adapter->tx_ring);
3462 kfree(adapter->rx_ring);
3463
3464 free_netdev(netdev);
3465
3466 pci_disable_device(pdev);
3467}
3468
3469/**
3470 * ixgbevf_io_error_detected - called when PCI error is detected
3471 * @pdev: Pointer to PCI device
3472 * @state: The current pci connection state
3473 *
3474 * This function is called after a PCI bus error affecting
3475 * this device has been detected.
3476 */
3477static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3478 pci_channel_state_t state)
3479{
3480 struct net_device *netdev = pci_get_drvdata(pdev);
3481 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3482
3483 netif_device_detach(netdev);
3484
3485 if (state == pci_channel_io_perm_failure)
3486 return PCI_ERS_RESULT_DISCONNECT;
3487
3488 if (netif_running(netdev))
3489 ixgbevf_down(adapter);
3490
3491 pci_disable_device(pdev);
3492
3493 /* Request a slot reset. */
3494 return PCI_ERS_RESULT_NEED_RESET;
3495}
3496
3497/**
3498 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3499 * @pdev: Pointer to PCI device
3500 *
3501 * Restart the card from scratch, as if from a cold-boot. Implementation
3502 * resembles the first-half of the ixgbevf_resume routine.
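 * (It only re-enables the device and resets the VF; bringing the
 * interface back up is deferred to ixgbevf_io_resume().)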
3503 */ 3504static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) 3505{ 3506 struct net_device *netdev = pci_get_drvdata(pdev); 3507 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3508 3509 if (pci_enable_device_mem(pdev)) { 3510 dev_err(&pdev->dev, 3511 "Cannot re-enable PCI device after reset.\n"); 3512 return PCI_ERS_RESULT_DISCONNECT; 3513 } 3514 3515 pci_set_master(pdev); 3516 3517 ixgbevf_reset(adapter); 3518 3519 return PCI_ERS_RESULT_RECOVERED; 3520} 3521 3522/** 3523 * ixgbevf_io_resume - called when traffic can start flowing again. 3524 * @pdev: Pointer to PCI device 3525 * 3526 * This callback is called when the error recovery driver tells us that 3527 * its OK to resume normal operation. Implementation resembles the 3528 * second-half of the ixgbevf_resume routine. 3529 */ 3530static void ixgbevf_io_resume(struct pci_dev *pdev) 3531{ 3532 struct net_device *netdev = pci_get_drvdata(pdev); 3533 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3534 3535 if (netif_running(netdev)) 3536 ixgbevf_up(adapter); 3537 3538 netif_device_attach(netdev); 3539} 3540 3541/* PCI Error Recovery (ERS) */ 3542static const struct pci_error_handlers ixgbevf_err_handler = { 3543 .error_detected = ixgbevf_io_error_detected, 3544 .slot_reset = ixgbevf_io_slot_reset, 3545 .resume = ixgbevf_io_resume, 3546}; 3547 3548static struct pci_driver ixgbevf_driver = { 3549 .name = ixgbevf_driver_name, 3550 .id_table = ixgbevf_pci_tbl, 3551 .probe = ixgbevf_probe, 3552 .remove = ixgbevf_remove, 3553#ifdef CONFIG_PM 3554 /* Power Management Hooks */ 3555 .suspend = ixgbevf_suspend, 3556 .resume = ixgbevf_resume, 3557#endif 3558 .shutdown = ixgbevf_shutdown, 3559 .err_handler = &ixgbevf_err_handler 3560}; 3561 3562/** 3563 * ixgbevf_init_module - Driver Registration Routine 3564 * 3565 * ixgbevf_init_module is the first routine called when the driver is 3566 * loaded. All it does is register with the PCI subsystem. 3567 **/ 3568static int __init ixgbevf_init_module(void) 3569{ 3570 int ret; 3571 pr_info("%s - version %s\n", ixgbevf_driver_string, 3572 ixgbevf_driver_version); 3573 3574 pr_info("%s\n", ixgbevf_copyright); 3575 3576 ret = pci_register_driver(&ixgbevf_driver); 3577 return ret; 3578} 3579 3580module_init(ixgbevf_init_module); 3581 3582/** 3583 * ixgbevf_exit_module - Driver Exit Cleanup Routine 3584 * 3585 * ixgbevf_exit_module is called just before the driver is removed 3586 * from memory. 3587 **/ 3588static void __exit ixgbevf_exit_module(void) 3589{ 3590 pci_unregister_driver(&ixgbevf_driver); 3591} 3592 3593#ifdef DEBUG 3594/** 3595 * ixgbevf_get_hw_dev_name - return device name string 3596 * used by hardware layer to print debugging information 3597 **/ 3598char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3599{ 3600 struct ixgbevf_adapter *adapter = hw->back; 3601 return adapter->netdev->name; 3602} 3603 3604#endif 3605module_exit(ixgbevf_exit_module); 3606 3607/* ixgbevf_main.c */ 3608