vmxnet3_drv.c revision 4c1dc80a74384829e467ba078260094d1ffdd963
/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;

#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
static int irq_share_mode;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);

/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
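	/* The queue's index in adapter->tx_queue doubles as its subqueue id,
	 * as in vmxnet3_tq_start()/vmxnet3_tq_wake() above. */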
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
		       adapter->netdev->name, adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		printk(KERN_INFO "%s: NIC Link is Down\n",
		       adapter->netdev->name);
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read and write into the ABI.
 * The general technique used here is: double word bitfields are defined in
 * opposite order for big endian architectures. Then, before the driver reads
 * them, the complete double word is translated using le32_to_cpu. Similarly,
 * after the driver writes into the bitfields, cpu_to_le32 is used to translate
 * the double words into the required format.
 * To avoid touching bits in a shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to the following functions.
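 * For example, vmxnet3_RxDescToCPU() below fills a temporary descriptor with
 * the CPU-order contents before the driver inspects its bitfields.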
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}



#endif  /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
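	/* the descriptor at eop_idx must carry the EOP bit */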
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 * starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc.
 * Stop after @num_to_alloc buffers are allocated or allocation fails.
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->skb->dev = adapter->netdev;

				skb_reserve(rbi->skb, NET_IP_ALIGN);
				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}
	rq->uncommitted[ring_idx] += num_allocated;

	dev_dbg(&adapter->netdev->dev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
		"%u, uncommitted %u\n", num_allocated, ring->next2fill,
		ring->next2comp, rq->uncommitted[ring_idx]);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}


static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

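	/*
	 * Writing the SOP desc with the inverted gen bit keeps the device
	 * from consuming the packet until vmxnet3_tq_xmit() flips the bit
	 * back once the whole descriptor chain is in place.
	 */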
	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_PAGE;
		tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
						 0, skb_frag_size(frag),
						 DMA_TO_DEVICE);

		tbi->len = skb_frag_size(frag);

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%llu %u %u\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 * parse and copy relevant protocol headers:
 *    For a tso pkt, relevant headers are L2/3/4 including options
 *    For a pkt requesting csum offloading, they are L2/3 and may include L4
 *    if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = tcp_hdrlen(skb);
				else if (iph->protocol == IPPROTO_UDP)
					/*
					 * Use tcp header size so that bytes to
					 * be copied are more than required by
					 * the device.
					 */
					ctx->l4_hdr_size = sizeof(struct tcphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
					     skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dev_dbg(&adapter->netdev->dev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}


/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	/* conservatively estimate # of descriptors to use */
	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
		skb_shinfo(skb)->nr_frags + 1;

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dev_dbg(&adapter->netdev->dev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
				      VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	dev_dbg(&adapter->netdev->dev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
	    le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}


static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}


static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}


static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_rxd = 0;
	bool skip_page_frags = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb, *new_skb = NULL;
		struct page *new_page = NULL;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;
		struct vmxnet3_cmd_ring *ring = NULL;
		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		ring = rq->rx_ring + ring_idx;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dev_dbg(&adapter->netdev->dev,
					"rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			skip_page_frags = false;
			ctx->skb = rbi->skb;
			new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
			if (new_skb == NULL) {
				/* Skb allocation failed, do not handover this
				 * skb to stack. Reuse it. Drop the existing pkt
				 */
				rq->stats.rx_buf_alloc_failure++;
				ctx->skb = NULL;
				rq->stats.drop_total++;
				skip_page_frags = true;
				goto rcd_done;
			}

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);

			/* Immediate refill */
			new_skb->dev = adapter->netdev;
			skb_reserve(new_skb, NET_IP_ALIGN);
			rbi->skb = new_skb;
			rbi->dma_addr = pci_map_single(adapter->pdev,
						       rbi->skb->data, rbi->len,
						       PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;

		} else {
			BUG_ON(ctx->skb == NULL && !skip_page_frags);

			/* non SOP buffer must be type 1 in most cases */
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

			/* If an sop buffer was dropped, skip all
			 * following non-sop fragments. They will be reused.
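			 * (On refill, vmxnet3_rq_alloc_rx_buf() treats a
			 * buffer whose page is still attached as skipped by
			 * the device and keeps its DMA mapping in place.)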
			 */
			if (skip_page_frags)
				goto rcd_done;

			new_page = alloc_page(GFP_ATOMIC);
			if (unlikely(new_page == NULL)) {
				/* Replacement page frag could not be allocated.
				 * Reuse this page. Drop the pkt and free the
				 * skb which contained this page as a frag. Skip
				 * processing all the following non-sop frags.
				 */
				rq->stats.rx_buf_alloc_failure++;
				dev_kfree_skb(ctx->skb);
				ctx->skb = NULL;
				skip_page_frags = true;
				goto rcd_done;
			}

			if (rcd->len) {
				pci_unmap_page(adapter->pdev,
					       rbi->dma_addr, rbi->len,
					       PCI_DMA_FROMDEVICE);

				vmxnet3_append_frag(ctx->skb, rcd, rbi);
			}

			/* Immediate refill */
			rbi->page = new_page;
			rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
						     0, PAGE_SIZE,
						     PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;
		}


		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(rcd->ts))
				__vlan_hwaccel_put_tag(skb, rcd->tci);

			if (adapter->netdev->features & NETIF_F_LRO)
				netif_receive_skb(skb);
			else
				napi_gro_receive(&rq->napi, skb);

			ctx->skb = NULL;
		}

rcd_done:
		/* device may have skipped some rx descs */
		ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;
		while (num_to_alloc) {
			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
					  &rxCmdDesc);
			BUG_ON(!rxd->addr);

			/* Recv desc is ready to be used by the device */
			rxd->gen = ring->gen;
			vmxnet3_cmd_ring_adv_next2fill(ring);
			num_to_alloc--;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					       rxprod_reg[ring_idx] + rq->qid * 8,
					       ring->next2fill);
			rq->uncommitted[ring_idx] = 0;
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
				  &rxComp);
	}

	return num_rxd;
}


static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
					  &rq->rx_ring[ring_idx].base[i].rxd,
					  &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
			    rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
				   rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
			rq->rx_ring[ring_idx].next2comp = 0;
		rq->uncommitted[ring_idx] = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}


static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}


void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}


static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}


static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;

}


static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							   &rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kzalloc(sz, GFP_KERNEL);
	if (!bi)
		goto err;

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}


static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;

}

/* Multiple queue aware polling function for tx and rx */

static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}


static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}

/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */

static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}


#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocated for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}


/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */

static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}

/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */

static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI */


/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}


	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback.
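 * Invoked in contexts where normal interrupt delivery may be unavailable
 * (e.g. by netconsole), so all device interrupts are masked while the rings
 * are polled directly.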
 */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
	vmxnet3_enable_all_intrs(adapter);

}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSIx was allocated for
			 * all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				printk(KERN_ERR "Failed to request irq for MSIX"
				       ", %s, error %d\n",
				       adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
		       ":%d\n", adapter->netdev->name, intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
		}



		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
		       "allocated\n", adapter->netdev->name, intr->type,
		       intr->mask_mode, intr->num_intrs);
	}

	return err;
}


static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG_ON(true);
	}
}


static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	u16 vid;

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}


static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}


static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}


static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
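	/* A NULL return here makes vmxnet3_set_mc() fall back to ALL_MULTI. */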
*/ 1975 if (sz <= 0xffff) { 1976 /* We may be called with BH disabled */ 1977 buf = kmalloc(sz, GFP_ATOMIC); 1978 if (buf) { 1979 struct netdev_hw_addr *ha; 1980 int i = 0; 1981 1982 netdev_for_each_mc_addr(ha, netdev) 1983 memcpy(buf + i++ * ETH_ALEN, ha->addr, 1984 ETH_ALEN); 1985 } 1986 } 1987 return buf; 1988} 1989 1990 1991static void 1992vmxnet3_set_mc(struct net_device *netdev) 1993{ 1994 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1995 unsigned long flags; 1996 struct Vmxnet3_RxFilterConf *rxConf = 1997 &adapter->shared->devRead.rxFilterConf; 1998 u8 *new_table = NULL; 1999 u32 new_mode = VMXNET3_RXM_UCAST; 2000 2001 if (netdev->flags & IFF_PROMISC) { 2002 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 2003 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable)); 2004 2005 new_mode |= VMXNET3_RXM_PROMISC; 2006 } else { 2007 vmxnet3_restore_vlan(adapter); 2008 } 2009 2010 if (netdev->flags & IFF_BROADCAST) 2011 new_mode |= VMXNET3_RXM_BCAST; 2012 2013 if (netdev->flags & IFF_ALLMULTI) 2014 new_mode |= VMXNET3_RXM_ALL_MULTI; 2015 else 2016 if (!netdev_mc_empty(netdev)) { 2017 new_table = vmxnet3_copy_mc(netdev); 2018 if (new_table) { 2019 new_mode |= VMXNET3_RXM_MCAST; 2020 rxConf->mfTableLen = cpu_to_le16( 2021 netdev_mc_count(netdev) * ETH_ALEN); 2022 rxConf->mfTablePA = cpu_to_le64(virt_to_phys( 2023 new_table)); 2024 } else { 2025 printk(KERN_INFO "%s: failed to copy mcast list" 2026 ", setting ALL_MULTI\n", netdev->name); 2027 new_mode |= VMXNET3_RXM_ALL_MULTI; 2028 } 2029 } 2030 2031 2032 if (!(new_mode & VMXNET3_RXM_MCAST)) { 2033 rxConf->mfTableLen = 0; 2034 rxConf->mfTablePA = 0; 2035 } 2036 2037 spin_lock_irqsave(&adapter->cmd_lock, flags); 2038 if (new_mode != rxConf->rxMode) { 2039 rxConf->rxMode = cpu_to_le32(new_mode); 2040 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2041 VMXNET3_CMD_UPDATE_RX_MODE); 2042 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2043 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 2044 } 2045 2046 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2047 VMXNET3_CMD_UPDATE_MAC_FILTERS); 2048 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2049 2050 kfree(new_table); 2051} 2052 2053void 2054vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter) 2055{ 2056 int i; 2057 2058 for (i = 0; i < adapter->num_rx_queues; i++) 2059 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter); 2060} 2061 2062 2063/* 2064 * Set up driver_shared based on settings in adapter. 2065 */ 2066 2067static void 2068vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) 2069{ 2070 struct Vmxnet3_DriverShared *shared = adapter->shared; 2071 struct Vmxnet3_DSDevRead *devRead = &shared->devRead; 2072 struct Vmxnet3_TxQueueConf *tqc; 2073 struct Vmxnet3_RxQueueConf *rqc; 2074 int i; 2075 2076 memset(shared, 0, sizeof(*shared)); 2077 2078 /* driver settings */ 2079 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC); 2080 devRead->misc.driverInfo.version = cpu_to_le32( 2081 VMXNET3_DRIVER_VERSION_NUM); 2082 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? 
2083 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64); 2084 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; 2085 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32( 2086 *((u32 *)&devRead->misc.driverInfo.gos)); 2087 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); 2088 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); 2089 2090 devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter)); 2091 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); 2092 2093 /* set up feature flags */ 2094 if (adapter->netdev->features & NETIF_F_RXCSUM) 2095 devRead->misc.uptFeatures |= UPT1_F_RXCSUM; 2096 2097 if (adapter->netdev->features & NETIF_F_LRO) { 2098 devRead->misc.uptFeatures |= UPT1_F_LRO; 2099 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); 2100 } 2101 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) 2102 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 2103 2104 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); 2105 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); 2106 devRead->misc.queueDescLen = cpu_to_le32( 2107 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + 2108 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc)); 2109 2110 /* tx queue settings */ 2111 devRead->misc.numTxQueues = adapter->num_tx_queues; 2112 for (i = 0; i < adapter->num_tx_queues; i++) { 2113 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; 2114 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL); 2115 tqc = &adapter->tqd_start[i].conf; 2116 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); 2117 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); 2118 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); 2119 tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info)); 2120 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); 2121 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); 2122 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); 2123 tqc->ddLen = cpu_to_le32( 2124 sizeof(struct vmxnet3_tx_buf_info) * 2125 tqc->txRingSize); 2126 tqc->intrIdx = tq->comp_ring.intr_idx; 2127 } 2128 2129 /* rx queue settings */ 2130 devRead->misc.numRxQueues = adapter->num_rx_queues; 2131 for (i = 0; i < adapter->num_rx_queues; i++) { 2132 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; 2133 rqc = &adapter->rqd_start[i].conf; 2134 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); 2135 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); 2136 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); 2137 rqc->ddPA = cpu_to_le64(virt_to_phys( 2138 rq->buf_info)); 2139 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); 2140 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); 2141 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); 2142 rqc->ddLen = cpu_to_le32( 2143 sizeof(struct vmxnet3_rx_buf_info) * 2144 (rqc->rxRingSize[0] + 2145 rqc->rxRingSize[1])); 2146 rqc->intrIdx = rq->comp_ring.intr_idx; 2147 } 2148 2149#ifdef VMXNET3_RSS 2150 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf)); 2151 2152 if (adapter->rss) { 2153 struct UPT1_RSSConf *rssConf = adapter->rss_conf; 2154 devRead->misc.uptFeatures |= UPT1_F_RSS; 2155 devRead->misc.numRxQueues = adapter->num_rx_queues; 2156 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | 2157 UPT1_RSS_HASH_TYPE_IPV4 | 2158 UPT1_RSS_HASH_TYPE_TCP_IPV6 | 2159 UPT1_RSS_HASH_TYPE_IPV6; 2160 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; 2161 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; 2162 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; 2163 
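/* A random Toeplitz hash key and a round-robin indirection table:
 * ethtool_rxfh_indir_default(i, n) is simply i % n, so flows are spread
 * across the rx queues in rotation until reconfigured via ethtool.
 */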
get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
2164 for (i = 0; i < rssConf->indTableSize; i++)
2165 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2166 i, adapter->num_rx_queues);
2167
2168 devRead->rssConfDesc.confVer = 1;
2169 devRead->rssConfDesc.confLen = sizeof(*rssConf);
2170 devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
2171 }
2172
2173#endif /* VMXNET3_RSS */
2174
2175 /* intr settings */
2176 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2177 VMXNET3_IMM_AUTO;
2178 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2179 for (i = 0; i < adapter->intr.num_intrs; i++)
2180 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2181
2182 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2183 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2184
2185 /* rx filter settings */
2186 devRead->rxFilterConf.rxMode = 0;
2187 vmxnet3_restore_vlan(adapter);
2188 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2189
2190 /* the rest are already zeroed */
2191}
2192
2193
2194int
2195vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2196{
2197 int err, i;
2198 u32 ret;
2199 unsigned long flags;
2200
2201 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2202 " ring sizes %u %u %u\n", adapter->netdev->name,
2203 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2204 adapter->tx_queue[0].tx_ring.size,
2205 adapter->rx_queue[0].rx_ring[0].size,
2206 adapter->rx_queue[0].rx_ring[1].size);
2207
2208 vmxnet3_tq_init_all(adapter);
2209 err = vmxnet3_rq_init_all(adapter);
2210 if (err) {
2211 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
2212 adapter->netdev->name, err);
2213 goto rq_err;
2214 }
2215
2216 err = vmxnet3_request_irqs(adapter);
2217 if (err) {
2218 printk(KERN_ERR "Failed to set up irq for %s: error %d\n",
2219 adapter->netdev->name, err);
2220 goto irq_err;
2221 }
2222
2223 vmxnet3_setup_driver_shared(adapter);
2224
2225 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2226 adapter->shared_pa));
2227 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2228 adapter->shared_pa));
2229 spin_lock_irqsave(&adapter->cmd_lock, flags);
2230 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2231 VMXNET3_CMD_ACTIVATE_DEV);
2232 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2233 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2234
2235 if (ret != 0) {
2236 printk(KERN_ERR "Failed to activate dev %s: error %u\n",
2237 adapter->netdev->name, ret);
2238 err = -EINVAL;
2239 goto activate_err;
2240 }
2241
2242 for (i = 0; i < adapter->num_rx_queues; i++) {
2243 VMXNET3_WRITE_BAR0_REG(adapter,
2244 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2245 adapter->rx_queue[i].rx_ring[0].next2fill);
2246 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2247 (i * VMXNET3_REG_ALIGN)),
2248 adapter->rx_queue[i].rx_ring[1].next2fill);
2249 }
2250
2251 /* Apply the rx filter settings last. */
2252 vmxnet3_set_mc(adapter->netdev);
2253
2254 /*
2255 * Check link state when first activating device. It will start the
2256 * tx queue if the link is up.
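 * (vmxnet3_check_link issues VMXNET3_CMD_GET_LINK; bit 0 of the returned
 * value reports link-up and the upper 16 bits carry the speed in Mbps.)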
2257 */ 2258 vmxnet3_check_link(adapter, true); 2259 for (i = 0; i < adapter->num_rx_queues; i++) 2260 napi_enable(&adapter->rx_queue[i].napi); 2261 vmxnet3_enable_all_intrs(adapter); 2262 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 2263 return 0; 2264 2265activate_err: 2266 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0); 2267 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0); 2268 vmxnet3_free_irqs(adapter); 2269irq_err: 2270rq_err: 2271 /* free up buffers we allocated */ 2272 vmxnet3_rq_cleanup_all(adapter); 2273 return err; 2274} 2275 2276 2277void 2278vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) 2279{ 2280 unsigned long flags; 2281 spin_lock_irqsave(&adapter->cmd_lock, flags); 2282 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); 2283 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2284} 2285 2286 2287int 2288vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) 2289{ 2290 int i; 2291 unsigned long flags; 2292 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) 2293 return 0; 2294 2295 2296 spin_lock_irqsave(&adapter->cmd_lock, flags); 2297 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2298 VMXNET3_CMD_QUIESCE_DEV); 2299 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2300 vmxnet3_disable_all_intrs(adapter); 2301 2302 for (i = 0; i < adapter->num_rx_queues; i++) 2303 napi_disable(&adapter->rx_queue[i].napi); 2304 netif_tx_disable(adapter->netdev); 2305 adapter->link_speed = 0; 2306 netif_carrier_off(adapter->netdev); 2307 2308 vmxnet3_tq_cleanup_all(adapter); 2309 vmxnet3_rq_cleanup_all(adapter); 2310 vmxnet3_free_irqs(adapter); 2311 return 0; 2312} 2313 2314 2315static void 2316vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) 2317{ 2318 u32 tmp; 2319 2320 tmp = *(u32 *)mac; 2321 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp); 2322 2323 tmp = (mac[5] << 8) | mac[4]; 2324 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp); 2325} 2326 2327 2328static int 2329vmxnet3_set_mac_addr(struct net_device *netdev, void *p) 2330{ 2331 struct sockaddr *addr = p; 2332 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2333 2334 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2335 vmxnet3_write_mac_addr(adapter, addr->sa_data); 2336 2337 return 0; 2338} 2339 2340 2341/* ==================== initialization and cleanup routines ============ */ 2342 2343static int 2344vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) 2345{ 2346 int err; 2347 unsigned long mmio_start, mmio_len; 2348 struct pci_dev *pdev = adapter->pdev; 2349 2350 err = pci_enable_device(pdev); 2351 if (err) { 2352 printk(KERN_ERR "Failed to enable adapter %s: error %d\n", 2353 pci_name(pdev), err); 2354 return err; 2355 } 2356 2357 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { 2358 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 2359 printk(KERN_ERR "pci_set_consistent_dma_mask failed " 2360 "for adapter %s\n", pci_name(pdev)); 2361 err = -EIO; 2362 goto err_set_mask; 2363 } 2364 *dma64 = true; 2365 } else { 2366 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 2367 printk(KERN_ERR "pci_set_dma_mask failed for adapter " 2368 "%s\n", pci_name(pdev)); 2369 err = -EIO; 2370 goto err_set_mask; 2371 } 2372 *dma64 = false; 2373 } 2374 2375 err = pci_request_selected_regions(pdev, (1 << 2) - 1, 2376 vmxnet3_driver_name); 2377 if (err) { 2378 printk(KERN_ERR "Failed to request region for adapter %s: " 2379 "error %d\n", pci_name(pdev), err); 2380 goto err_set_mask; 2381 } 2382 
2383 pci_set_master(pdev); 2384 2385 mmio_start = pci_resource_start(pdev, 0); 2386 mmio_len = pci_resource_len(pdev, 0); 2387 adapter->hw_addr0 = ioremap(mmio_start, mmio_len); 2388 if (!adapter->hw_addr0) { 2389 printk(KERN_ERR "Failed to map bar0 for adapter %s\n", 2390 pci_name(pdev)); 2391 err = -EIO; 2392 goto err_ioremap; 2393 } 2394 2395 mmio_start = pci_resource_start(pdev, 1); 2396 mmio_len = pci_resource_len(pdev, 1); 2397 adapter->hw_addr1 = ioremap(mmio_start, mmio_len); 2398 if (!adapter->hw_addr1) { 2399 printk(KERN_ERR "Failed to map bar1 for adapter %s\n", 2400 pci_name(pdev)); 2401 err = -EIO; 2402 goto err_bar1; 2403 } 2404 return 0; 2405 2406err_bar1: 2407 iounmap(adapter->hw_addr0); 2408err_ioremap: 2409 pci_release_selected_regions(pdev, (1 << 2) - 1); 2410err_set_mask: 2411 pci_disable_device(pdev); 2412 return err; 2413} 2414 2415 2416static void 2417vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter) 2418{ 2419 BUG_ON(!adapter->pdev); 2420 2421 iounmap(adapter->hw_addr0); 2422 iounmap(adapter->hw_addr1); 2423 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); 2424 pci_disable_device(adapter->pdev); 2425} 2426 2427 2428static void 2429vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) 2430{ 2431 size_t sz, i, ring0_size, ring1_size, comp_size; 2432 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0]; 2433 2434 2435 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - 2436 VMXNET3_MAX_ETH_HDR_SIZE) { 2437 adapter->skb_buf_size = adapter->netdev->mtu + 2438 VMXNET3_MAX_ETH_HDR_SIZE; 2439 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) 2440 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; 2441 2442 adapter->rx_buf_per_pkt = 1; 2443 } else { 2444 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; 2445 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + 2446 VMXNET3_MAX_ETH_HDR_SIZE; 2447 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; 2448 } 2449 2450 /* 2451 * for simplicity, force the ring0 size to be a multiple of 2452 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN 2453 */ 2454 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; 2455 ring0_size = adapter->rx_queue[0].rx_ring[0].size; 2456 ring0_size = (ring0_size + sz - 1) / sz * sz; 2457 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE / 2458 sz * sz); 2459 ring1_size = adapter->rx_queue[0].rx_ring[1].size; 2460 comp_size = ring0_size + ring1_size; 2461 2462 for (i = 0; i < adapter->num_rx_queues; i++) { 2463 rq = &adapter->rx_queue[i]; 2464 rq->rx_ring[0].size = ring0_size; 2465 rq->rx_ring[1].size = ring1_size; 2466 rq->comp_ring.size = comp_size; 2467 } 2468} 2469 2470 2471int 2472vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, 2473 u32 rx_ring_size, u32 rx_ring2_size) 2474{ 2475 int err = 0, i; 2476 2477 for (i = 0; i < adapter->num_tx_queues; i++) { 2478 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; 2479 tq->tx_ring.size = tx_ring_size; 2480 tq->data_ring.size = tx_ring_size; 2481 tq->comp_ring.size = tx_ring_size; 2482 tq->shared = &adapter->tqd_start[i].ctrl; 2483 tq->stopped = true; 2484 tq->adapter = adapter; 2485 tq->qid = i; 2486 err = vmxnet3_tq_create(tq, adapter); 2487 /* 2488 * Too late to change num_tx_queues. 
We cannot make do with
2489 * fewer queues than we asked for
2490 */
2491 if (err)
2492 goto queue_err;
2493 }
2494
2495 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2496 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2497 vmxnet3_adjust_rx_ring_size(adapter);
2498 for (i = 0; i < adapter->num_rx_queues; i++) {
2499 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2500 /* qid and qid2 for rx queues will be assigned later when num
2501 * of rx queues is finalized after allocating intrs */
2502 rq->shared = &adapter->rqd_start[i].ctrl;
2503 rq->adapter = adapter;
2504 err = vmxnet3_rq_create(rq, adapter);
2505 if (err) {
2506 if (i == 0) {
2507 printk(KERN_ERR "Could not allocate any rx "
2508 "queues. Aborting.\n");
2509 goto queue_err;
2510 } else {
2511 printk(KERN_INFO "Number of rx queues changed "
2512 "to %d.\n", i);
2513 adapter->num_rx_queues = i;
2514 err = 0;
2515 break;
2516 }
2517 }
2518 }
2519 return err;
2520queue_err:
2521 vmxnet3_tq_destroy_all(adapter);
2522 return err;
2523}
2524
2525static int
2526vmxnet3_open(struct net_device *netdev)
2527{
2528 struct vmxnet3_adapter *adapter;
2529 int err, i;
2530
2531 adapter = netdev_priv(netdev);
2532
2533 for (i = 0; i < adapter->num_tx_queues; i++)
2534 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2535
2536 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
2537 VMXNET3_DEF_RX_RING_SIZE,
2538 VMXNET3_DEF_RX_RING_SIZE);
2539 if (err)
2540 goto queue_err;
2541
2542 err = vmxnet3_activate_dev(adapter);
2543 if (err)
2544 goto activate_err;
2545
2546 return 0;
2547
2548activate_err:
2549 vmxnet3_rq_destroy_all(adapter);
2550 vmxnet3_tq_destroy_all(adapter);
2551queue_err:
2552 return err;
2553}
2554
2555
2556static int
2557vmxnet3_close(struct net_device *netdev)
2558{
2559 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2560
2561 /*
2562 * Reset_work may be in the middle of resetting the device, wait for its
2563 * completion.
2564 */
2565 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2566 msleep(1);
2567
2568 vmxnet3_quiesce_dev(adapter);
2569
2570 vmxnet3_rq_destroy_all(adapter);
2571 vmxnet3_tq_destroy_all(adapter);
2572
2573 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2574
2575
2576 return 0;
2577}
2578
2579
2580void
2581vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2582{
2583 int i;
2584
2585 /*
2586 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2587 * vmxnet3_close() will deadlock.
2588 */
2589 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2590
2591 /* we need to enable NAPI, otherwise dev_close will deadlock */
2592 for (i = 0; i < adapter->num_rx_queues; i++)
2593 napi_enable(&adapter->rx_queue[i].napi);
2594 dev_close(adapter->netdev);
2595}
2596
2597
2598static int
2599vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2600{
2601 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2602 int err = 0;
2603
2604 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2605 return -EINVAL;
2606
2607 netdev->mtu = new_mtu;
2608
2609 /*
2610 * Reset_work may be in the middle of resetting the device, wait for its
2611 * completion.
2612 */ 2613 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 2614 msleep(1); 2615 2616 if (netif_running(netdev)) { 2617 vmxnet3_quiesce_dev(adapter); 2618 vmxnet3_reset_dev(adapter); 2619 2620 /* we need to re-create the rx queue based on the new mtu */ 2621 vmxnet3_rq_destroy_all(adapter); 2622 vmxnet3_adjust_rx_ring_size(adapter); 2623 err = vmxnet3_rq_create_all(adapter); 2624 if (err) { 2625 printk(KERN_ERR "%s: failed to re-create rx queues," 2626 " error %d. Closing it.\n", netdev->name, err); 2627 goto out; 2628 } 2629 2630 err = vmxnet3_activate_dev(adapter); 2631 if (err) { 2632 printk(KERN_ERR "%s: failed to re-activate, error %d. " 2633 "Closing it\n", netdev->name, err); 2634 goto out; 2635 } 2636 } 2637 2638out: 2639 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2640 if (err) 2641 vmxnet3_force_close(adapter); 2642 2643 return err; 2644} 2645 2646 2647static void 2648vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) 2649{ 2650 struct net_device *netdev = adapter->netdev; 2651 2652 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | 2653 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX | 2654 NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 | 2655 NETIF_F_LRO; 2656 if (dma64) 2657 netdev->hw_features |= NETIF_F_HIGHDMA; 2658 netdev->vlan_features = netdev->hw_features & 2659 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 2660 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER; 2661 2662 netdev_info(adapter->netdev, 2663 "features: sg csum vlan jf tso tsoIPv6 lro%s\n", 2664 dma64 ? " highDMA" : ""); 2665} 2666 2667 2668static void 2669vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) 2670{ 2671 u32 tmp; 2672 2673 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL); 2674 *(u32 *)mac = tmp; 2675 2676 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH); 2677 mac[4] = tmp & 0xff; 2678 mac[5] = (tmp >> 8) & 0xff; 2679} 2680 2681#ifdef CONFIG_PCI_MSI 2682 2683/* 2684 * Enable MSIx vectors. 2685 * Returns : 2686 * 0 on successful enabling of required vectors, 2687 * VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required 2688 * could be enabled. 2689 * number of vectors which can be enabled otherwise (this number is smaller 2690 * than VMXNET3_LINUX_MIN_MSIX_VECT) 2691 */ 2692 2693static int 2694vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, 2695 int vectors) 2696{ 2697 int err = 0, vector_threshold; 2698 vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT; 2699 2700 while (vectors >= vector_threshold) { 2701 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, 2702 vectors); 2703 if (!err) { 2704 adapter->intr.num_intrs = vectors; 2705 return 0; 2706 } else if (err < 0) { 2707 netdev_err(adapter->netdev, 2708 "Failed to enable MSI-X, error: %d\n", err); 2709 vectors = 0; 2710 } else if (err < vector_threshold) { 2711 break; 2712 } else { 2713 /* If fails to enable required number of MSI-x vectors 2714 * try enabling minimum number of vectors required. 
2715 */ 2716 netdev_err(adapter->netdev, 2717 "Failed to enable %d MSI-X, trying %d instead\n", 2718 vectors, vector_threshold); 2719 vectors = vector_threshold; 2720 } 2721 } 2722 2723 netdev_info(adapter->netdev, 2724 "Number of MSI-X interrupts which can be allocated are lower than min threshold required.\n"); 2725 return err; 2726} 2727 2728 2729#endif /* CONFIG_PCI_MSI */ 2730 2731static void 2732vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) 2733{ 2734 u32 cfg; 2735 unsigned long flags; 2736 2737 /* intr settings */ 2738 spin_lock_irqsave(&adapter->cmd_lock, flags); 2739 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2740 VMXNET3_CMD_GET_CONF_INTR); 2741 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2742 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 2743 adapter->intr.type = cfg & 0x3; 2744 adapter->intr.mask_mode = (cfg >> 2) & 0x3; 2745 2746 if (adapter->intr.type == VMXNET3_IT_AUTO) { 2747 adapter->intr.type = VMXNET3_IT_MSIX; 2748 } 2749 2750#ifdef CONFIG_PCI_MSI 2751 if (adapter->intr.type == VMXNET3_IT_MSIX) { 2752 int vector, err = 0; 2753 2754 adapter->intr.num_intrs = (adapter->share_intr == 2755 VMXNET3_INTR_TXSHARE) ? 1 : 2756 adapter->num_tx_queues; 2757 adapter->intr.num_intrs += (adapter->share_intr == 2758 VMXNET3_INTR_BUDDYSHARE) ? 0 : 2759 adapter->num_rx_queues; 2760 adapter->intr.num_intrs += 1; /* for link event */ 2761 2762 adapter->intr.num_intrs = (adapter->intr.num_intrs > 2763 VMXNET3_LINUX_MIN_MSIX_VECT 2764 ? adapter->intr.num_intrs : 2765 VMXNET3_LINUX_MIN_MSIX_VECT); 2766 2767 for (vector = 0; vector < adapter->intr.num_intrs; vector++) 2768 adapter->intr.msix_entries[vector].entry = vector; 2769 2770 err = vmxnet3_acquire_msix_vectors(adapter, 2771 adapter->intr.num_intrs); 2772 /* If we cannot allocate one MSIx vector per queue 2773 * then limit the number of rx queues to 1 2774 */ 2775 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { 2776 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE 2777 || adapter->num_rx_queues != 1) { 2778 adapter->share_intr = VMXNET3_INTR_TXSHARE; 2779 printk(KERN_ERR "Number of rx queues : 1\n"); 2780 adapter->num_rx_queues = 1; 2781 adapter->intr.num_intrs = 2782 VMXNET3_LINUX_MIN_MSIX_VECT; 2783 } 2784 return; 2785 } 2786 if (!err) 2787 return; 2788 2789 /* If we cannot allocate MSIx vectors use only one rx queue */ 2790 netdev_info(adapter->netdev, 2791 "Failed to enable MSI-X, error %d . 
Limiting #rx queues to 1, try MSI.\n", 2792 err); 2793 2794 adapter->intr.type = VMXNET3_IT_MSI; 2795 } 2796 2797 if (adapter->intr.type == VMXNET3_IT_MSI) { 2798 int err; 2799 err = pci_enable_msi(adapter->pdev); 2800 if (!err) { 2801 adapter->num_rx_queues = 1; 2802 adapter->intr.num_intrs = 1; 2803 return; 2804 } 2805 } 2806#endif /* CONFIG_PCI_MSI */ 2807 2808 adapter->num_rx_queues = 1; 2809 printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n"); 2810 adapter->intr.type = VMXNET3_IT_INTX; 2811 2812 /* INT-X related setting */ 2813 adapter->intr.num_intrs = 1; 2814} 2815 2816 2817static void 2818vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter) 2819{ 2820 if (adapter->intr.type == VMXNET3_IT_MSIX) 2821 pci_disable_msix(adapter->pdev); 2822 else if (adapter->intr.type == VMXNET3_IT_MSI) 2823 pci_disable_msi(adapter->pdev); 2824 else 2825 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); 2826} 2827 2828 2829static void 2830vmxnet3_tx_timeout(struct net_device *netdev) 2831{ 2832 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2833 adapter->tx_timeout_count++; 2834 2835 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); 2836 schedule_work(&adapter->work); 2837 netif_wake_queue(adapter->netdev); 2838} 2839 2840 2841static void 2842vmxnet3_reset_work(struct work_struct *data) 2843{ 2844 struct vmxnet3_adapter *adapter; 2845 2846 adapter = container_of(data, struct vmxnet3_adapter, work); 2847 2848 /* if another thread is resetting the device, no need to proceed */ 2849 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 2850 return; 2851 2852 /* if the device is closed, we must leave it alone */ 2853 rtnl_lock(); 2854 if (netif_running(adapter->netdev)) { 2855 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name); 2856 vmxnet3_quiesce_dev(adapter); 2857 vmxnet3_reset_dev(adapter); 2858 vmxnet3_activate_dev(adapter); 2859 } else { 2860 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name); 2861 } 2862 rtnl_unlock(); 2863 2864 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2865} 2866 2867 2868static int __devinit 2869vmxnet3_probe_device(struct pci_dev *pdev, 2870 const struct pci_device_id *id) 2871{ 2872 static const struct net_device_ops vmxnet3_netdev_ops = { 2873 .ndo_open = vmxnet3_open, 2874 .ndo_stop = vmxnet3_close, 2875 .ndo_start_xmit = vmxnet3_xmit_frame, 2876 .ndo_set_mac_address = vmxnet3_set_mac_addr, 2877 .ndo_change_mtu = vmxnet3_change_mtu, 2878 .ndo_set_features = vmxnet3_set_features, 2879 .ndo_get_stats64 = vmxnet3_get_stats64, 2880 .ndo_tx_timeout = vmxnet3_tx_timeout, 2881 .ndo_set_rx_mode = vmxnet3_set_mc, 2882 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid, 2883 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid, 2884#ifdef CONFIG_NET_POLL_CONTROLLER 2885 .ndo_poll_controller = vmxnet3_netpoll, 2886#endif 2887 }; 2888 int err; 2889 bool dma64 = false; /* stupid gcc */ 2890 u32 ver; 2891 struct net_device *netdev; 2892 struct vmxnet3_adapter *adapter; 2893 u8 mac[ETH_ALEN]; 2894 int size; 2895 int num_tx_queues; 2896 int num_rx_queues; 2897 2898 if (!pci_msi_enabled()) 2899 enable_mq = 0; 2900 2901#ifdef VMXNET3_RSS 2902 if (enable_mq) 2903 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES, 2904 (int)num_online_cpus()); 2905 else 2906#endif 2907 num_rx_queues = 1; 2908 num_rx_queues = rounddown_pow_of_two(num_rx_queues); 2909 2910 if (enable_mq) 2911 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES, 2912 (int)num_online_cpus()); 2913 else 2914 num_tx_queues = 1; 2915 2916 num_tx_queues = 
rounddown_pow_of_two(num_tx_queues); 2917 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter), 2918 max(num_tx_queues, num_rx_queues)); 2919 printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n", 2920 num_tx_queues, num_rx_queues); 2921 2922 if (!netdev) 2923 return -ENOMEM; 2924 2925 pci_set_drvdata(pdev, netdev); 2926 adapter = netdev_priv(netdev); 2927 adapter->netdev = netdev; 2928 adapter->pdev = pdev; 2929 2930 spin_lock_init(&adapter->cmd_lock); 2931 adapter->shared = pci_alloc_consistent(adapter->pdev, 2932 sizeof(struct Vmxnet3_DriverShared), 2933 &adapter->shared_pa); 2934 if (!adapter->shared) { 2935 printk(KERN_ERR "Failed to allocate memory for %s\n", 2936 pci_name(pdev)); 2937 err = -ENOMEM; 2938 goto err_alloc_shared; 2939 } 2940 2941 adapter->num_rx_queues = num_rx_queues; 2942 adapter->num_tx_queues = num_tx_queues; 2943 2944 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 2945 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; 2946 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size, 2947 &adapter->queue_desc_pa); 2948 2949 if (!adapter->tqd_start) { 2950 printk(KERN_ERR "Failed to allocate memory for %s\n", 2951 pci_name(pdev)); 2952 err = -ENOMEM; 2953 goto err_alloc_queue_desc; 2954 } 2955 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + 2956 adapter->num_tx_queues); 2957 2958 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); 2959 if (adapter->pm_conf == NULL) { 2960 err = -ENOMEM; 2961 goto err_alloc_pm; 2962 } 2963 2964#ifdef VMXNET3_RSS 2965 2966 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL); 2967 if (adapter->rss_conf == NULL) { 2968 err = -ENOMEM; 2969 goto err_alloc_rss; 2970 } 2971#endif /* VMXNET3_RSS */ 2972 2973 err = vmxnet3_alloc_pci_resources(adapter, &dma64); 2974 if (err < 0) 2975 goto err_alloc_pci; 2976 2977 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); 2978 if (ver & 1) { 2979 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); 2980 } else { 2981 printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter" 2982 " %s\n", ver, pci_name(pdev)); 2983 err = -EBUSY; 2984 goto err_ver; 2985 } 2986 2987 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS); 2988 if (ver & 1) { 2989 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1); 2990 } else { 2991 printk(KERN_ERR "Incompatible upt version (0x%x) for " 2992 "adapter %s\n", ver, pci_name(pdev)); 2993 err = -EBUSY; 2994 goto err_ver; 2995 } 2996 2997 SET_NETDEV_DEV(netdev, &pdev->dev); 2998 vmxnet3_declare_features(adapter, dma64); 2999 3000 adapter->dev_number = atomic_read(&devices_found); 3001 3002 adapter->share_intr = irq_share_mode; 3003 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE && 3004 adapter->num_tx_queues != adapter->num_rx_queues) 3005 adapter->share_intr = VMXNET3_INTR_DONTSHARE; 3006 3007 vmxnet3_alloc_intr_resources(adapter); 3008 3009#ifdef VMXNET3_RSS 3010 if (adapter->num_rx_queues > 1 && 3011 adapter->intr.type == VMXNET3_IT_MSIX) { 3012 adapter->rss = true; 3013 printk(KERN_INFO "RSS is enabled.\n"); 3014 } else { 3015 adapter->rss = false; 3016 } 3017#endif 3018 3019 vmxnet3_read_mac_addr(adapter, mac); 3020 memcpy(netdev->dev_addr, mac, netdev->addr_len); 3021 3022 netdev->netdev_ops = &vmxnet3_netdev_ops; 3023 vmxnet3_set_ethtool_ops(netdev); 3024 netdev->watchdog_timeo = 5 * HZ; 3025 3026 INIT_WORK(&adapter->work, vmxnet3_reset_work); 3027 3028 if (adapter->intr.type == VMXNET3_IT_MSIX) { 3029 int i; 3030 for (i = 0; i < 
adapter->num_rx_queues; i++) { 3031 netif_napi_add(adapter->netdev, 3032 &adapter->rx_queue[i].napi, 3033 vmxnet3_poll_rx_only, 64); 3034 } 3035 } else { 3036 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi, 3037 vmxnet3_poll, 64); 3038 } 3039 3040 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 3041 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); 3042 3043 err = register_netdev(netdev); 3044 3045 if (err) { 3046 printk(KERN_ERR "Failed to register adapter %s\n", 3047 pci_name(pdev)); 3048 goto err_register; 3049 } 3050 3051 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 3052 vmxnet3_check_link(adapter, false); 3053 atomic_inc(&devices_found); 3054 return 0; 3055 3056err_register: 3057 vmxnet3_free_intr_resources(adapter); 3058err_ver: 3059 vmxnet3_free_pci_resources(adapter); 3060err_alloc_pci: 3061#ifdef VMXNET3_RSS 3062 kfree(adapter->rss_conf); 3063err_alloc_rss: 3064#endif 3065 kfree(adapter->pm_conf); 3066err_alloc_pm: 3067 pci_free_consistent(adapter->pdev, size, adapter->tqd_start, 3068 adapter->queue_desc_pa); 3069err_alloc_queue_desc: 3070 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3071 adapter->shared, adapter->shared_pa); 3072err_alloc_shared: 3073 pci_set_drvdata(pdev, NULL); 3074 free_netdev(netdev); 3075 return err; 3076} 3077 3078 3079static void __devexit 3080vmxnet3_remove_device(struct pci_dev *pdev) 3081{ 3082 struct net_device *netdev = pci_get_drvdata(pdev); 3083 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3084 int size = 0; 3085 int num_rx_queues; 3086 3087#ifdef VMXNET3_RSS 3088 if (enable_mq) 3089 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES, 3090 (int)num_online_cpus()); 3091 else 3092#endif 3093 num_rx_queues = 1; 3094 num_rx_queues = rounddown_pow_of_two(num_rx_queues); 3095 3096 cancel_work_sync(&adapter->work); 3097 3098 unregister_netdev(netdev); 3099 3100 vmxnet3_free_intr_resources(adapter); 3101 vmxnet3_free_pci_resources(adapter); 3102#ifdef VMXNET3_RSS 3103 kfree(adapter->rss_conf); 3104#endif 3105 kfree(adapter->pm_conf); 3106 3107 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 3108 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues; 3109 pci_free_consistent(adapter->pdev, size, adapter->tqd_start, 3110 adapter->queue_desc_pa); 3111 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3112 adapter->shared, adapter->shared_pa); 3113 free_netdev(netdev); 3114} 3115 3116 3117#ifdef CONFIG_PM 3118 3119static int 3120vmxnet3_suspend(struct device *device) 3121{ 3122 struct pci_dev *pdev = to_pci_dev(device); 3123 struct net_device *netdev = pci_get_drvdata(pdev); 3124 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3125 struct Vmxnet3_PMConf *pmConf; 3126 struct ethhdr *ehdr; 3127 struct arphdr *ahdr; 3128 u8 *arpreq; 3129 struct in_device *in_dev; 3130 struct in_ifaddr *ifa; 3131 unsigned long flags; 3132 int i = 0; 3133 3134 if (!netif_running(netdev)) 3135 return 0; 3136 3137 for (i = 0; i < adapter->num_rx_queues; i++) 3138 napi_disable(&adapter->rx_queue[i].napi); 3139 3140 vmxnet3_disable_all_intrs(adapter); 3141 vmxnet3_free_irqs(adapter); 3142 vmxnet3_free_intr_resources(adapter); 3143 3144 netif_device_detach(netdev); 3145 netif_tx_stop_all_queues(netdev); 3146 3147 /* Create wake-up filters. 
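* Pattern/mask encoding (as used below): bit N of mask[N / 8] marks
* pattern byte N as significant, so mask[0] = 0x3F selects the first six
* pattern bytes (the ETH_ALEN-byte unicast MAC), and the ARP filter's
* mask bytes pick out the EtherType, ARP opcode and target IP at their
* fixed offsets in the pattern.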
*/ 3148 pmConf = adapter->pm_conf; 3149 memset(pmConf, 0, sizeof(*pmConf)); 3150 3151 if (adapter->wol & WAKE_UCAST) { 3152 pmConf->filters[i].patternSize = ETH_ALEN; 3153 pmConf->filters[i].maskSize = 1; 3154 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); 3155 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ 3156 3157 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 3158 i++; 3159 } 3160 3161 if (adapter->wol & WAKE_ARP) { 3162 in_dev = in_dev_get(netdev); 3163 if (!in_dev) 3164 goto skip_arp; 3165 3166 ifa = (struct in_ifaddr *)in_dev->ifa_list; 3167 if (!ifa) 3168 goto skip_arp; 3169 3170 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/ 3171 sizeof(struct arphdr) + /* ARP header */ 3172 2 * ETH_ALEN + /* 2 Ethernet addresses*/ 3173 2 * sizeof(u32); /*2 IPv4 addresses */ 3174 pmConf->filters[i].maskSize = 3175 (pmConf->filters[i].patternSize - 1) / 8 + 1; 3176 3177 /* ETH_P_ARP in Ethernet header. */ 3178 ehdr = (struct ethhdr *)pmConf->filters[i].pattern; 3179 ehdr->h_proto = htons(ETH_P_ARP); 3180 3181 /* ARPOP_REQUEST in ARP header. */ 3182 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN]; 3183 ahdr->ar_op = htons(ARPOP_REQUEST); 3184 arpreq = (u8 *)(ahdr + 1); 3185 3186 /* The Unicast IPv4 address in 'tip' field. */ 3187 arpreq += 2 * ETH_ALEN + sizeof(u32); 3188 *(u32 *)arpreq = ifa->ifa_address; 3189 3190 /* The mask for the relevant bits. */ 3191 pmConf->filters[i].mask[0] = 0x00; 3192 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */ 3193 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */ 3194 pmConf->filters[i].mask[3] = 0x00; 3195 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */ 3196 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ 3197 in_dev_put(in_dev); 3198 3199 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 3200 i++; 3201 } 3202 3203skip_arp: 3204 if (adapter->wol & WAKE_MAGIC) 3205 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; 3206 3207 pmConf->numFilters = i; 3208 3209 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); 3210 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( 3211 *pmConf)); 3212 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( 3213 pmConf)); 3214 3215 spin_lock_irqsave(&adapter->cmd_lock, flags); 3216 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3217 VMXNET3_CMD_UPDATE_PMCFG); 3218 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 3219 3220 pci_save_state(pdev); 3221 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), 3222 adapter->wol); 3223 pci_disable_device(pdev); 3224 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND)); 3225 3226 return 0; 3227} 3228 3229 3230static int 3231vmxnet3_resume(struct device *device) 3232{ 3233 int err, i = 0; 3234 unsigned long flags; 3235 struct pci_dev *pdev = to_pci_dev(device); 3236 struct net_device *netdev = pci_get_drvdata(pdev); 3237 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3238 struct Vmxnet3_PMConf *pmConf; 3239 3240 if (!netif_running(netdev)) 3241 return 0; 3242 3243 /* Destroy wake-up filters. 
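* Pushing an all-zero Vmxnet3_PMConf via VMXNET3_CMD_UPDATE_PMCFG below
* clears every filter installed by vmxnet3_suspend().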
*/ 3244 pmConf = adapter->pm_conf; 3245 memset(pmConf, 0, sizeof(*pmConf)); 3246 3247 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); 3248 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( 3249 *pmConf)); 3250 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( 3251 pmConf)); 3252 3253 netif_device_attach(netdev); 3254 pci_set_power_state(pdev, PCI_D0); 3255 pci_restore_state(pdev); 3256 err = pci_enable_device_mem(pdev); 3257 if (err != 0) 3258 return err; 3259 3260 pci_enable_wake(pdev, PCI_D0, 0); 3261 3262 spin_lock_irqsave(&adapter->cmd_lock, flags); 3263 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3264 VMXNET3_CMD_UPDATE_PMCFG); 3265 spin_unlock_irqrestore(&adapter->cmd_lock, flags); 3266 vmxnet3_alloc_intr_resources(adapter); 3267 vmxnet3_request_irqs(adapter); 3268 for (i = 0; i < adapter->num_rx_queues; i++) 3269 napi_enable(&adapter->rx_queue[i].napi); 3270 vmxnet3_enable_all_intrs(adapter); 3271 3272 return 0; 3273} 3274 3275static const struct dev_pm_ops vmxnet3_pm_ops = { 3276 .suspend = vmxnet3_suspend, 3277 .resume = vmxnet3_resume, 3278}; 3279#endif 3280 3281static struct pci_driver vmxnet3_driver = { 3282 .name = vmxnet3_driver_name, 3283 .id_table = vmxnet3_pciid_table, 3284 .probe = vmxnet3_probe_device, 3285 .remove = __devexit_p(vmxnet3_remove_device), 3286#ifdef CONFIG_PM 3287 .driver.pm = &vmxnet3_pm_ops, 3288#endif 3289}; 3290 3291 3292static int __init 3293vmxnet3_init_module(void) 3294{ 3295 printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC, 3296 VMXNET3_DRIVER_VERSION_REPORT); 3297 return pci_register_driver(&vmxnet3_driver); 3298} 3299 3300module_init(vmxnet3_init_module); 3301 3302 3303static void 3304vmxnet3_exit_module(void) 3305{ 3306 pci_unregister_driver(&vmxnet3_driver); 3307} 3308 3309module_exit(vmxnet3_exit_module); 3310 3311MODULE_AUTHOR("VMware, Inc."); 3312MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC); 3313MODULE_LICENSE("GPL v2"); 3314MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING); 3315
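/*
 * A minimal alternative sketch (not in this revision): the open-coded
 * vmxnet3_pm_ops above could instead be declared with the
 * SIMPLE_DEV_PM_OPS() helper, which also wires up the hibernation
 * callbacks:
 *
 *	static SIMPLE_DEV_PM_OPS(vmxnet3_pm_ops, vmxnet3_suspend,
 *				 vmxnet3_resume);
 */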