vmxnet3_drv.c revision 0d0b16727f24f8258eeb33818347ca0f4557f982
/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;

#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
static int irq_share_mode;

/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
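
/*
 * Note (added for clarity): in the helpers above, the subqueue index
 * passed to netif_{start,wake,stop}_subqueue() is derived by pointer
 * arithmetic; adapter->tx_queue is a contiguous array, so
 * (tq - adapter->tx_queue) is the zero-based index of the queue.
 */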
132 133 134/* 135 * Check the link state. This may start or stop the tx queue. 136 */ 137static void 138vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) 139{ 140 u32 ret; 141 int i; 142 143 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); 144 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 145 adapter->link_speed = ret >> 16; 146 if (ret & 1) { /* Link is up. */ 147 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", 148 adapter->netdev->name, adapter->link_speed); 149 if (!netif_carrier_ok(adapter->netdev)) 150 netif_carrier_on(adapter->netdev); 151 152 if (affectTxQueue) { 153 for (i = 0; i < adapter->num_tx_queues; i++) 154 vmxnet3_tq_start(&adapter->tx_queue[i], 155 adapter); 156 } 157 } else { 158 printk(KERN_INFO "%s: NIC Link is Down\n", 159 adapter->netdev->name); 160 if (netif_carrier_ok(adapter->netdev)) 161 netif_carrier_off(adapter->netdev); 162 163 if (affectTxQueue) { 164 for (i = 0; i < adapter->num_tx_queues; i++) 165 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter); 166 } 167 } 168} 169 170static void 171vmxnet3_process_events(struct vmxnet3_adapter *adapter) 172{ 173 int i; 174 u32 events = le32_to_cpu(adapter->shared->ecr); 175 if (!events) 176 return; 177 178 vmxnet3_ack_events(adapter, events); 179 180 /* Check if link state has changed */ 181 if (events & VMXNET3_ECR_LINK) 182 vmxnet3_check_link(adapter, true); 183 184 /* Check if there is an error on xmit/recv queues */ 185 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { 186 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 187 VMXNET3_CMD_GET_QUEUE_STATUS); 188 189 for (i = 0; i < adapter->num_tx_queues; i++) 190 if (adapter->tqd_start[i].status.stopped) 191 dev_err(&adapter->netdev->dev, 192 "%s: tq[%d] error 0x%x\n", 193 adapter->netdev->name, i, le32_to_cpu( 194 adapter->tqd_start[i].status.error)); 195 for (i = 0; i < adapter->num_rx_queues; i++) 196 if (adapter->rqd_start[i].status.stopped) 197 dev_err(&adapter->netdev->dev, 198 "%s: rq[%d] error 0x%x\n", 199 adapter->netdev->name, i, 200 adapter->rqd_start[i].status.error); 201 202 schedule_work(&adapter->work); 203 } 204} 205 206#ifdef __BIG_ENDIAN_BITFIELD 207/* 208 * The device expects the bitfields in shared structures to be written in 209 * little endian. When CPU is big endian, the following routines are used to 210 * correctly read and write into ABI. 211 * The general technique used here is : double word bitfields are defined in 212 * opposite order for big endian architecture. Then before reading them in 213 * driver the complete double word is translated using le32_to_cpu. Similarly 214 * After the driver writes into bitfields, cpu_to_le32 is used to translate the 215 * double words into required format. 216 * In order to avoid touching bits in shared structure more than once, temporary 217 * descriptors are used. These are passed as srcDesc to following functions. 218 */ 219static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc, 220 struct Vmxnet3_RxDesc *dstDesc) 221{ 222 u32 *src = (u32 *)srcDesc + 2; 223 u32 *dst = (u32 *)dstDesc + 2; 224 dstDesc->addr = le64_to_cpu(srcDesc->addr); 225 *dst = le32_to_cpu(*src); 226 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1); 227} 228 229static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc, 230 struct Vmxnet3_TxDesc *dstDesc) 231{ 232 int i; 233 u32 *src = (u32 *)(srcDesc + 1); 234 u32 *dst = (u32 *)(dstDesc + 1); 235 236 /* Working backwards so that the gen bit is set at the end. 


#endif /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;
		union Vmxnet3_GenericDesc *gdesc;

		tbi = tq->buf_info + tq->tx_ring.next2comp;
		gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		printk(KERN_ERR "%s: failed to allocate data ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info) {
		printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}
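
/*
 * Note (added for clarity): each tx queue owns three DMA-consistent rings
 * allocated above: the command ring of Vmxnet3_TxDesc, a data ring of
 * Vmxnet3_TxDataDesc that receives copies of small packet headers (see
 * vmxnet3_parse_and_copy_hdr()), and the completion ring written back by
 * the device.
 */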

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 * Starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
 * are allocated or allocation fails.
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated < num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->skb->dev = adapter->netdev;

				skb_reserve(rbi->skb, NET_IP_ALIGN);
				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}
	rq->uncommitted[ring_idx] += num_allocated;

	dev_dbg(&adapter->netdev->dev,
		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
		"%u, uncommitted %u\n", num_allocated, ring->next2fill,
		ring->next2comp, rq->uncommitted[ring_idx]);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
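
/*
 * Note (added for clarity): ring 0 interleaves VMXNET3_RX_BUF_SKB ("head")
 * slots with VMXNET3_RX_BUF_PAGE ("body") slots, while ring 1 holds only
 * page buffers (see vmxnet3_rq_init()); the btype encoded into dword[2]
 * above tells the device which kind of buffer each descriptor carries.
 */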


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	frag->page = rbi->page;
	frag->page_offset = 0;
	frag->size = rcd->len;
	skb->data_len += frag->size;
	skb_shinfo(skb)->nr_frags++;
}


static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill,
			le64_to_cpu(ctx->sop_txd->txd.addr),
			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_PAGE;
		tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
					     frag->page_offset, frag->size,
					     PCI_DMA_TODEVICE);

		tbi->len = frag->size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
		gdesc->dword[3] = 0;

		dev_dbg(&adapter->netdev->dev,
			"txd[%u]: 0x%llu %u %u\n",
			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}
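
/*
 * Note (added for clarity): vmxnet3_map_pkt() deliberately writes the SOP
 * descriptor with the *previous* generation bit (dw2 = gen ^ 0x1), so the
 * device ignores the packet until vmxnet3_tq_xmit() flips that bit as its
 * final step; every other descriptor of the packet already carries the
 * current generation and is therefore published first.
 */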


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 * parse and copy relevant protocol headers:
 *   For a tso pkt, relevant headers are L2/3/4 including options
 *   For a pkt requesting csum offloading, they are L2/3 and may include L4
 *   if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = ((struct tcphdr *)
				   skb_transport_header(skb))->doff * 4;
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		unsigned int pull_size;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				struct iphdr *iph = (struct iphdr *)
						    skb_network_header(skb);
				if (iph->protocol == IPPROTO_TCP) {
					pull_size = ctx->eth_ip_hdr_size +
						    sizeof(struct tcphdr);

					if (unlikely(!pskb_may_pull(skb,
								pull_size))) {
						goto err;
					}
					ctx->l4_hdr_size = ((struct tcphdr *)
					   skb_transport_header(skb))->doff * 4;
				} else if (iph->protocol == IPPROTO_UDP) {
					ctx->l4_hdr_size =
							sizeof(struct udphdr);
				} else {
					ctx->l4_hdr_size = 0;
				}
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = ctx->eth_ip_hdr_size +
					 ctx->l4_hdr_size;
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	dev_dbg(&adapter->netdev->dev,
		"copy %u bytes to dataRing[%u]\n",
		ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}
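
/*
 * Example (illustrative): for a TSO IPv4/TCP packet without options,
 * eth_ip_hdr_size = 14 + 20 = 34 and l4_hdr_size = 20, so copy_size = 54
 * and those 54 header bytes land in the data ring entry.
 */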


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
	if (ctx->ipv4) {
		struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}


/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are set up successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	/* conservatively estimate # of descriptors to use */
	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
		skb_shinfo(skb)->nr_frags + 1;

	ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		dev_dbg(&adapter->netdev->dev,
			"tx queue stopped on %s, next2comp %u"
			" next2fill %u\n", adapter->netdev->name,
			tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}
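
	/*
	 * Note (added for clarity): for TSO the amount added above is the
	 * segment count, DIV_ROUND_UP(skb->len - hlen, mss); e.g. a
	 * 9014-byte skb with 54 bytes of headers and an mss of 1460 defers
	 * (9014 - 54 + 1459) / 1460 = 7 segments.
	 */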

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	dev_dbg(&adapter->netdev->dev,
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
		tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}


static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}


static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->rxcsum) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}
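
/*
 * Two notes on vmxnet3_rq_rx_complete() below (added for clarity):
 *
 * 1. Ownership of completion descriptors is tracked by the gen bit: a
 *    descriptor belongs to the driver only while rcd->gen matches
 *    comp_ring.gen, and the ring's gen is flipped on every wrap by
 *    vmxnet3_comp_ring_adv_next2proc(), so entries left over from the
 *    previous pass never match. vmxnet3_tq_tx_complete() relies on the
 *    same scheme for tx completions.
 *
 * 2. Each rx queue answers to two rqIDs: rq->qid (< num_rx_queues) for
 *    its first command ring and rq->qid2 for the second, which is how
 *    ring_idx is recovered from rcd->rqID.
 */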

static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
	u32 num_rxd = 0;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;

		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				dev_dbg(&adapter->netdev->dev,
					"rxRing[%u][%u] 0 length\n",
					ring_idx, idx);
				goto rcd_done;
			}

			ctx->skb = rbi->skb;
			rbi->skb = NULL;

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);
		} else {
			BUG_ON(ctx->skb == NULL);
			/* non SOP buffer must be type 1 in most cases */
			if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
				BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

				if (rcd->len) {
					pci_unmap_page(adapter->pdev,
						       rbi->dma_addr, rbi->len,
						       PCI_DMA_FROMDEVICE);

					vmxnet3_append_frag(ctx->skb, rcd, rbi);
					rbi->page = NULL;
				}
			} else {
				/*
				 * The only time a non-SOP buffer is type 0 is
				 * when it's EOP and error flag is raised, which
				 * has already been handled.
				 */
				BUG_ON(true);
			}
		}

		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;
			skb->truesize += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(adapter->vlan_grp && rcd->ts)) {
				vlan_hwaccel_receive_skb(skb,
						adapter->vlan_grp, rcd->tci);
			} else {
				netif_receive_skb(skb);
			}

			ctx->skb = NULL;
		}

rcd_done:
		/* device may skip some rx descs */
		rq->rx_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
					  rq->rx_ring[ring_idx].size);

		/* refill rx buffers frequently to avoid starving the h/w */
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
							   ring_idx);
		if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
							ring_idx, adapter))) {
			vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
						adapter);

			/* if needed, update the register */
			if (unlikely(rq->shared->updateRxProd)) {
				VMXNET3_WRITE_BAR0_REG(adapter,
					rxprod_reg[ring_idx] + rq->qid * 8,
					rq->rx_ring[ring_idx].next2fill);
				rq->uncommitted[ring_idx] = 0;
			}
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
		     &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_rxd;
}


static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
					rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
					rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
		rq->uncommitted[ring_idx] = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}


static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}


void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}


static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
		rq->uncommitted[i] = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* the 1st ring must have at least 1 rx buffer */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}


static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;

}


static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
							&rq->rx_ring[i].basePA);
		if (!rq->rx_ring[i].base) {
			printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
			       adapter->netdev->name, i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
						  &rq->comp_ring.basePA);
	if (!rq->comp_ring.base) {
		printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
		       adapter->netdev->name);
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = kzalloc(sz, GFP_KERNEL);
	if (!bi) {
		printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
		       adapter->netdev->name);
		goto err;
	}
	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}
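
/*
 * Note (added for clarity): vmxnet3_rq_create() carves both per-ring
 * buf_info arrays out of one kzalloc()'d block (buf_info[1] points
 * rx_ring[0].size entries into it), which is why vmxnet3_rq_destroy()
 * frees only buf_info[0].
 */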


static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;

}

/* Multiple queue aware polling function for tx and rx */

static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}


static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}

/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */

static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}


#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocated for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}
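
/*
 * Note on interrupt sharing (added for clarity): with VMXNET3_INTR_TXSHARE
 * all tx queues are folded onto a single MSI-X vector (handled above),
 * while with VMXNET3_INTR_BUDDYSHARE each tx queue shares the vector of
 * the rx queue with the same index, whose NAPI handler
 * (vmxnet3_poll_rx_only()) then reaps tx completions as well; see
 * vmxnet3_request_irqs() below.
 */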


/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */

static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}

/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */

static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI */


/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}


	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
	vmxnet3_enable_all_intrs(adapter);

}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSI-X vector was
			 * allocated for all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				printk(KERN_ERR "Failed to request irq for MSIX"
				       ", %s, error %d\n",
				       adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
		       ":%d\n", adapter->netdev->name, intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
		}


		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
		       "allocated\n", adapter->netdev->name, intr->type,
		       intr->mask_mode, intr->num_intrs);
	}

	return err;
}


static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG_ON(true);
	}
}

static void
vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	if (grp) {
		/* add vlan rx stripping. */
		if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
			int i;
			struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
			adapter->vlan_grp = grp;

			/* update FEATURES to device */
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
			/*
			 * Clear entire vfTable; then enable untagged pkts.
			 * Note: setting one entry in vfTable to non-zero turns
			 * on VLAN rx filtering.
			 */
			for (i = 0; i < VMXNET3_VFT_SIZE; i++)
				vfTable[i] = 0;

			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		} else {
			printk(KERN_ERR "%s: vlan_rx_register when device has "
			       "no NETIF_F_HW_VLAN_RX\n", netdev->name);
		}
	} else {
		/* remove vlan rx stripping. */
		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
		adapter->vlan_grp = NULL;

		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
			int i;

			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
				/* clear entire vfTable; this also disables
				 * VLAN rx filtering
				 */
				vfTable[i] = 0;
			}
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);

			/* update FEATURES to device */
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
					       VMXNET3_CMD_UPDATE_FEATURE);
		}
	}
}


static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	if (adapter->vlan_grp) {
		u16 vid;
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		bool activeVlan = false;

		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (vlan_group_get_device(adapter->vlan_grp, vid)) {
				VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
				activeVlan = true;
			}
		}
		if (activeVlan) {
			/* continue to allow untagged pkts */
			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
		}
	}
}


static void
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}


static void
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}


static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}
2061 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64); 2062 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; 2063 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32( 2064 *((u32 *)&devRead->misc.driverInfo.gos)); 2065 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1); 2066 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1); 2067 2068 devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter)); 2069 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter)); 2070 2071 /* set up feature flags */ 2072 if (adapter->rxcsum) 2073 devRead->misc.uptFeatures |= UPT1_F_RXCSUM; 2074 2075 if (adapter->lro) { 2076 devRead->misc.uptFeatures |= UPT1_F_LRO; 2077 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); 2078 } 2079 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) && 2080 adapter->vlan_grp) { 2081 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 2082 } 2083 2084 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); 2085 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); 2086 devRead->misc.queueDescLen = cpu_to_le32( 2087 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + 2088 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc)); 2089 2090 /* tx queue settings */ 2091 devRead->misc.numTxQueues = adapter->num_tx_queues; 2092 for (i = 0; i < adapter->num_tx_queues; i++) { 2093 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; 2094 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL); 2095 tqc = &adapter->tqd_start[i].conf; 2096 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); 2097 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); 2098 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); 2099 tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info)); 2100 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); 2101 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); 2102 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); 2103 tqc->ddLen = cpu_to_le32( 2104 sizeof(struct vmxnet3_tx_buf_info) * 2105 tqc->txRingSize); 2106 tqc->intrIdx = tq->comp_ring.intr_idx; 2107 } 2108 2109 /* rx queue settings */ 2110 devRead->misc.numRxQueues = adapter->num_rx_queues; 2111 for (i = 0; i < adapter->num_rx_queues; i++) { 2112 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; 2113 rqc = &adapter->rqd_start[i].conf; 2114 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA); 2115 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA); 2116 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA); 2117 rqc->ddPA = cpu_to_le64(virt_to_phys( 2118 rq->buf_info)); 2119 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size); 2120 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size); 2121 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size); 2122 rqc->ddLen = cpu_to_le32( 2123 sizeof(struct vmxnet3_rx_buf_info) * 2124 (rqc->rxRingSize[0] + 2125 rqc->rxRingSize[1])); 2126 rqc->intrIdx = rq->comp_ring.intr_idx; 2127 } 2128 2129#ifdef VMXNET3_RSS 2130 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf)); 2131 2132 if (adapter->rss) { 2133 struct UPT1_RSSConf *rssConf = adapter->rss_conf; 2134 devRead->misc.uptFeatures |= UPT1_F_RSS; 2135 devRead->misc.numRxQueues = adapter->num_rx_queues; 2136 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 | 2137 UPT1_RSS_HASH_TYPE_IPV4 | 2138 UPT1_RSS_HASH_TYPE_TCP_IPV6 | 2139 UPT1_RSS_HASH_TYPE_IPV6; 2140 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ; 2141 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; 2142 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE; 2143 get_random_bytes(&rssConf->hashKey[0], 
rssConf->hashKeySize); 2144 for (i = 0; i < rssConf->indTableSize; i++) 2145 rssConf->indTable[i] = i % adapter->num_rx_queues; 2146 2147 devRead->rssConfDesc.confVer = cpu_to_le32(1); 2148 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf)); 2149 devRead->rssConfDesc.confPA = cpu_to_le64(virt_to_phys(rssConf)); 2150 } 2151 2152#endif /* VMXNET3_RSS */ 2153 2154 /* intr settings */ 2155 devRead->intrConf.autoMask = adapter->intr.mask_mode == 2156 VMXNET3_IMM_AUTO; 2157 devRead->intrConf.numIntrs = adapter->intr.num_intrs; 2158 for (i = 0; i < adapter->intr.num_intrs; i++) 2159 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; 2160 2161 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; 2162 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL); 2163 2164 /* rx filter settings */ 2165 devRead->rxFilterConf.rxMode = 0; 2166 vmxnet3_restore_vlan(adapter); 2167 /* the rest are already zeroed */ 2168} 2169 2170 2171int 2172vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) 2173{ 2174 int err, i; 2175 u32 ret; 2176 2177 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," 2178 " ring sizes %u %u %u\n", adapter->netdev->name, 2179 adapter->skb_buf_size, adapter->rx_buf_per_pkt, 2180 adapter->tx_queue[0].tx_ring.size, 2181 adapter->rx_queue[0].rx_ring[0].size, 2182 adapter->rx_queue[0].rx_ring[1].size); 2183 2184 vmxnet3_tq_init_all(adapter); 2185 err = vmxnet3_rq_init_all(adapter); 2186 if (err) { 2187 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", 2188 adapter->netdev->name, err); 2189 goto rq_err; 2190 } 2191 2192 err = vmxnet3_request_irqs(adapter); 2193 if (err) { 2194 printk(KERN_ERR "Failed to setup irq for %s: error %d\n", 2195 adapter->netdev->name, err); 2196 goto irq_err; 2197 } 2198 2199 vmxnet3_setup_driver_shared(adapter); 2200 2201 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO( 2202 adapter->shared_pa)); 2203 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( 2204 adapter->shared_pa)); 2205 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2206 VMXNET3_CMD_ACTIVATE_DEV); 2207 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2208 2209 if (ret != 0) { 2210 printk(KERN_ERR "Failed to activate dev %s: error %u\n", 2211 adapter->netdev->name, ret); 2212 err = -EINVAL; 2213 goto activate_err; 2214 } 2215 2216 for (i = 0; i < adapter->num_rx_queues; i++) { 2217 VMXNET3_WRITE_BAR0_REG(adapter, 2218 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN, 2219 adapter->rx_queue[i].rx_ring[0].next2fill); 2220 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 + 2221 (i * VMXNET3_REG_ALIGN)), 2222 adapter->rx_queue[i].rx_ring[1].next2fill); 2223 } 2224 2225 /* Apply the rx filter settings last. */ 2226 vmxnet3_set_mc(adapter->netdev); 2227 2228 /* 2229 * Check link state when first activating device. It will start the 2230 * tx queue if the link is up.
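 * NAPI and the device interrupts are enabled only after this point,
 * so the rx producer writes above are published to the device before
 * any completion processing can run.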
2231 */ 2232 vmxnet3_check_link(adapter, true); 2233 for (i = 0; i < adapter->num_rx_queues; i++) 2234 napi_enable(&adapter->rx_queue[i].napi); 2235 vmxnet3_enable_all_intrs(adapter); 2236 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 2237 return 0; 2238 2239activate_err: 2240 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0); 2241 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0); 2242 vmxnet3_free_irqs(adapter); 2243irq_err: 2244rq_err: 2245 /* free up buffers we allocated */ 2246 vmxnet3_rq_cleanup_all(adapter); 2247 return err; 2248} 2249 2250 2251void 2252vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) 2253{ 2254 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); 2255} 2256 2257 2258int 2259vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) 2260{ 2261 int i; 2262 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) 2263 return 0; 2264 2265 2266 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2267 VMXNET3_CMD_QUIESCE_DEV); 2268 vmxnet3_disable_all_intrs(adapter); 2269 2270 for (i = 0; i < adapter->num_rx_queues; i++) 2271 napi_disable(&adapter->rx_queue[i].napi); 2272 netif_tx_disable(adapter->netdev); 2273 adapter->link_speed = 0; 2274 netif_carrier_off(adapter->netdev); 2275 2276 vmxnet3_tq_cleanup_all(adapter); 2277 vmxnet3_rq_cleanup_all(adapter); 2278 vmxnet3_free_irqs(adapter); 2279 return 0; 2280} 2281 2282 2283static void 2284vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) 2285{ 2286 u32 tmp; 2287 2288 tmp = *(u32 *)mac; 2289 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp); 2290 2291 tmp = (mac[5] << 8) | mac[4]; 2292 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp); 2293} 2294 2295 2296static int 2297vmxnet3_set_mac_addr(struct net_device *netdev, void *p) 2298{ 2299 struct sockaddr *addr = p; 2300 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2301 2302 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2303 vmxnet3_write_mac_addr(adapter, addr->sa_data); 2304 2305 return 0; 2306} 2307 2308 2309/* ==================== initialization and cleanup routines ============ */ 2310 2311static int 2312vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) 2313{ 2314 int err; 2315 unsigned long mmio_start, mmio_len; 2316 struct pci_dev *pdev = adapter->pdev; 2317 2318 err = pci_enable_device(pdev); 2319 if (err) { 2320 printk(KERN_ERR "Failed to enable adapter %s: error %d\n", 2321 pci_name(pdev), err); 2322 return err; 2323 } 2324 2325 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { 2326 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 2327 printk(KERN_ERR "pci_set_consistent_dma_mask failed " 2328 "for adapter %s\n", pci_name(pdev)); 2329 err = -EIO; 2330 goto err_set_mask; 2331 } 2332 *dma64 = true; 2333 } else { 2334 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 2335 printk(KERN_ERR "pci_set_dma_mask failed for adapter " 2336 "%s\n", pci_name(pdev)); 2337 err = -EIO; 2338 goto err_set_mask; 2339 } 2340 *dma64 = false; 2341 } 2342 2343 err = pci_request_selected_regions(pdev, (1 << 2) - 1, 2344 vmxnet3_driver_name); 2345 if (err) { 2346 printk(KERN_ERR "Failed to request region for adapter %s: " 2347 "error %d\n", pci_name(pdev), err); 2348 goto err_set_mask; 2349 } 2350 2351 pci_set_master(pdev); 2352 2353 mmio_start = pci_resource_start(pdev, 0); 2354 mmio_len = pci_resource_len(pdev, 0); 2355 adapter->hw_addr0 = ioremap(mmio_start, mmio_len); 2356 if (!adapter->hw_addr0) { 2357 printk(KERN_ERR "Failed to map bar0 for adapter %s\n", 
2358 pci_name(pdev)); 2359 err = -EIO; 2360 goto err_ioremap; 2361 } 2362 2363 mmio_start = pci_resource_start(pdev, 1); 2364 mmio_len = pci_resource_len(pdev, 1); 2365 adapter->hw_addr1 = ioremap(mmio_start, mmio_len); 2366 if (!adapter->hw_addr1) { 2367 printk(KERN_ERR "Failed to map bar1 for adapter %s\n", 2368 pci_name(pdev)); 2369 err = -EIO; 2370 goto err_bar1; 2371 } 2372 return 0; 2373 2374err_bar1: 2375 iounmap(adapter->hw_addr0); 2376err_ioremap: 2377 pci_release_selected_regions(pdev, (1 << 2) - 1); 2378err_set_mask: 2379 pci_disable_device(pdev); 2380 return err; 2381} 2382 2383 2384static void 2385vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter) 2386{ 2387 BUG_ON(!adapter->pdev); 2388 2389 iounmap(adapter->hw_addr0); 2390 iounmap(adapter->hw_addr1); 2391 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); 2392 pci_disable_device(adapter->pdev); 2393} 2394 2395 2396static void 2397vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) 2398{ 2399 size_t sz, i, ring0_size, ring1_size, comp_size; 2400 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0]; 2401 2402 2403 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - 2404 VMXNET3_MAX_ETH_HDR_SIZE) { 2405 adapter->skb_buf_size = adapter->netdev->mtu + 2406 VMXNET3_MAX_ETH_HDR_SIZE; 2407 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) 2408 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; 2409 2410 adapter->rx_buf_per_pkt = 1; 2411 } else { 2412 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; 2413 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + 2414 VMXNET3_MAX_ETH_HDR_SIZE; 2415 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; 2416 } 2417 2418 /* 2419 * for simplicity, force the ring0 size to be a multiple of 2420 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN 2421 */ 2422 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; 2423 ring0_size = adapter->rx_queue[0].rx_ring[0].size; 2424 ring0_size = (ring0_size + sz - 1) / sz * sz; 2425 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE / 2426 sz * sz); 2427 ring1_size = adapter->rx_queue[0].rx_ring[1].size; 2428 comp_size = ring0_size + ring1_size; 2429 2430 for (i = 0; i < adapter->num_rx_queues; i++) { 2431 rq = &adapter->rx_queue[i]; 2432 rq->rx_ring[0].size = ring0_size; 2433 rq->rx_ring[1].size = ring1_size; 2434 rq->comp_ring.size = comp_size; 2435 } 2436} 2437 2438 2439int 2440vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, 2441 u32 rx_ring_size, u32 rx_ring2_size) 2442{ 2443 int err = 0, i; 2444 2445 for (i = 0; i < adapter->num_tx_queues; i++) { 2446 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; 2447 tq->tx_ring.size = tx_ring_size; 2448 tq->data_ring.size = tx_ring_size; 2449 tq->comp_ring.size = tx_ring_size; 2450 tq->shared = &adapter->tqd_start[i].ctrl; 2451 tq->stopped = true; 2452 tq->adapter = adapter; 2453 tq->qid = i; 2454 err = vmxnet3_tq_create(tq, adapter); 2455 /* 2456 * Too late to change num_tx_queues.
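 * The queue descriptor area shared with the device was already
 * sized for this queue count in vmxnet3_probe_device().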
We cannot fall back 2457 * to fewer queues than were requested. 2458 */ 2459 if (err) 2460 goto queue_err; 2461 } 2462 2463 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size; 2464 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size; 2465 vmxnet3_adjust_rx_ring_size(adapter); 2466 for (i = 0; i < adapter->num_rx_queues; i++) { 2467 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; 2468 /* qid and qid2 for rx queues will be assigned later when num 2469 * of rx queues is finalized after allocating intrs */ 2470 rq->shared = &adapter->rqd_start[i].ctrl; 2471 rq->adapter = adapter; 2472 err = vmxnet3_rq_create(rq, adapter); 2473 if (err) { 2474 if (i == 0) { 2475 printk(KERN_ERR "Could not allocate any rx " 2476 "queues. Aborting.\n"); 2477 goto queue_err; 2478 } else { 2479 printk(KERN_INFO "Number of rx queues changed " 2480 "to %d.\n", i); 2481 adapter->num_rx_queues = i; 2482 err = 0; 2483 break; 2484 } 2485 } 2486 } 2487 return err; 2488queue_err: 2489 vmxnet3_tq_destroy_all(adapter); 2490 return err; 2491} 2492 2493static int 2494vmxnet3_open(struct net_device *netdev) 2495{ 2496 struct vmxnet3_adapter *adapter; 2497 int err, i; 2498 2499 adapter = netdev_priv(netdev); 2500 2501 for (i = 0; i < adapter->num_tx_queues; i++) 2502 spin_lock_init(&adapter->tx_queue[i].tx_lock); 2503 2504 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, 2505 VMXNET3_DEF_RX_RING_SIZE, 2506 VMXNET3_DEF_RX_RING_SIZE); 2507 if (err) 2508 goto queue_err; 2509 2510 err = vmxnet3_activate_dev(adapter); 2511 if (err) 2512 goto activate_err; 2513 2514 return 0; 2515 2516activate_err: 2517 vmxnet3_rq_destroy_all(adapter); 2518 vmxnet3_tq_destroy_all(adapter); 2519queue_err: 2520 return err; 2521} 2522 2523 2524static int 2525vmxnet3_close(struct net_device *netdev) 2526{ 2527 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2528 2529 /* 2530 * Reset_work may be in the middle of resetting the device, wait for its 2531 * completion. 2532 */ 2533 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 2534 msleep(1); 2535 2536 vmxnet3_quiesce_dev(adapter); 2537 2538 vmxnet3_rq_destroy_all(adapter); 2539 vmxnet3_tq_destroy_all(adapter); 2540 2541 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2542 2543 2544 return 0; 2545} 2546 2547 2548void 2549vmxnet3_force_close(struct vmxnet3_adapter *adapter) 2550{ 2551 int i; 2552 2553 /* 2554 * the caller must have cleared VMXNET3_STATE_BIT_RESETTING already, 2555 * otherwise the dev_close() below would deadlock in vmxnet3_close(). 2556 */ 2557 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); 2558 2559 /* we need to enable NAPI, otherwise dev_close will deadlock */ 2560 for (i = 0; i < adapter->num_rx_queues; i++) 2561 napi_enable(&adapter->rx_queue[i].napi); 2562 dev_close(adapter->netdev); 2563} 2564 2565 2566static int 2567vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) 2568{ 2569 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2570 int err = 0; 2571 2572 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) 2573 return -EINVAL; 2574 2575 if (new_mtu > 1500 && !adapter->jumbo_frame) 2576 return -EINVAL; 2577 2578 netdev->mtu = new_mtu; 2579 2580 /* 2581 * Reset_work may be in the middle of resetting the device, wait for its 2582 * completion.
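 * (VMXNET3_STATE_BIT_RESETTING doubles as a simple mutex between
 * this path and vmxnet3_reset_work() further down in this file.)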
2583 */ 2584 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 2585 msleep(1); 2586 2587 if (netif_running(netdev)) { 2588 vmxnet3_quiesce_dev(adapter); 2589 vmxnet3_reset_dev(adapter); 2590 2591 /* we need to re-create the rx queue based on the new mtu */ 2592 vmxnet3_rq_destroy_all(adapter); 2593 vmxnet3_adjust_rx_ring_size(adapter); 2594 err = vmxnet3_rq_create_all(adapter); 2595 if (err) { 2596 printk(KERN_ERR "%s: failed to re-create rx queues," 2597 " error %d. Closing it.\n", netdev->name, err); 2598 goto out; 2599 } 2600 2601 err = vmxnet3_activate_dev(adapter); 2602 if (err) { 2603 printk(KERN_ERR "%s: failed to re-activate, error %d. " 2604 "Closing it\n", netdev->name, err); 2605 goto out; 2606 } 2607 } 2608 2609out: 2610 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2611 if (err) 2612 vmxnet3_force_close(adapter); 2613 2614 return err; 2615} 2616 2617 2618static void 2619vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) 2620{ 2621 struct net_device *netdev = adapter->netdev; 2622 2623 netdev->features = NETIF_F_SG | 2624 NETIF_F_HW_CSUM | 2625 NETIF_F_HW_VLAN_TX | 2626 NETIF_F_HW_VLAN_RX | 2627 NETIF_F_HW_VLAN_FILTER | 2628 NETIF_F_TSO | 2629 NETIF_F_TSO6 | 2630 NETIF_F_LRO; 2631 2632 printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro"); 2633 2634 adapter->rxcsum = true; 2635 adapter->jumbo_frame = true; 2636 adapter->lro = true; 2637 2638 if (dma64) { 2639 netdev->features |= NETIF_F_HIGHDMA; 2640 printk(" highDMA"); 2641 } 2642 2643 netdev->vlan_features = netdev->features; 2644 printk("\n"); 2645} 2646 2647 2648static void 2649vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) 2650{ 2651 u32 tmp; 2652 2653 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL); 2654 *(u32 *)mac = tmp; 2655 2656 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH); 2657 mac[4] = tmp & 0xff; 2658 mac[5] = (tmp >> 8) & 0xff; 2659} 2660 2661#ifdef CONFIG_PCI_MSI 2662 2663/* 2664 * Enable MSIx vectors. 2665 * Returns : 2666 * 0 on successful enabling of required vectors, 2667 * VMXNET3_LINUX_MIN_MSIX_VECT when only minumum number of vectors required 2668 * could be enabled. 2669 * number of vectors which can be enabled otherwise (this number is smaller 2670 * than VMXNET3_LINUX_MIN_MSIX_VECT) 2671 */ 2672 2673static int 2674vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, 2675 int vectors) 2676{ 2677 int err = 0, vector_threshold; 2678 vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT; 2679 2680 while (vectors >= vector_threshold) { 2681 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, 2682 vectors); 2683 if (!err) { 2684 adapter->intr.num_intrs = vectors; 2685 return 0; 2686 } else if (err < 0) { 2687 printk(KERN_ERR "Failed to enable MSI-X for %s, error" 2688 " %d\n", adapter->netdev->name, err); 2689 vectors = 0; 2690 } else if (err < vector_threshold) { 2691 break; 2692 } else { 2693 /* If fails to enable required number of MSI-x vectors 2694 * try enabling 3 of them. 
One each for rx, tx and event 2695 */ 2696 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" 2697 " %d instead\n", vectors, adapter->netdev->name, 2698 vector_threshold); 2699 vectors = vector_threshold; 2700 } 2701 } 2702 2703 printk(KERN_INFO "Number of MSI-X interrupts which can be allocated" 2704 " is lower than min threshold required.\n"); 2705 return err; 2706} 2707 2708 2709#endif /* CONFIG_PCI_MSI */ 2710 2711static void 2712vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) 2713{ 2714 u32 cfg; 2715 2716 /* intr settings */ 2717 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2718 VMXNET3_CMD_GET_CONF_INTR); 2719 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2720 adapter->intr.type = cfg & 0x3; 2721 adapter->intr.mask_mode = (cfg >> 2) & 0x3; 2722 2723 if (adapter->intr.type == VMXNET3_IT_AUTO) { 2724 adapter->intr.type = VMXNET3_IT_MSIX; 2725 } 2726 2727#ifdef CONFIG_PCI_MSI 2728 if (adapter->intr.type == VMXNET3_IT_MSIX) { 2729 int vector, err = 0; 2730 2731 adapter->intr.num_intrs = (adapter->share_intr == 2732 VMXNET3_INTR_TXSHARE) ? 1 : 2733 adapter->num_tx_queues; 2734 adapter->intr.num_intrs += (adapter->share_intr == 2735 VMXNET3_INTR_BUDDYSHARE) ? 0 : 2736 adapter->num_rx_queues; 2737 adapter->intr.num_intrs += 1; /* for link event */ 2738 2739 adapter->intr.num_intrs = (adapter->intr.num_intrs > 2740 VMXNET3_LINUX_MIN_MSIX_VECT 2741 ? adapter->intr.num_intrs : 2742 VMXNET3_LINUX_MIN_MSIX_VECT); 2743 2744 for (vector = 0; vector < adapter->intr.num_intrs; vector++) 2745 adapter->intr.msix_entries[vector].entry = vector; 2746 2747 err = vmxnet3_acquire_msix_vectors(adapter, 2748 adapter->intr.num_intrs); 2749 /* If we cannot allocate one MSI-X vector per queue, 2750 * then limit the number of rx queues to 1 2751 */ 2752 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { 2753 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE 2754 || adapter->num_rx_queues != 2) { 2755 adapter->share_intr = VMXNET3_INTR_TXSHARE; 2756 printk(KERN_ERR "Number of rx queues : 1\n"); 2757 adapter->num_rx_queues = 1; 2758 adapter->intr.num_intrs = 2759 VMXNET3_LINUX_MIN_MSIX_VECT; 2760 } 2761 return; 2762 } 2763 if (!err) 2764 return; 2765 2766 /* If we cannot allocate MSI-X vectors, use only one rx queue */ 2767 printk(KERN_INFO "Failed to enable MSI-X for %s, error %d. "
2768 "#rx queues : 1, try MSI\n", adapter->netdev->name, err); 2769 2770 adapter->intr.type = VMXNET3_IT_MSI; 2771 } 2772 2773 if (adapter->intr.type == VMXNET3_IT_MSI) { 2774 int err; 2775 err = pci_enable_msi(adapter->pdev); 2776 if (!err) { 2777 adapter->num_rx_queues = 1; 2778 adapter->intr.num_intrs = 1; 2779 return; 2780 } 2781 } 2782#endif /* CONFIG_PCI_MSI */ 2783 2784 adapter->num_rx_queues = 1; 2785 printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n"); 2786 adapter->intr.type = VMXNET3_IT_INTX; 2787 2788 /* INT-X related setting */ 2789 adapter->intr.num_intrs = 1; 2790} 2791 2792 2793static void 2794vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter) 2795{ 2796 if (adapter->intr.type == VMXNET3_IT_MSIX) 2797 pci_disable_msix(adapter->pdev); 2798 else if (adapter->intr.type == VMXNET3_IT_MSI) 2799 pci_disable_msi(adapter->pdev); 2800 else 2801 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); 2802} 2803 2804 2805static void 2806vmxnet3_tx_timeout(struct net_device *netdev) 2807{ 2808 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2809 adapter->tx_timeout_count++; 2810 2811 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); 2812 schedule_work(&adapter->work); 2813 netif_wake_queue(adapter->netdev); 2814} 2815 2816 2817static void 2818vmxnet3_reset_work(struct work_struct *data) 2819{ 2820 struct vmxnet3_adapter *adapter; 2821 2822 adapter = container_of(data, struct vmxnet3_adapter, work); 2823 2824 /* if another thread is resetting the device, no need to proceed */ 2825 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 2826 return; 2827 2828 /* if the device is closed, we must leave it alone */ 2829 rtnl_lock(); 2830 if (netif_running(adapter->netdev)) { 2831 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name); 2832 vmxnet3_quiesce_dev(adapter); 2833 vmxnet3_reset_dev(adapter); 2834 vmxnet3_activate_dev(adapter); 2835 } else { 2836 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name); 2837 } 2838 rtnl_unlock(); 2839 2840 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2841} 2842 2843 2844static int __devinit 2845vmxnet3_probe_device(struct pci_dev *pdev, 2846 const struct pci_device_id *id) 2847{ 2848 static const struct net_device_ops vmxnet3_netdev_ops = { 2849 .ndo_open = vmxnet3_open, 2850 .ndo_stop = vmxnet3_close, 2851 .ndo_start_xmit = vmxnet3_xmit_frame, 2852 .ndo_set_mac_address = vmxnet3_set_mac_addr, 2853 .ndo_change_mtu = vmxnet3_change_mtu, 2854 .ndo_get_stats = vmxnet3_get_stats, 2855 .ndo_tx_timeout = vmxnet3_tx_timeout, 2856 .ndo_set_multicast_list = vmxnet3_set_mc, 2857 .ndo_vlan_rx_register = vmxnet3_vlan_rx_register, 2858 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid, 2859 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid, 2860#ifdef CONFIG_NET_POLL_CONTROLLER 2861 .ndo_poll_controller = vmxnet3_netpoll, 2862#endif 2863 }; 2864 int err; 2865 bool dma64 = false; /* stupid gcc */ 2866 u32 ver; 2867 struct net_device *netdev; 2868 struct vmxnet3_adapter *adapter; 2869 u8 mac[ETH_ALEN]; 2870 int size; 2871 int num_tx_queues; 2872 int num_rx_queues; 2873 2874#ifdef VMXNET3_RSS 2875 if (enable_mq) 2876 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES, 2877 (int)num_online_cpus()); 2878 else 2879#endif 2880 num_rx_queues = 1; 2881 2882 if (enable_mq) 2883 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES, 2884 (int)num_online_cpus()); 2885 else 2886 num_tx_queues = 1; 2887 2888 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter), 2889 max(num_tx_queues, num_rx_queues)); 2890 printk(KERN_INFO 
"# of Tx queues : %d, # of Rx queues : %d\n", 2891 num_tx_queues, num_rx_queues); 2892 2893 if (!netdev) { 2894 printk(KERN_ERR "Failed to alloc ethernet device for adapter " 2895 "%s\n", pci_name(pdev)); 2896 return -ENOMEM; 2897 } 2898 2899 pci_set_drvdata(pdev, netdev); 2900 adapter = netdev_priv(netdev); 2901 adapter->netdev = netdev; 2902 adapter->pdev = pdev; 2903 2904 adapter->shared = pci_alloc_consistent(adapter->pdev, 2905 sizeof(struct Vmxnet3_DriverShared), 2906 &adapter->shared_pa); 2907 if (!adapter->shared) { 2908 printk(KERN_ERR "Failed to allocate memory for %s\n", 2909 pci_name(pdev)); 2910 err = -ENOMEM; 2911 goto err_alloc_shared; 2912 } 2913 2914 adapter->num_rx_queues = num_rx_queues; 2915 adapter->num_tx_queues = num_tx_queues; 2916 2917 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 2918 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; 2919 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size, 2920 &adapter->queue_desc_pa); 2921 2922 if (!adapter->tqd_start) { 2923 printk(KERN_ERR "Failed to allocate memory for %s\n", 2924 pci_name(pdev)); 2925 err = -ENOMEM; 2926 goto err_alloc_queue_desc; 2927 } 2928 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + 2929 adapter->num_tx_queues); 2930 2931 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); 2932 if (adapter->pm_conf == NULL) { 2933 printk(KERN_ERR "Failed to allocate memory for %s\n", 2934 pci_name(pdev)); 2935 err = -ENOMEM; 2936 goto err_alloc_pm; 2937 } 2938 2939#ifdef VMXNET3_RSS 2940 2941 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL); 2942 if (adapter->rss_conf == NULL) { 2943 printk(KERN_ERR "Failed to allocate memory for %s\n", 2944 pci_name(pdev)); 2945 err = -ENOMEM; 2946 goto err_alloc_rss; 2947 } 2948#endif /* VMXNET3_RSS */ 2949 2950 err = vmxnet3_alloc_pci_resources(adapter, &dma64); 2951 if (err < 0) 2952 goto err_alloc_pci; 2953 2954 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); 2955 if (ver & 1) { 2956 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); 2957 } else { 2958 printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter" 2959 " %s\n", ver, pci_name(pdev)); 2960 err = -EBUSY; 2961 goto err_ver; 2962 } 2963 2964 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS); 2965 if (ver & 1) { 2966 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1); 2967 } else { 2968 printk(KERN_ERR "Incompatible upt version (0x%x) for " 2969 "adapter %s\n", ver, pci_name(pdev)); 2970 err = -EBUSY; 2971 goto err_ver; 2972 } 2973 2974 vmxnet3_declare_features(adapter, dma64); 2975 2976 adapter->dev_number = atomic_read(&devices_found); 2977 2978 adapter->share_intr = irq_share_mode; 2979 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE && 2980 adapter->num_tx_queues != adapter->num_rx_queues) 2981 adapter->share_intr = VMXNET3_INTR_DONTSHARE; 2982 2983 vmxnet3_alloc_intr_resources(adapter); 2984 2985#ifdef VMXNET3_RSS 2986 if (adapter->num_rx_queues > 1 && 2987 adapter->intr.type == VMXNET3_IT_MSIX) { 2988 adapter->rss = true; 2989 printk(KERN_INFO "RSS is enabled.\n"); 2990 } else { 2991 adapter->rss = false; 2992 } 2993#endif 2994 2995 vmxnet3_read_mac_addr(adapter, mac); 2996 memcpy(netdev->dev_addr, mac, netdev->addr_len); 2997 2998 netdev->netdev_ops = &vmxnet3_netdev_ops; 2999 vmxnet3_set_ethtool_ops(netdev); 3000 netdev->watchdog_timeo = 5 * HZ; 3001 3002 INIT_WORK(&adapter->work, vmxnet3_reset_work); 3003 3004 if (adapter->intr.type == VMXNET3_IT_MSIX) { 3005 int i; 3006 for 
(i = 0; i < adapter->num_rx_queues; i++) { 3007 netif_napi_add(adapter->netdev, 3008 &adapter->rx_queue[i].napi, 3009 vmxnet3_poll_rx_only, 64); 3010 } 3011 } else { 3012 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi, 3013 vmxnet3_poll, 64); 3014 } 3015 3016 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 3017 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues); 3018 3019 SET_NETDEV_DEV(netdev, &pdev->dev); 3020 err = register_netdev(netdev); 3021 3022 if (err) { 3023 printk(KERN_ERR "Failed to register adapter %s\n", 3024 pci_name(pdev)); 3025 goto err_register; 3026 } 3027 3028 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 3029 vmxnet3_check_link(adapter, false); 3030 atomic_inc(&devices_found); 3031 return 0; 3032 3033err_register: 3034 vmxnet3_free_intr_resources(adapter); 3035err_ver: 3036 vmxnet3_free_pci_resources(adapter); 3037err_alloc_pci: 3038#ifdef VMXNET3_RSS 3039 kfree(adapter->rss_conf); 3040err_alloc_rss: 3041#endif 3042 kfree(adapter->pm_conf); 3043err_alloc_pm: 3044 pci_free_consistent(adapter->pdev, size, adapter->tqd_start, 3045 adapter->queue_desc_pa); 3046err_alloc_queue_desc: 3047 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3048 adapter->shared, adapter->shared_pa); 3049err_alloc_shared: 3050 pci_set_drvdata(pdev, NULL); 3051 free_netdev(netdev); 3052 return err; 3053} 3054 3055 3056static void __devexit 3057vmxnet3_remove_device(struct pci_dev *pdev) 3058{ 3059 struct net_device *netdev = pci_get_drvdata(pdev); 3060 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3061 int size = 0; 3062 int num_rx_queues; 3063 3064#ifdef VMXNET3_RSS 3065 if (enable_mq) 3066 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES, 3067 (int)num_online_cpus()); 3068 else 3069#endif 3070 num_rx_queues = 1; 3071 3072 cancel_work_sync(&adapter->work); 3073 3074 unregister_netdev(netdev); 3075 3076 vmxnet3_free_intr_resources(adapter); 3077 vmxnet3_free_pci_resources(adapter); 3078#ifdef VMXNET3_RSS 3079 kfree(adapter->rss_conf); 3080#endif 3081 kfree(adapter->pm_conf); 3082 3083 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 3084 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues; 3085 pci_free_consistent(adapter->pdev, size, adapter->tqd_start, 3086 adapter->queue_desc_pa); 3087 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3088 adapter->shared, adapter->shared_pa); 3089 free_netdev(netdev); 3090} 3091 3092 3093#ifdef CONFIG_PM 3094 3095static int 3096vmxnet3_suspend(struct device *device) 3097{ 3098 struct pci_dev *pdev = to_pci_dev(device); 3099 struct net_device *netdev = pci_get_drvdata(pdev); 3100 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3101 struct Vmxnet3_PMConf *pmConf; 3102 struct ethhdr *ehdr; 3103 struct arphdr *ahdr; 3104 u8 *arpreq; 3105 struct in_device *in_dev; 3106 struct in_ifaddr *ifa; 3107 int i = 0; 3108 3109 if (!netif_running(netdev)) 3110 return 0; 3111 3112 vmxnet3_disable_all_intrs(adapter); 3113 vmxnet3_free_irqs(adapter); 3114 vmxnet3_free_intr_resources(adapter); 3115 3116 netif_device_detach(netdev); 3117 netif_tx_stop_all_queues(netdev); 3118 3119 /* Create wake-up filters. 
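 * Each filter is a byte pattern plus a bitmask: pattern byte b is
 * compared only when mask[b / 8] & (1 << (b % 8)) is set, as the
 * values chosen below imply. The ARP filter, for instance, matches
 * the Ethernet proto bytes 12-13 (mask[1] = 0x30), the ARP op bytes
 * 20-21 (mask[2] = 0x30) and the target IP bytes 38-41
 * (mask[4] = 0xC0, mask[5] = 0x03).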
*/ 3120 pmConf = adapter->pm_conf; 3121 memset(pmConf, 0, sizeof(*pmConf)); 3122 3123 if (adapter->wol & WAKE_UCAST) { 3124 pmConf->filters[i].patternSize = ETH_ALEN; 3125 pmConf->filters[i].maskSize = 1; 3126 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); 3127 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ 3128 3129 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 3130 i++; 3131 } 3132 3133 if (adapter->wol & WAKE_ARP) { 3134 in_dev = in_dev_get(netdev); 3135 if (!in_dev) 3136 goto skip_arp; 3137 3138 ifa = (struct in_ifaddr *)in_dev->ifa_list; 3139 if (!ifa) 3140 goto skip_arp; 3141 3142 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/ 3143 sizeof(struct arphdr) + /* ARP header */ 3144 2 * ETH_ALEN + /* 2 Ethernet addresses*/ 3145 2 * sizeof(u32); /*2 IPv4 addresses */ 3146 pmConf->filters[i].maskSize = 3147 (pmConf->filters[i].patternSize - 1) / 8 + 1; 3148 3149 /* ETH_P_ARP in Ethernet header. */ 3150 ehdr = (struct ethhdr *)pmConf->filters[i].pattern; 3151 ehdr->h_proto = htons(ETH_P_ARP); 3152 3153 /* ARPOP_REQUEST in ARP header. */ 3154 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN]; 3155 ahdr->ar_op = htons(ARPOP_REQUEST); 3156 arpreq = (u8 *)(ahdr + 1); 3157 3158 /* The Unicast IPv4 address in 'tip' field. */ 3159 arpreq += 2 * ETH_ALEN + sizeof(u32); 3160 *(u32 *)arpreq = ifa->ifa_address; 3161 3162 /* The mask for the relevant bits. */ 3163 pmConf->filters[i].mask[0] = 0x00; 3164 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */ 3165 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */ 3166 pmConf->filters[i].mask[3] = 0x00; 3167 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */ 3168 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ 3169 in_dev_put(in_dev); 3170 3171 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 3172 i++; 3173 } 3174 3175skip_arp: 3176 if (adapter->wol & WAKE_MAGIC) 3177 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; 3178 3179 pmConf->numFilters = i; 3180 3181 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); 3182 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( 3183 *pmConf)); 3184 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( 3185 pmConf)); 3186 3187 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3188 VMXNET3_CMD_UPDATE_PMCFG); 3189 3190 pci_save_state(pdev); 3191 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), 3192 adapter->wol); 3193 pci_disable_device(pdev); 3194 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND)); 3195 3196 return 0; 3197} 3198 3199 3200static int 3201vmxnet3_resume(struct device *device) 3202{ 3203 int err; 3204 struct pci_dev *pdev = to_pci_dev(device); 3205 struct net_device *netdev = pci_get_drvdata(pdev); 3206 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3207 struct Vmxnet3_PMConf *pmConf; 3208 3209 if (!netif_running(netdev)) 3210 return 0; 3211 3212 /* Destroy wake-up filters. 
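 * An all-zero PMConf pushed via VMXNET3_CMD_UPDATE_PMCFG below
 * disarms whatever vmxnet3_suspend() programmed, so no stale wake-up
 * filter survives into normal operation.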
*/ 3213 pmConf = adapter->pm_conf; 3214 memset(pmConf, 0, sizeof(*pmConf)); 3215 3216 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1); 3217 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof( 3218 *pmConf)); 3219 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( 3220 pmConf)); 3221 3222 netif_device_attach(netdev); 3223 pci_set_power_state(pdev, PCI_D0); 3224 pci_restore_state(pdev); 3225 err = pci_enable_device_mem(pdev); 3226 if (err != 0) 3227 return err; 3228 3229 pci_enable_wake(pdev, PCI_D0, 0); 3230 3231 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 3232 VMXNET3_CMD_UPDATE_PMCFG); 3233 vmxnet3_alloc_intr_resources(adapter); 3234 vmxnet3_request_irqs(adapter); 3235 vmxnet3_enable_all_intrs(adapter); 3236 3237 return 0; 3238} 3239 3240static const struct dev_pm_ops vmxnet3_pm_ops = { 3241 .suspend = vmxnet3_suspend, 3242 .resume = vmxnet3_resume, 3243}; 3244#endif 3245 3246static struct pci_driver vmxnet3_driver = { 3247 .name = vmxnet3_driver_name, 3248 .id_table = vmxnet3_pciid_table, 3249 .probe = vmxnet3_probe_device, 3250 .remove = __devexit_p(vmxnet3_remove_device), 3251#ifdef CONFIG_PM 3252 .driver.pm = &vmxnet3_pm_ops, 3253#endif 3254}; 3255 3256 3257static int __init 3258vmxnet3_init_module(void) 3259{ 3260 printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC, 3261 VMXNET3_DRIVER_VERSION_REPORT); 3262 return pci_register_driver(&vmxnet3_driver); 3263} 3264 3265module_init(vmxnet3_init_module); 3266 3267 3268static void 3269vmxnet3_exit_module(void) 3270{ 3271 pci_unregister_driver(&vmxnet3_driver); 3272} 3273 3274module_exit(vmxnet3_exit_module); 3275 3276MODULE_AUTHOR("VMware, Inc."); 3277MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC); 3278MODULE_LICENSE("GPL v2"); 3279MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING); 3280