vmxnet3_drv.c revision 8f7e524ce33ca81b663711404709396165da3cbd
1/* 2 * Linux driver for VMware's vmxnet3 ethernet NIC. 3 * 4 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the 8 * Free Software Foundation; version 2 of the License and no later version. 9 * 10 * This program is distributed in the hope that it will be useful, but 11 * WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 13 * NON INFRINGEMENT. See the GNU General Public License for more 14 * details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * The full GNU General Public License is included in this distribution in 21 * the file called "COPYING". 22 * 23 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> 24 * 25 */ 26 27#include "vmxnet3_int.h" 28 29char vmxnet3_driver_name[] = "vmxnet3"; 30#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver" 31 32 33/* 34 * PCI Device ID Table 35 * Last entry must be all 0s 36 */ 37static const struct pci_device_id vmxnet3_pciid_table[] = { 38 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)}, 39 {0} 40}; 41 42MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table); 43 44static atomic_t devices_found; 45 46 47/* 48 * Enable/Disable the given intr 49 */ 50static void 51vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) 52{ 53 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0); 54} 55 56 57static void 58vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) 59{ 60 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1); 61} 62 63 64/* 65 * Enable/Disable all intrs used by the device 66 */ 67static void 68vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter) 69{ 70 int i; 71 72 for (i = 0; i < adapter->intr.num_intrs; i++) 73 vmxnet3_enable_intr(adapter, i); 74} 75 76 77static void 78vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter) 79{ 80 int i; 81 82 for (i = 0; i < adapter->intr.num_intrs; i++) 83 vmxnet3_disable_intr(adapter, i); 84} 85 86 87static void 88vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events) 89{ 90 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events); 91} 92 93 94static bool 95vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 96{ 97 return netif_queue_stopped(adapter->netdev); 98} 99 100 101static void 102vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 103{ 104 tq->stopped = false; 105 netif_start_queue(adapter->netdev); 106} 107 108 109static void 110vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 111{ 112 tq->stopped = false; 113 netif_wake_queue(adapter->netdev); 114} 115 116 117static void 118vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 119{ 120 tq->stopped = true; 121 tq->num_stop++; 122 netif_stop_queue(adapter->netdev); 123} 124 125 126/* 127 * Check the link state. This may start or stop the tx queue. 128 */ 129static void 130vmxnet3_check_link(struct vmxnet3_adapter *adapter) 131{ 132 u32 ret; 133 134 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); 135 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 136 adapter->link_speed = ret >> 16; 137 if (ret & 1) { /* Link is up. */ 138 printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", 139 adapter->netdev->name, adapter->link_speed); 140 if (!netif_carrier_ok(adapter->netdev)) 141 netif_carrier_on(adapter->netdev); 142 143 vmxnet3_tq_start(&adapter->tx_queue, adapter); 144 } else { 145 printk(KERN_INFO "%s: NIC Link is Down\n", 146 adapter->netdev->name); 147 if (netif_carrier_ok(adapter->netdev)) 148 netif_carrier_off(adapter->netdev); 149 150 vmxnet3_tq_stop(&adapter->tx_queue, adapter); 151 } 152} 153 154 155static void 156vmxnet3_process_events(struct vmxnet3_adapter *adapter) 157{ 158 u32 events = adapter->shared->ecr; 159 if (!events) 160 return; 161 162 vmxnet3_ack_events(adapter, events); 163 164 /* Check if link state has changed */ 165 if (events & VMXNET3_ECR_LINK) 166 vmxnet3_check_link(adapter); 167 168 /* Check if there is an error on xmit/recv queues */ 169 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { 170 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 171 VMXNET3_CMD_GET_QUEUE_STATUS); 172 173 if (adapter->tqd_start->status.stopped) { 174 printk(KERN_ERR "%s: tq error 0x%x\n", 175 adapter->netdev->name, 176 adapter->tqd_start->status.error); 177 } 178 if (adapter->rqd_start->status.stopped) { 179 printk(KERN_ERR "%s: rq error 0x%x\n", 180 adapter->netdev->name, 181 adapter->rqd_start->status.error); 182 } 183 184 schedule_work(&adapter->work); 185 } 186} 187 188 189static void 190vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, 191 struct pci_dev *pdev) 192{ 193 if (tbi->map_type == VMXNET3_MAP_SINGLE) 194 pci_unmap_single(pdev, tbi->dma_addr, tbi->len, 195 PCI_DMA_TODEVICE); 196 else if (tbi->map_type == VMXNET3_MAP_PAGE) 197 pci_unmap_page(pdev, tbi->dma_addr, tbi->len, 198 PCI_DMA_TODEVICE); 199 else 200 BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); 201 202 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */ 203} 204 205 206static int 207vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, 208 struct pci_dev *pdev, struct vmxnet3_adapter *adapter) 209{ 210 struct sk_buff *skb; 211 int entries = 0; 212 213 /* no out of order completion */ 214 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); 215 BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1); 216 217 skb = tq->buf_info[eop_idx].skb; 218 BUG_ON(skb == NULL); 219 tq->buf_info[eop_idx].skb = NULL; 220 221 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); 222 223 while (tq->tx_ring.next2comp != eop_idx) { 224 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, 225 pdev); 226 227 /* update next2comp w/o tx_lock. Since we are marking more, 228 * instead of less, tx ring entries avail, the worst case is 229 * that the tx routine incorrectly re-queues a pkt due to 230 * insufficient tx ring entries. 231 */ 232 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); 233 entries++; 234 } 235 236 dev_kfree_skb_any(skb); 237 return entries; 238} 239 240 241static int 242vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, 243 struct vmxnet3_adapter *adapter) 244{ 245 int completed = 0; 246 union Vmxnet3_GenericDesc *gdesc; 247 248 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 249 while (gdesc->tcd.gen == tq->comp_ring.gen) { 250 completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq, 251 adapter->pdev, adapter); 252 253 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); 254 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 255 } 256 257 if (completed) { 258 spin_lock(&tq->tx_lock); 259 if (unlikely(vmxnet3_tq_stopped(tq, adapter) && 260 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > 261 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) && 262 netif_carrier_ok(adapter->netdev))) { 263 vmxnet3_tq_wake(tq, adapter); 264 } 265 spin_unlock(&tq->tx_lock); 266 } 267 return completed; 268} 269 270 271static void 272vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, 273 struct vmxnet3_adapter *adapter) 274{ 275 int i; 276 277 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { 278 struct vmxnet3_tx_buf_info *tbi; 279 union Vmxnet3_GenericDesc *gdesc; 280 281 tbi = tq->buf_info + tq->tx_ring.next2comp; 282 gdesc = tq->tx_ring.base + tq->tx_ring.next2comp; 283 284 vmxnet3_unmap_tx_buf(tbi, adapter->pdev); 285 if (tbi->skb) { 286 dev_kfree_skb_any(tbi->skb); 287 tbi->skb = NULL; 288 } 289 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); 290 } 291 292 /* sanity check, verify all buffers are indeed unmapped and freed */ 293 for (i = 0; i < tq->tx_ring.size; i++) { 294 BUG_ON(tq->buf_info[i].skb != NULL || 295 tq->buf_info[i].map_type != VMXNET3_MAP_NONE); 296 } 297 298 tq->tx_ring.gen = VMXNET3_INIT_GEN; 299 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; 300 301 tq->comp_ring.gen = VMXNET3_INIT_GEN; 302 tq->comp_ring.next2proc = 0; 303} 304 305 306void 307vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, 308 struct vmxnet3_adapter *adapter) 309{ 310 if (tq->tx_ring.base) { 311 pci_free_consistent(adapter->pdev, tq->tx_ring.size * 312 sizeof(struct Vmxnet3_TxDesc), 313 tq->tx_ring.base, tq->tx_ring.basePA); 314 tq->tx_ring.base = NULL; 315 } 316 if (tq->data_ring.base) { 317 pci_free_consistent(adapter->pdev, tq->data_ring.size * 318 sizeof(struct Vmxnet3_TxDataDesc), 319 tq->data_ring.base, tq->data_ring.basePA); 320 tq->data_ring.base = NULL; 321 } 322 if (tq->comp_ring.base) { 323 pci_free_consistent(adapter->pdev, tq->comp_ring.size * 324 sizeof(struct Vmxnet3_TxCompDesc), 325 tq->comp_ring.base, tq->comp_ring.basePA); 326 tq->comp_ring.base = NULL; 327 } 328 kfree(tq->buf_info); 329 tq->buf_info = NULL; 330} 331 332 333static void 334vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, 335 struct vmxnet3_adapter *adapter) 336{ 337 int i; 338 339 /* reset the tx ring contents to 0 and reset the tx ring states */ 340 memset(tq->tx_ring.base, 0, tq->tx_ring.size * 341 sizeof(struct Vmxnet3_TxDesc)); 342 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; 343 tq->tx_ring.gen = VMXNET3_INIT_GEN; 344 345 memset(tq->data_ring.base, 0, tq->data_ring.size * 346 sizeof(struct Vmxnet3_TxDataDesc)); 347 348 /* reset the tx comp ring contents to 0 and reset comp ring states */ 349 memset(tq->comp_ring.base, 0, tq->comp_ring.size * 350 sizeof(struct Vmxnet3_TxCompDesc)); 351 tq->comp_ring.next2proc = 0; 352 tq->comp_ring.gen = VMXNET3_INIT_GEN; 353 354 /* reset the bookkeeping data */ 355 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); 356 for (i = 0; i < tq->tx_ring.size; i++) 357 tq->buf_info[i].map_type = VMXNET3_MAP_NONE; 358 359 /* stats are not reset */ 360} 361 362 363static int 364vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, 365 struct vmxnet3_adapter *adapter) 366{ 367 BUG_ON(tq->tx_ring.base || tq->data_ring.base || 368 tq->comp_ring.base || tq->buf_info); 369 370 tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size 371 * sizeof(struct Vmxnet3_TxDesc), 372 &tq->tx_ring.basePA); 373 if (!tq->tx_ring.base) { 374 printk(KERN_ERR "%s: failed to allocate tx ring\n", 375 adapter->netdev->name); 376 goto err; 377 } 378 379 tq->data_ring.base = pci_alloc_consistent(adapter->pdev, 380 tq->data_ring.size * 381 sizeof(struct Vmxnet3_TxDataDesc), 382 &tq->data_ring.basePA); 383 if (!tq->data_ring.base) { 384 printk(KERN_ERR "%s: failed to allocate data ring\n", 385 adapter->netdev->name); 386 goto err; 387 } 388 389 tq->comp_ring.base = pci_alloc_consistent(adapter->pdev, 390 tq->comp_ring.size * 391 sizeof(struct Vmxnet3_TxCompDesc), 392 &tq->comp_ring.basePA); 393 if (!tq->comp_ring.base) { 394 printk(KERN_ERR "%s: failed to allocate tx comp ring\n", 395 adapter->netdev->name); 396 goto err; 397 } 398 399 tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]), 400 GFP_KERNEL); 401 if (!tq->buf_info) { 402 printk(KERN_ERR "%s: failed to allocate tx bufinfo\n", 403 adapter->netdev->name); 404 goto err; 405 } 406 407 return 0; 408 409err: 410 vmxnet3_tq_destroy(tq, adapter); 411 return -ENOMEM; 412} 413 414 415/* 416 * starting from ring->next2fill, allocate rx buffers for the given ring 417 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers 418 * are allocated or allocation fails 419 */ 420 421static int 422vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, 423 int num_to_alloc, struct vmxnet3_adapter *adapter) 424{ 425 int num_allocated = 0; 426 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx]; 427 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; 428 u32 val; 429 430 while (num_allocated < num_to_alloc) { 431 struct vmxnet3_rx_buf_info *rbi; 432 union Vmxnet3_GenericDesc *gd; 433 434 rbi = rbi_base + ring->next2fill; 435 gd = ring->base + ring->next2fill; 436 437 if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { 438 if (rbi->skb == NULL) { 439 rbi->skb = dev_alloc_skb(rbi->len + 440 NET_IP_ALIGN); 441 if (unlikely(rbi->skb == NULL)) { 442 rq->stats.rx_buf_alloc_failure++; 443 break; 444 } 445 rbi->skb->dev = adapter->netdev; 446 447 skb_reserve(rbi->skb, NET_IP_ALIGN); 448 rbi->dma_addr = pci_map_single(adapter->pdev, 449 rbi->skb->data, rbi->len, 450 PCI_DMA_FROMDEVICE); 451 } else { 452 /* rx buffer skipped by the device */ 453 } 454 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT; 455 } else { 456 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE || 457 rbi->len != PAGE_SIZE); 458 459 if (rbi->page == NULL) { 460 rbi->page = alloc_page(GFP_ATOMIC); 461 if (unlikely(rbi->page == NULL)) { 462 rq->stats.rx_buf_alloc_failure++; 463 break; 464 } 465 rbi->dma_addr = pci_map_page(adapter->pdev, 466 rbi->page, 0, PAGE_SIZE, 467 PCI_DMA_FROMDEVICE); 468 } else { 469 /* rx buffers skipped by the device */ 470 } 471 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; 472 } 473 474 BUG_ON(rbi->dma_addr == 0); 475 gd->rxd.addr = rbi->dma_addr; 476 gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val | 477 rbi->len; 478 479 num_allocated++; 480 vmxnet3_cmd_ring_adv_next2fill(ring); 481 } 482 rq->uncommitted[ring_idx] += num_allocated; 483 484 dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp " 485 "%u, uncommited %u\n", num_allocated, ring->next2fill, 486 ring->next2comp, rq->uncommitted[ring_idx]); 487 488 /* so that the device can distinguish a full ring and an empty ring */ 489 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp); 490 491 return num_allocated; 492} 493 494 495static void 496vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, 497 struct vmxnet3_rx_buf_info *rbi) 498{ 499 struct skb_frag_struct *frag = skb_shinfo(skb)->frags + 500 skb_shinfo(skb)->nr_frags; 501 502 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); 503 504 frag->page = rbi->page; 505 frag->page_offset = 0; 506 frag->size = rcd->len; 507 skb->data_len += frag->size; 508 skb_shinfo(skb)->nr_frags++; 509} 510 511 512static void 513vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, 514 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, 515 struct vmxnet3_adapter *adapter) 516{ 517 u32 dw2, len; 518 unsigned long buf_offset; 519 int i; 520 union Vmxnet3_GenericDesc *gdesc; 521 struct vmxnet3_tx_buf_info *tbi = NULL; 522 523 BUG_ON(ctx->copy_size > skb_headlen(skb)); 524 525 /* use the previous gen bit for the SOP desc */ 526 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; 527 528 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; 529 gdesc = ctx->sop_txd; /* both loops below can be skipped */ 530 531 /* no need to map the buffer if headers are copied */ 532 if (ctx->copy_size) { 533 ctx->sop_txd->txd.addr = tq->data_ring.basePA + 534 tq->tx_ring.next2fill * 535 sizeof(struct Vmxnet3_TxDataDesc); 536 ctx->sop_txd->dword[2] = dw2 | ctx->copy_size; 537 ctx->sop_txd->dword[3] = 0; 538 539 tbi = tq->buf_info + tq->tx_ring.next2fill; 540 tbi->map_type = VMXNET3_MAP_NONE; 541 542 dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n", 543 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr, 544 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); 545 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 546 547 /* use the right gen for non-SOP desc */ 548 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 549 } 550 551 /* linear part can use multiple tx desc if it's big */ 552 len = skb_headlen(skb) - ctx->copy_size; 553 buf_offset = ctx->copy_size; 554 while (len) { 555 u32 buf_size; 556 557 buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ? 558 VMXNET3_MAX_TX_BUF_SIZE : len; 559 560 tbi = tq->buf_info + tq->tx_ring.next2fill; 561 tbi->map_type = VMXNET3_MAP_SINGLE; 562 tbi->dma_addr = pci_map_single(adapter->pdev, 563 skb->data + buf_offset, buf_size, 564 PCI_DMA_TODEVICE); 565 566 tbi->len = buf_size; /* this automatically convert 2^14 to 0 */ 567 568 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 569 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 570 571 gdesc->txd.addr = tbi->dma_addr; 572 gdesc->dword[2] = dw2 | buf_size; 573 gdesc->dword[3] = 0; 574 575 dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n", 576 tq->tx_ring.next2fill, gdesc->txd.addr, 577 gdesc->dword[2], gdesc->dword[3]); 578 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 579 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 580 581 len -= buf_size; 582 buf_offset += buf_size; 583 } 584 585 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 586 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 587 588 tbi = tq->buf_info + tq->tx_ring.next2fill; 589 tbi->map_type = VMXNET3_MAP_PAGE; 590 tbi->dma_addr = pci_map_page(adapter->pdev, frag->page, 591 frag->page_offset, frag->size, 592 PCI_DMA_TODEVICE); 593 594 tbi->len = frag->size; 595 596 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 597 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 598 599 gdesc->txd.addr = tbi->dma_addr; 600 gdesc->dword[2] = dw2 | frag->size; 601 gdesc->dword[3] = 0; 602 603 dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n", 604 tq->tx_ring.next2fill, gdesc->txd.addr, 605 gdesc->dword[2], gdesc->dword[3]); 606 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 607 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 608 } 609 610 ctx->eop_txd = gdesc; 611 612 /* set the last buf_info for the pkt */ 613 tbi->skb = skb; 614 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; 615} 616 617 618/* 619 * parse and copy relevant protocol headers: 620 * For a tso pkt, relevant headers are L2/3/4 including options 621 * For a pkt requesting csum offloading, they are L2/3 and may include L4 622 * if it's a TCP/UDP pkt 623 * 624 * Returns: 625 * -1: error happens during parsing 626 * 0: protocol headers parsed, but too big to be copied 627 * 1: protocol headers parsed and copied 628 * 629 * Other effects: 630 * 1. related *ctx fields are updated. 631 * 2. ctx->copy_size is # of bytes copied 632 * 3. the portion copied is guaranteed to be in the linear part 633 * 634 */ 635static int 636vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, 637 struct vmxnet3_tx_ctx *ctx, 638 struct vmxnet3_adapter *adapter) 639{ 640 struct Vmxnet3_TxDataDesc *tdd; 641 642 if (ctx->mss) { 643 ctx->eth_ip_hdr_size = skb_transport_offset(skb); 644 ctx->l4_hdr_size = ((struct tcphdr *) 645 skb_transport_header(skb))->doff * 4; 646 ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; 647 } else { 648 unsigned int pull_size; 649 650 if (skb->ip_summed == CHECKSUM_PARTIAL) { 651 ctx->eth_ip_hdr_size = skb_transport_offset(skb); 652 653 if (ctx->ipv4) { 654 struct iphdr *iph = (struct iphdr *) 655 skb_network_header(skb); 656 if (iph->protocol == IPPROTO_TCP) { 657 pull_size = ctx->eth_ip_hdr_size + 658 sizeof(struct tcphdr); 659 660 if (unlikely(!pskb_may_pull(skb, 661 pull_size))) { 662 goto err; 663 } 664 ctx->l4_hdr_size = ((struct tcphdr *) 665 skb_transport_header(skb))->doff * 4; 666 } else if (iph->protocol == IPPROTO_UDP) { 667 ctx->l4_hdr_size = 668 sizeof(struct udphdr); 669 } else { 670 ctx->l4_hdr_size = 0; 671 } 672 } else { 673 /* for simplicity, don't copy L4 headers */ 674 ctx->l4_hdr_size = 0; 675 } 676 ctx->copy_size = ctx->eth_ip_hdr_size + 677 ctx->l4_hdr_size; 678 } else { 679 ctx->eth_ip_hdr_size = 0; 680 ctx->l4_hdr_size = 0; 681 /* copy as much as allowed */ 682 ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE 683 , skb_headlen(skb)); 684 } 685 686 /* make sure headers are accessible directly */ 687 if (unlikely(!pskb_may_pull(skb, ctx->copy_size))) 688 goto err; 689 } 690 691 if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) { 692 tq->stats.oversized_hdr++; 693 ctx->copy_size = 0; 694 return 0; 695 } 696 697 tdd = tq->data_ring.base + tq->tx_ring.next2fill; 698 699 memcpy(tdd->data, skb->data, ctx->copy_size); 700 dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n", 701 ctx->copy_size, tq->tx_ring.next2fill); 702 return 1; 703 704err: 705 return -1; 706} 707 708 709static void 710vmxnet3_prepare_tso(struct sk_buff *skb, 711 struct vmxnet3_tx_ctx *ctx) 712{ 713 struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb); 714 if (ctx->ipv4) { 715 struct iphdr *iph = (struct iphdr *)skb_network_header(skb); 716 iph->check = 0; 717 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, 718 IPPROTO_TCP, 0); 719 } else { 720 struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb); 721 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, 722 IPPROTO_TCP, 0); 723 } 724} 725 726 727/* 728 * Transmits a pkt thru a given tq 729 * Returns: 730 * NETDEV_TX_OK: descriptors are setup successfully 731 * NETDEV_TX_OK: error occured, the pkt is dropped 732 * NETDEV_TX_BUSY: tx ring is full, queue is stopped 733 * 734 * Side-effects: 735 * 1. tx ring may be changed 736 * 2. tq stats may be updated accordingly 737 * 3. shared->txNumDeferred may be updated 738 */ 739 740static int 741vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, 742 struct vmxnet3_adapter *adapter, struct net_device *netdev) 743{ 744 int ret; 745 u32 count; 746 unsigned long flags; 747 struct vmxnet3_tx_ctx ctx; 748 union Vmxnet3_GenericDesc *gdesc; 749 750 /* conservatively estimate # of descriptors to use */ 751 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 752 skb_shinfo(skb)->nr_frags + 1; 753 754 ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP)); 755 756 ctx.mss = skb_shinfo(skb)->gso_size; 757 if (ctx.mss) { 758 if (skb_header_cloned(skb)) { 759 if (unlikely(pskb_expand_head(skb, 0, 0, 760 GFP_ATOMIC) != 0)) { 761 tq->stats.drop_tso++; 762 goto drop_pkt; 763 } 764 tq->stats.copy_skb_header++; 765 } 766 vmxnet3_prepare_tso(skb, &ctx); 767 } else { 768 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) { 769 770 /* non-tso pkts must not use more than 771 * VMXNET3_MAX_TXD_PER_PKT entries 772 */ 773 if (skb_linearize(skb) != 0) { 774 tq->stats.drop_too_many_frags++; 775 goto drop_pkt; 776 } 777 tq->stats.linearized++; 778 779 /* recalculate the # of descriptors to use */ 780 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; 781 } 782 } 783 784 ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter); 785 if (ret >= 0) { 786 BUG_ON(ret <= 0 && ctx.copy_size != 0); 787 /* hdrs parsed, check against other limits */ 788 if (ctx.mss) { 789 if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size > 790 VMXNET3_MAX_TX_BUF_SIZE)) { 791 goto hdr_too_big; 792 } 793 } else { 794 if (skb->ip_summed == CHECKSUM_PARTIAL) { 795 if (unlikely(ctx.eth_ip_hdr_size + 796 skb->csum_offset > 797 VMXNET3_MAX_CSUM_OFFSET)) { 798 goto hdr_too_big; 799 } 800 } 801 } 802 } else { 803 tq->stats.drop_hdr_inspect_err++; 804 goto drop_pkt; 805 } 806 807 spin_lock_irqsave(&tq->tx_lock, flags); 808 809 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { 810 tq->stats.tx_ring_full++; 811 dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u" 812 " next2fill %u\n", adapter->netdev->name, 813 tq->tx_ring.next2comp, tq->tx_ring.next2fill); 814 815 vmxnet3_tq_stop(tq, adapter); 816 spin_unlock_irqrestore(&tq->tx_lock, flags); 817 return NETDEV_TX_BUSY; 818 } 819 820 /* fill tx descs related to addr & len */ 821 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); 822 823 /* setup the EOP desc */ 824 ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP; 825 826 /* setup the SOP desc */ 827 gdesc = ctx.sop_txd; 828 if (ctx.mss) { 829 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; 830 gdesc->txd.om = VMXNET3_OM_TSO; 831 gdesc->txd.msscof = ctx.mss; 832 tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen + 833 ctx.mss - 1) / ctx.mss; 834 } else { 835 if (skb->ip_summed == CHECKSUM_PARTIAL) { 836 gdesc->txd.hlen = ctx.eth_ip_hdr_size; 837 gdesc->txd.om = VMXNET3_OM_CSUM; 838 gdesc->txd.msscof = ctx.eth_ip_hdr_size + 839 skb->csum_offset; 840 } else { 841 gdesc->txd.om = 0; 842 gdesc->txd.msscof = 0; 843 } 844 tq->shared->txNumDeferred++; 845 } 846 847 if (vlan_tx_tag_present(skb)) { 848 gdesc->txd.ti = 1; 849 gdesc->txd.tci = vlan_tx_tag_get(skb); 850 } 851 852 wmb(); 853 854 /* finally flips the GEN bit of the SOP desc */ 855 gdesc->dword[2] ^= VMXNET3_TXD_GEN; 856 dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", 857 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - 858 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2], 859 gdesc->dword[3]); 860 861 spin_unlock_irqrestore(&tq->tx_lock, flags); 862 863 if (tq->shared->txNumDeferred >= tq->shared->txThreshold) { 864 tq->shared->txNumDeferred = 0; 865 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, 866 tq->tx_ring.next2fill); 867 } 868 netdev->trans_start = jiffies; 869 870 return NETDEV_TX_OK; 871 872hdr_too_big: 873 tq->stats.drop_oversized_hdr++; 874drop_pkt: 875 tq->stats.drop_total++; 876 dev_kfree_skb(skb); 877 return NETDEV_TX_OK; 878} 879 880 881static netdev_tx_t 882vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 883{ 884 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 885 struct vmxnet3_tx_queue *tq = &adapter->tx_queue; 886 887 return vmxnet3_tq_xmit(skb, tq, adapter, netdev); 888} 889 890 891static void 892vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, 893 struct sk_buff *skb, 894 union Vmxnet3_GenericDesc *gdesc) 895{ 896 if (!gdesc->rcd.cnc && adapter->rxcsum) { 897 /* typical case: TCP/UDP over IP and both csums are correct */ 898 if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) == 899 VMXNET3_RCD_CSUM_OK) { 900 skb->ip_summed = CHECKSUM_UNNECESSARY; 901 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 902 BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6)); 903 BUG_ON(gdesc->rcd.frg); 904 } else { 905 if (gdesc->rcd.csum) { 906 skb->csum = htons(gdesc->rcd.csum); 907 skb->ip_summed = CHECKSUM_PARTIAL; 908 } else { 909 skb->ip_summed = CHECKSUM_NONE; 910 } 911 } 912 } else { 913 skb->ip_summed = CHECKSUM_NONE; 914 } 915} 916 917 918static void 919vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, 920 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter) 921{ 922 rq->stats.drop_err++; 923 if (!rcd->fcs) 924 rq->stats.drop_fcs++; 925 926 rq->stats.drop_total++; 927 928 /* 929 * We do not unmap and chain the rx buffer to the skb. 930 * We basically pretend this buffer is not used and will be recycled 931 * by vmxnet3_rq_alloc_rx_buf() 932 */ 933 934 /* 935 * ctx->skb may be NULL if this is the first and the only one 936 * desc for the pkt 937 */ 938 if (ctx->skb) 939 dev_kfree_skb_irq(ctx->skb); 940 941 ctx->skb = NULL; 942} 943 944 945static int 946vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, 947 struct vmxnet3_adapter *adapter, int quota) 948{ 949 static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2}; 950 u32 num_rxd = 0; 951 struct Vmxnet3_RxCompDesc *rcd; 952 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; 953 954 rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; 955 while (rcd->gen == rq->comp_ring.gen) { 956 struct vmxnet3_rx_buf_info *rbi; 957 struct sk_buff *skb; 958 int num_to_alloc; 959 struct Vmxnet3_RxDesc *rxd; 960 u32 idx, ring_idx; 961 962 if (num_rxd >= quota) { 963 /* we may stop even before we see the EOP desc of 964 * the current pkt 965 */ 966 break; 967 } 968 num_rxd++; 969 970 idx = rcd->rxdIdx; 971 ring_idx = rcd->rqID == rq->qid ? 0 : 1; 972 973 rxd = &rq->rx_ring[ring_idx].base[idx].rxd; 974 rbi = rq->buf_info[ring_idx] + idx; 975 976 BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len); 977 978 if (unlikely(rcd->eop && rcd->err)) { 979 vmxnet3_rx_error(rq, rcd, ctx, adapter); 980 goto rcd_done; 981 } 982 983 if (rcd->sop) { /* first buf of the pkt */ 984 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD || 985 rcd->rqID != rq->qid); 986 987 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB); 988 BUG_ON(ctx->skb != NULL || rbi->skb == NULL); 989 990 if (unlikely(rcd->len == 0)) { 991 /* Pretend the rx buffer is skipped. */ 992 BUG_ON(!(rcd->sop && rcd->eop)); 993 dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n", 994 ring_idx, idx); 995 goto rcd_done; 996 } 997 998 ctx->skb = rbi->skb; 999 rbi->skb = NULL; 1000 1001 pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, 1002 PCI_DMA_FROMDEVICE); 1003 1004 skb_put(ctx->skb, rcd->len); 1005 } else { 1006 BUG_ON(ctx->skb == NULL); 1007 /* non SOP buffer must be type 1 in most cases */ 1008 if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) { 1009 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); 1010 1011 if (rcd->len) { 1012 pci_unmap_page(adapter->pdev, 1013 rbi->dma_addr, rbi->len, 1014 PCI_DMA_FROMDEVICE); 1015 1016 vmxnet3_append_frag(ctx->skb, rcd, rbi); 1017 rbi->page = NULL; 1018 } 1019 } else { 1020 /* 1021 * The only time a non-SOP buffer is type 0 is 1022 * when it's EOP and error flag is raised, which 1023 * has already been handled. 1024 */ 1025 BUG_ON(true); 1026 } 1027 } 1028 1029 skb = ctx->skb; 1030 if (rcd->eop) { 1031 skb->len += skb->data_len; 1032 skb->truesize += skb->data_len; 1033 1034 vmxnet3_rx_csum(adapter, skb, 1035 (union Vmxnet3_GenericDesc *)rcd); 1036 skb->protocol = eth_type_trans(skb, adapter->netdev); 1037 1038 if (unlikely(adapter->vlan_grp && rcd->ts)) { 1039 vlan_hwaccel_receive_skb(skb, 1040 adapter->vlan_grp, rcd->tci); 1041 } else { 1042 netif_receive_skb(skb); 1043 } 1044 1045 adapter->netdev->last_rx = jiffies; 1046 ctx->skb = NULL; 1047 } 1048 1049rcd_done: 1050 /* device may skip some rx descs */ 1051 rq->rx_ring[ring_idx].next2comp = idx; 1052 VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp, 1053 rq->rx_ring[ring_idx].size); 1054 1055 /* refill rx buffers frequently to avoid starving the h/w */ 1056 num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring + 1057 ring_idx); 1058 if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq, 1059 ring_idx, adapter))) { 1060 vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc, 1061 adapter); 1062 1063 /* if needed, update the register */ 1064 if (unlikely(rq->shared->updateRxProd)) { 1065 VMXNET3_WRITE_BAR0_REG(adapter, 1066 rxprod_reg[ring_idx] + rq->qid * 8, 1067 rq->rx_ring[ring_idx].next2fill); 1068 rq->uncommitted[ring_idx] = 0; 1069 } 1070 } 1071 1072 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); 1073 rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; 1074 } 1075 1076 return num_rxd; 1077} 1078 1079 1080static void 1081vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, 1082 struct vmxnet3_adapter *adapter) 1083{ 1084 u32 i, ring_idx; 1085 struct Vmxnet3_RxDesc *rxd; 1086 1087 for (ring_idx = 0; ring_idx < 2; ring_idx++) { 1088 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { 1089 rxd = &rq->rx_ring[ring_idx].base[i].rxd; 1090 1091 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && 1092 rq->buf_info[ring_idx][i].skb) { 1093 pci_unmap_single(adapter->pdev, rxd->addr, 1094 rxd->len, PCI_DMA_FROMDEVICE); 1095 dev_kfree_skb(rq->buf_info[ring_idx][i].skb); 1096 rq->buf_info[ring_idx][i].skb = NULL; 1097 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && 1098 rq->buf_info[ring_idx][i].page) { 1099 pci_unmap_page(adapter->pdev, rxd->addr, 1100 rxd->len, PCI_DMA_FROMDEVICE); 1101 put_page(rq->buf_info[ring_idx][i].page); 1102 rq->buf_info[ring_idx][i].page = NULL; 1103 } 1104 } 1105 1106 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; 1107 rq->rx_ring[ring_idx].next2fill = 1108 rq->rx_ring[ring_idx].next2comp = 0; 1109 rq->uncommitted[ring_idx] = 0; 1110 } 1111 1112 rq->comp_ring.gen = VMXNET3_INIT_GEN; 1113 rq->comp_ring.next2proc = 0; 1114} 1115 1116 1117void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, 1118 struct vmxnet3_adapter *adapter) 1119{ 1120 int i; 1121 int j; 1122 1123 /* all rx buffers must have already been freed */ 1124 for (i = 0; i < 2; i++) { 1125 if (rq->buf_info[i]) { 1126 for (j = 0; j < rq->rx_ring[i].size; j++) 1127 BUG_ON(rq->buf_info[i][j].page != NULL); 1128 } 1129 } 1130 1131 1132 kfree(rq->buf_info[0]); 1133 1134 for (i = 0; i < 2; i++) { 1135 if (rq->rx_ring[i].base) { 1136 pci_free_consistent(adapter->pdev, rq->rx_ring[i].size 1137 * sizeof(struct Vmxnet3_RxDesc), 1138 rq->rx_ring[i].base, 1139 rq->rx_ring[i].basePA); 1140 rq->rx_ring[i].base = NULL; 1141 } 1142 rq->buf_info[i] = NULL; 1143 } 1144 1145 if (rq->comp_ring.base) { 1146 pci_free_consistent(adapter->pdev, rq->comp_ring.size * 1147 sizeof(struct Vmxnet3_RxCompDesc), 1148 rq->comp_ring.base, rq->comp_ring.basePA); 1149 rq->comp_ring.base = NULL; 1150 } 1151} 1152 1153 1154static int 1155vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, 1156 struct vmxnet3_adapter *adapter) 1157{ 1158 int i; 1159 1160 /* initialize buf_info */ 1161 for (i = 0; i < rq->rx_ring[0].size; i++) { 1162 1163 /* 1st buf for a pkt is skbuff */ 1164 if (i % adapter->rx_buf_per_pkt == 0) { 1165 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB; 1166 rq->buf_info[0][i].len = adapter->skb_buf_size; 1167 } else { /* subsequent bufs for a pkt is frag */ 1168 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE; 1169 rq->buf_info[0][i].len = PAGE_SIZE; 1170 } 1171 } 1172 for (i = 0; i < rq->rx_ring[1].size; i++) { 1173 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE; 1174 rq->buf_info[1][i].len = PAGE_SIZE; 1175 } 1176 1177 /* reset internal state and allocate buffers for both rings */ 1178 for (i = 0; i < 2; i++) { 1179 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; 1180 rq->uncommitted[i] = 0; 1181 1182 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * 1183 sizeof(struct Vmxnet3_RxDesc)); 1184 rq->rx_ring[i].gen = VMXNET3_INIT_GEN; 1185 } 1186 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, 1187 adapter) == 0) { 1188 /* at least has 1 rx buffer for the 1st ring */ 1189 return -ENOMEM; 1190 } 1191 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); 1192 1193 /* reset the comp ring */ 1194 rq->comp_ring.next2proc = 0; 1195 memset(rq->comp_ring.base, 0, rq->comp_ring.size * 1196 sizeof(struct Vmxnet3_RxCompDesc)); 1197 rq->comp_ring.gen = VMXNET3_INIT_GEN; 1198 1199 /* reset rxctx */ 1200 rq->rx_ctx.skb = NULL; 1201 1202 /* stats are not reset */ 1203 return 0; 1204} 1205 1206 1207static int 1208vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) 1209{ 1210 int i; 1211 size_t sz; 1212 struct vmxnet3_rx_buf_info *bi; 1213 1214 for (i = 0; i < 2; i++) { 1215 1216 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); 1217 rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz, 1218 &rq->rx_ring[i].basePA); 1219 if (!rq->rx_ring[i].base) { 1220 printk(KERN_ERR "%s: failed to allocate rx ring %d\n", 1221 adapter->netdev->name, i); 1222 goto err; 1223 } 1224 } 1225 1226 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); 1227 rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz, 1228 &rq->comp_ring.basePA); 1229 if (!rq->comp_ring.base) { 1230 printk(KERN_ERR "%s: failed to allocate rx comp ring\n", 1231 adapter->netdev->name); 1232 goto err; 1233 } 1234 1235 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1236 rq->rx_ring[1].size); 1237 bi = kmalloc(sz, GFP_KERNEL); 1238 if (!bi) { 1239 printk(KERN_ERR "%s: failed to allocate rx bufinfo\n", 1240 adapter->netdev->name); 1241 goto err; 1242 } 1243 memset(bi, 0, sz); 1244 rq->buf_info[0] = bi; 1245 rq->buf_info[1] = bi + rq->rx_ring[0].size; 1246 1247 return 0; 1248 1249err: 1250 vmxnet3_rq_destroy(rq, adapter); 1251 return -ENOMEM; 1252} 1253 1254 1255static int 1256vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) 1257{ 1258 if (unlikely(adapter->shared->ecr)) 1259 vmxnet3_process_events(adapter); 1260 1261 vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter); 1262 return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget); 1263} 1264 1265 1266static int 1267vmxnet3_poll(struct napi_struct *napi, int budget) 1268{ 1269 struct vmxnet3_adapter *adapter = container_of(napi, 1270 struct vmxnet3_adapter, napi); 1271 int rxd_done; 1272 1273 rxd_done = vmxnet3_do_poll(adapter, budget); 1274 1275 if (rxd_done < budget) { 1276 napi_complete(napi); 1277 vmxnet3_enable_intr(adapter, 0); 1278 } 1279 return rxd_done; 1280} 1281 1282 1283/* Interrupt handler for vmxnet3 */ 1284static irqreturn_t 1285vmxnet3_intr(int irq, void *dev_id) 1286{ 1287 struct net_device *dev = dev_id; 1288 struct vmxnet3_adapter *adapter = netdev_priv(dev); 1289 1290 if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) { 1291 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); 1292 if (unlikely(icr == 0)) 1293 /* not ours */ 1294 return IRQ_NONE; 1295 } 1296 1297 1298 /* disable intr if needed */ 1299 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 1300 vmxnet3_disable_intr(adapter, 0); 1301 1302 napi_schedule(&adapter->napi); 1303 1304 return IRQ_HANDLED; 1305} 1306 1307#ifdef CONFIG_NET_POLL_CONTROLLER 1308 1309 1310/* netpoll callback. */ 1311static void 1312vmxnet3_netpoll(struct net_device *netdev) 1313{ 1314 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1315 int irq; 1316 1317#ifdef CONFIG_PCI_MSI 1318 if (adapter->intr.type == VMXNET3_IT_MSIX) 1319 irq = adapter->intr.msix_entries[0].vector; 1320 else 1321#endif 1322 irq = adapter->pdev->irq; 1323 1324 disable_irq(irq); 1325 vmxnet3_intr(irq, netdev); 1326 enable_irq(irq); 1327} 1328#endif 1329 1330static int 1331vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) 1332{ 1333 int err; 1334 1335#ifdef CONFIG_PCI_MSI 1336 if (adapter->intr.type == VMXNET3_IT_MSIX) { 1337 /* we only use 1 MSI-X vector */ 1338 err = request_irq(adapter->intr.msix_entries[0].vector, 1339 vmxnet3_intr, 0, adapter->netdev->name, 1340 adapter->netdev); 1341 } else 1342#endif 1343 if (adapter->intr.type == VMXNET3_IT_MSI) { 1344 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, 1345 adapter->netdev->name, adapter->netdev); 1346 } else { 1347 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 1348 IRQF_SHARED, adapter->netdev->name, 1349 adapter->netdev); 1350 } 1351 1352 if (err) 1353 printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" 1354 ":%d\n", adapter->netdev->name, adapter->intr.type, err); 1355 1356 1357 if (!err) { 1358 int i; 1359 /* init our intr settings */ 1360 for (i = 0; i < adapter->intr.num_intrs; i++) 1361 adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE; 1362 1363 /* next setup intr index for all intr sources */ 1364 adapter->tx_queue.comp_ring.intr_idx = 0; 1365 adapter->rx_queue.comp_ring.intr_idx = 0; 1366 adapter->intr.event_intr_idx = 0; 1367 1368 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " 1369 "allocated\n", adapter->netdev->name, adapter->intr.type, 1370 adapter->intr.mask_mode, adapter->intr.num_intrs); 1371 } 1372 1373 return err; 1374} 1375 1376 1377static void 1378vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) 1379{ 1380 BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO || 1381 adapter->intr.num_intrs <= 0); 1382 1383 switch (adapter->intr.type) { 1384#ifdef CONFIG_PCI_MSI 1385 case VMXNET3_IT_MSIX: 1386 { 1387 int i; 1388 1389 for (i = 0; i < adapter->intr.num_intrs; i++) 1390 free_irq(adapter->intr.msix_entries[i].vector, 1391 adapter->netdev); 1392 break; 1393 } 1394#endif 1395 case VMXNET3_IT_MSI: 1396 free_irq(adapter->pdev->irq, adapter->netdev); 1397 break; 1398 case VMXNET3_IT_INTX: 1399 free_irq(adapter->pdev->irq, adapter->netdev); 1400 break; 1401 default: 1402 BUG_ON(true); 1403 } 1404} 1405 1406 1407static void 1408vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 1409{ 1410 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1411 struct Vmxnet3_DriverShared *shared = adapter->shared; 1412 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1413 1414 if (grp) { 1415 /* add vlan rx stripping. */ 1416 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { 1417 int i; 1418 struct Vmxnet3_DSDevRead *devRead = &shared->devRead; 1419 adapter->vlan_grp = grp; 1420 1421 /* update FEATURES to device */ 1422 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 1423 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1424 VMXNET3_CMD_UPDATE_FEATURE); 1425 /* 1426 * Clear entire vfTable; then enable untagged pkts. 1427 * Note: setting one entry in vfTable to non-zero turns 1428 * on VLAN rx filtering. 1429 */ 1430 for (i = 0; i < VMXNET3_VFT_SIZE; i++) 1431 vfTable[i] = 0; 1432 1433 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); 1434 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1435 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1436 } else { 1437 printk(KERN_ERR "%s: vlan_rx_register when device has " 1438 "no NETIF_F_HW_VLAN_RX\n", netdev->name); 1439 } 1440 } else { 1441 /* remove vlan rx stripping. */ 1442 struct Vmxnet3_DSDevRead *devRead = &shared->devRead; 1443 adapter->vlan_grp = NULL; 1444 1445 if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) { 1446 int i; 1447 1448 for (i = 0; i < VMXNET3_VFT_SIZE; i++) { 1449 /* clear entire vfTable; this also disables 1450 * VLAN rx filtering 1451 */ 1452 vfTable[i] = 0; 1453 } 1454 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1455 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1456 1457 /* update FEATURES to device */ 1458 devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; 1459 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1460 VMXNET3_CMD_UPDATE_FEATURE); 1461 } 1462 } 1463} 1464 1465 1466static void 1467vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) 1468{ 1469 if (adapter->vlan_grp) { 1470 u16 vid; 1471 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1472 bool activeVlan = false; 1473 1474 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1475 if (vlan_group_get_device(adapter->vlan_grp, vid)) { 1476 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); 1477 activeVlan = true; 1478 } 1479 } 1480 if (activeVlan) { 1481 /* continue to allow untagged pkts */ 1482 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); 1483 } 1484 } 1485} 1486 1487 1488static void 1489vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1490{ 1491 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1492 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1493 1494 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); 1495 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1496 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1497} 1498 1499 1500static void 1501vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1502{ 1503 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1504 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1505 1506 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); 1507 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1508 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1509} 1510 1511 1512static u8 * 1513vmxnet3_copy_mc(struct net_device *netdev) 1514{ 1515 u8 *buf = NULL; 1516 u32 sz = netdev->mc_count * ETH_ALEN; 1517 1518 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */ 1519 if (sz <= 0xffff) { 1520 /* We may be called with BH disabled */ 1521 buf = kmalloc(sz, GFP_ATOMIC); 1522 if (buf) { 1523 int i; 1524 struct dev_mc_list *mc = netdev->mc_list; 1525 1526 for (i = 0; i < netdev->mc_count; i++) { 1527 BUG_ON(!mc); 1528 memcpy(buf + i * ETH_ALEN, mc->dmi_addr, 1529 ETH_ALEN); 1530 mc = mc->next; 1531 } 1532 } 1533 } 1534 return buf; 1535} 1536 1537 1538static void 1539vmxnet3_set_mc(struct net_device *netdev) 1540{ 1541 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1542 struct Vmxnet3_RxFilterConf *rxConf = 1543 &adapter->shared->devRead.rxFilterConf; 1544 u8 *new_table = NULL; 1545 u32 new_mode = VMXNET3_RXM_UCAST; 1546 1547 if (netdev->flags & IFF_PROMISC) 1548 new_mode |= VMXNET3_RXM_PROMISC; 1549 1550 if (netdev->flags & IFF_BROADCAST) 1551 new_mode |= VMXNET3_RXM_BCAST; 1552 1553 if (netdev->flags & IFF_ALLMULTI) 1554 new_mode |= VMXNET3_RXM_ALL_MULTI; 1555 else 1556 if (netdev->mc_count > 0) { 1557 new_table = vmxnet3_copy_mc(netdev); 1558 if (new_table) { 1559 new_mode |= VMXNET3_RXM_MCAST; 1560 rxConf->mfTableLen = netdev->mc_count * 1561 ETH_ALEN; 1562 rxConf->mfTablePA = virt_to_phys(new_table); 1563 } else { 1564 printk(KERN_INFO "%s: failed to copy mcast list" 1565 ", setting ALL_MULTI\n", netdev->name); 1566 new_mode |= VMXNET3_RXM_ALL_MULTI; 1567 } 1568 } 1569 1570 1571 if (!(new_mode & VMXNET3_RXM_MCAST)) { 1572 rxConf->mfTableLen = 0; 1573 rxConf->mfTablePA = 0; 1574 } 1575 1576 if (new_mode != rxConf->rxMode) { 1577 rxConf->rxMode = new_mode; 1578 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1579 VMXNET3_CMD_UPDATE_RX_MODE); 1580 } 1581 1582 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1583 VMXNET3_CMD_UPDATE_MAC_FILTERS); 1584 1585 kfree(new_table); 1586} 1587 1588 1589/* 1590 * Set up driver_shared based on settings in adapter. 1591 */ 1592 1593static void 1594vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) 1595{ 1596 struct Vmxnet3_DriverShared *shared = adapter->shared; 1597 struct Vmxnet3_DSDevRead *devRead = &shared->devRead; 1598 struct Vmxnet3_TxQueueConf *tqc; 1599 struct Vmxnet3_RxQueueConf *rqc; 1600 int i; 1601 1602 memset(shared, 0, sizeof(*shared)); 1603 1604 /* driver settings */ 1605 shared->magic = VMXNET3_REV1_MAGIC; 1606 devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; 1607 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? 1608 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64); 1609 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; 1610 devRead->misc.driverInfo.vmxnet3RevSpt = 1; 1611 devRead->misc.driverInfo.uptVerSpt = 1; 1612 1613 devRead->misc.ddPA = virt_to_phys(adapter); 1614 devRead->misc.ddLen = sizeof(struct vmxnet3_adapter); 1615 1616 /* set up feature flags */ 1617 if (adapter->rxcsum) 1618 devRead->misc.uptFeatures |= UPT1_F_RXCSUM; 1619 1620 if (adapter->lro) { 1621 devRead->misc.uptFeatures |= UPT1_F_LRO; 1622 devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS; 1623 } 1624 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) 1625 && adapter->vlan_grp) { 1626 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 1627 } 1628 1629 devRead->misc.mtu = adapter->netdev->mtu; 1630 devRead->misc.queueDescPA = adapter->queue_desc_pa; 1631 devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) + 1632 sizeof(struct Vmxnet3_RxQueueDesc); 1633 1634 /* tx queue settings */ 1635 BUG_ON(adapter->tx_queue.tx_ring.base == NULL); 1636 1637 devRead->misc.numTxQueues = 1; 1638 tqc = &adapter->tqd_start->conf; 1639 tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA; 1640 tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA; 1641 tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA; 1642 tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info); 1643 tqc->txRingSize = adapter->tx_queue.tx_ring.size; 1644 tqc->dataRingSize = adapter->tx_queue.data_ring.size; 1645 tqc->compRingSize = adapter->tx_queue.comp_ring.size; 1646 tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) * 1647 tqc->txRingSize; 1648 tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; 1649 1650 /* rx queue settings */ 1651 devRead->misc.numRxQueues = 1; 1652 rqc = &adapter->rqd_start->conf; 1653 rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA; 1654 rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA; 1655 rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA; 1656 rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info); 1657 rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size; 1658 rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size; 1659 rqc->compRingSize = adapter->rx_queue.comp_ring.size; 1660 rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) * 1661 (rqc->rxRingSize[0] + rqc->rxRingSize[1]); 1662 rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; 1663 1664 /* intr settings */ 1665 devRead->intrConf.autoMask = adapter->intr.mask_mode == 1666 VMXNET3_IMM_AUTO; 1667 devRead->intrConf.numIntrs = adapter->intr.num_intrs; 1668 for (i = 0; i < adapter->intr.num_intrs; i++) 1669 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; 1670 1671 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; 1672 1673 /* rx filter settings */ 1674 devRead->rxFilterConf.rxMode = 0; 1675 vmxnet3_restore_vlan(adapter); 1676 /* the rest are already zeroed */ 1677} 1678 1679 1680int 1681vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) 1682{ 1683 int err; 1684 u32 ret; 1685 1686 dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes" 1687 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, 1688 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size, 1689 adapter->rx_queue.rx_ring[0].size, 1690 adapter->rx_queue.rx_ring[1].size); 1691 1692 vmxnet3_tq_init(&adapter->tx_queue, adapter); 1693 err = vmxnet3_rq_init(&adapter->rx_queue, adapter); 1694 if (err) { 1695 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", 1696 adapter->netdev->name, err); 1697 goto rq_err; 1698 } 1699 1700 err = vmxnet3_request_irqs(adapter); 1701 if (err) { 1702 printk(KERN_ERR "Failed to setup irq for %s: error %d\n", 1703 adapter->netdev->name, err); 1704 goto irq_err; 1705 } 1706 1707 vmxnet3_setup_driver_shared(adapter); 1708 1709 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 1710 VMXNET3_GET_ADDR_LO(adapter->shared_pa)); 1711 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 1712 VMXNET3_GET_ADDR_HI(adapter->shared_pa)); 1713 1714 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1715 VMXNET3_CMD_ACTIVATE_DEV); 1716 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 1717 1718 if (ret != 0) { 1719 printk(KERN_ERR "Failed to activate dev %s: error %u\n", 1720 adapter->netdev->name, ret); 1721 err = -EINVAL; 1722 goto activate_err; 1723 } 1724 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD, 1725 adapter->rx_queue.rx_ring[0].next2fill); 1726 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2, 1727 adapter->rx_queue.rx_ring[1].next2fill); 1728 1729 /* Apply the rx filter settins last. */ 1730 vmxnet3_set_mc(adapter->netdev); 1731 1732 /* 1733 * Check link state when first activating device. It will start the 1734 * tx queue if the link is up. 1735 */ 1736 vmxnet3_check_link(adapter); 1737 1738 napi_enable(&adapter->napi); 1739 vmxnet3_enable_all_intrs(adapter); 1740 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 1741 return 0; 1742 1743activate_err: 1744 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0); 1745 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0); 1746 vmxnet3_free_irqs(adapter); 1747irq_err: 1748rq_err: 1749 /* free up buffers we allocated */ 1750 vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); 1751 return err; 1752} 1753 1754 1755void 1756vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) 1757{ 1758 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); 1759} 1760 1761 1762int 1763vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) 1764{ 1765 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) 1766 return 0; 1767 1768 1769 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1770 VMXNET3_CMD_QUIESCE_DEV); 1771 vmxnet3_disable_all_intrs(adapter); 1772 1773 napi_disable(&adapter->napi); 1774 netif_tx_disable(adapter->netdev); 1775 adapter->link_speed = 0; 1776 netif_carrier_off(adapter->netdev); 1777 1778 vmxnet3_tq_cleanup(&adapter->tx_queue, adapter); 1779 vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); 1780 vmxnet3_free_irqs(adapter); 1781 return 0; 1782} 1783 1784 1785static void 1786vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) 1787{ 1788 u32 tmp; 1789 1790 tmp = *(u32 *)mac; 1791 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp); 1792 1793 tmp = (mac[5] << 8) | mac[4]; 1794 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp); 1795} 1796 1797 1798static int 1799vmxnet3_set_mac_addr(struct net_device *netdev, void *p) 1800{ 1801 struct sockaddr *addr = p; 1802 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1803 1804 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1805 vmxnet3_write_mac_addr(adapter, addr->sa_data); 1806 1807 return 0; 1808} 1809 1810 1811/* ==================== initialization and cleanup routines ============ */ 1812 1813static int 1814vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) 1815{ 1816 int err; 1817 unsigned long mmio_start, mmio_len; 1818 struct pci_dev *pdev = adapter->pdev; 1819 1820 err = pci_enable_device(pdev); 1821 if (err) { 1822 printk(KERN_ERR "Failed to enable adapter %s: error %d\n", 1823 pci_name(pdev), err); 1824 return err; 1825 } 1826 1827 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { 1828 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 1829 printk(KERN_ERR "pci_set_consistent_dma_mask failed " 1830 "for adapter %s\n", pci_name(pdev)); 1831 err = -EIO; 1832 goto err_set_mask; 1833 } 1834 *dma64 = true; 1835 } else { 1836 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 1837 printk(KERN_ERR "pci_set_dma_mask failed for adapter " 1838 "%s\n", pci_name(pdev)); 1839 err = -EIO; 1840 goto err_set_mask; 1841 } 1842 *dma64 = false; 1843 } 1844 1845 err = pci_request_selected_regions(pdev, (1 << 2) - 1, 1846 vmxnet3_driver_name); 1847 if (err) { 1848 printk(KERN_ERR "Failed to request region for adapter %s: " 1849 "error %d\n", pci_name(pdev), err); 1850 goto err_set_mask; 1851 } 1852 1853 pci_set_master(pdev); 1854 1855 mmio_start = pci_resource_start(pdev, 0); 1856 mmio_len = pci_resource_len(pdev, 0); 1857 adapter->hw_addr0 = ioremap(mmio_start, mmio_len); 1858 if (!adapter->hw_addr0) { 1859 printk(KERN_ERR "Failed to map bar0 for adapter %s\n", 1860 pci_name(pdev)); 1861 err = -EIO; 1862 goto err_ioremap; 1863 } 1864 1865 mmio_start = pci_resource_start(pdev, 1); 1866 mmio_len = pci_resource_len(pdev, 1); 1867 adapter->hw_addr1 = ioremap(mmio_start, mmio_len); 1868 if (!adapter->hw_addr1) { 1869 printk(KERN_ERR "Failed to map bar1 for adapter %s\n", 1870 pci_name(pdev)); 1871 err = -EIO; 1872 goto err_bar1; 1873 } 1874 return 0; 1875 1876err_bar1: 1877 iounmap(adapter->hw_addr0); 1878err_ioremap: 1879 pci_release_selected_regions(pdev, (1 << 2) - 1); 1880err_set_mask: 1881 pci_disable_device(pdev); 1882 return err; 1883} 1884 1885 1886static void 1887vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter) 1888{ 1889 BUG_ON(!adapter->pdev); 1890 1891 iounmap(adapter->hw_addr0); 1892 iounmap(adapter->hw_addr1); 1893 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); 1894 pci_disable_device(adapter->pdev); 1895} 1896 1897 1898static void 1899vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) 1900{ 1901 size_t sz; 1902 1903 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - 1904 VMXNET3_MAX_ETH_HDR_SIZE) { 1905 adapter->skb_buf_size = adapter->netdev->mtu + 1906 VMXNET3_MAX_ETH_HDR_SIZE; 1907 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) 1908 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; 1909 1910 adapter->rx_buf_per_pkt = 1; 1911 } else { 1912 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; 1913 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + 1914 VMXNET3_MAX_ETH_HDR_SIZE; 1915 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; 1916 } 1917 1918 /* 1919 * for simplicity, force the ring0 size to be a multiple of 1920 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN 1921 */ 1922 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; 1923 adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size + 1924 sz - 1) / sz * sz; 1925 adapter->rx_queue.rx_ring[0].size = min_t(u32, 1926 adapter->rx_queue.rx_ring[0].size, 1927 VMXNET3_RX_RING_MAX_SIZE / sz * sz); 1928} 1929 1930 1931int 1932vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, 1933 u32 rx_ring_size, u32 rx_ring2_size) 1934{ 1935 int err; 1936 1937 adapter->tx_queue.tx_ring.size = tx_ring_size; 1938 adapter->tx_queue.data_ring.size = tx_ring_size; 1939 adapter->tx_queue.comp_ring.size = tx_ring_size; 1940 adapter->tx_queue.shared = &adapter->tqd_start->ctrl; 1941 adapter->tx_queue.stopped = true; 1942 err = vmxnet3_tq_create(&adapter->tx_queue, adapter); 1943 if (err) 1944 return err; 1945 1946 adapter->rx_queue.rx_ring[0].size = rx_ring_size; 1947 adapter->rx_queue.rx_ring[1].size = rx_ring2_size; 1948 vmxnet3_adjust_rx_ring_size(adapter); 1949 adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size + 1950 adapter->rx_queue.rx_ring[1].size; 1951 adapter->rx_queue.qid = 0; 1952 adapter->rx_queue.qid2 = 1; 1953 adapter->rx_queue.shared = &adapter->rqd_start->ctrl; 1954 err = vmxnet3_rq_create(&adapter->rx_queue, adapter); 1955 if (err) 1956 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 1957 1958 return err; 1959} 1960 1961static int 1962vmxnet3_open(struct net_device *netdev) 1963{ 1964 struct vmxnet3_adapter *adapter; 1965 int err; 1966 1967 adapter = netdev_priv(netdev); 1968 1969 spin_lock_init(&adapter->tx_queue.tx_lock); 1970 1971 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, 1972 VMXNET3_DEF_RX_RING_SIZE, 1973 VMXNET3_DEF_RX_RING_SIZE); 1974 if (err) 1975 goto queue_err; 1976 1977 err = vmxnet3_activate_dev(adapter); 1978 if (err) 1979 goto activate_err; 1980 1981 return 0; 1982 1983activate_err: 1984 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 1985 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 1986queue_err: 1987 return err; 1988} 1989 1990 1991static int 1992vmxnet3_close(struct net_device *netdev) 1993{ 1994 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1995 1996 /* 1997 * Reset_work may be in the middle of resetting the device, wait for its 1998 * completion. 1999 */ 2000 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 2001 msleep(1); 2002 2003 vmxnet3_quiesce_dev(adapter); 2004 2005 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 2006 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 2007 2008 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2009 2010 2011 return 0; 2012} 2013 2014 2015void 2016vmxnet3_force_close(struct vmxnet3_adapter *adapter) 2017{ 2018 /* 2019 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise 2020 * vmxnet3_close() will deadlock. 2021 */ 2022 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); 2023 2024 /* we need to enable NAPI, otherwise dev_close will deadlock */ 2025 napi_enable(&adapter->napi); 2026 dev_close(adapter->netdev); 2027} 2028 2029 2030static int 2031vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) 2032{ 2033 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2034 int err = 0; 2035 2036 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) 2037 return -EINVAL; 2038 2039 if (new_mtu > 1500 && !adapter->jumbo_frame) 2040 return -EINVAL; 2041 2042 netdev->mtu = new_mtu; 2043 2044 /* 2045 * Reset_work may be in the middle of resetting the device, wait for its 2046 * completion. 2047 */ 2048 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 2049 msleep(1); 2050 2051 if (netif_running(netdev)) { 2052 vmxnet3_quiesce_dev(adapter); 2053 vmxnet3_reset_dev(adapter); 2054 2055 /* we need to re-create the rx queue based on the new mtu */ 2056 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 2057 vmxnet3_adjust_rx_ring_size(adapter); 2058 adapter->rx_queue.comp_ring.size = 2059 adapter->rx_queue.rx_ring[0].size + 2060 adapter->rx_queue.rx_ring[1].size; 2061 err = vmxnet3_rq_create(&adapter->rx_queue, adapter); 2062 if (err) { 2063 printk(KERN_ERR "%s: failed to re-create rx queue," 2064 " error %d. Closing it.\n", netdev->name, err); 2065 goto out; 2066 } 2067 2068 err = vmxnet3_activate_dev(adapter); 2069 if (err) { 2070 printk(KERN_ERR "%s: failed to re-activate, error %d. " 2071 "Closing it\n", netdev->name, err); 2072 goto out; 2073 } 2074 } 2075 2076out: 2077 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2078 if (err) 2079 vmxnet3_force_close(adapter); 2080 2081 return err; 2082} 2083 2084 2085static void 2086vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) 2087{ 2088 struct net_device *netdev = adapter->netdev; 2089 2090 netdev->features = NETIF_F_SG | 2091 NETIF_F_HW_CSUM | 2092 NETIF_F_HW_VLAN_TX | 2093 NETIF_F_HW_VLAN_RX | 2094 NETIF_F_HW_VLAN_FILTER | 2095 NETIF_F_TSO | 2096 NETIF_F_TSO6 | 2097 NETIF_F_LRO; 2098 2099 printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro"); 2100 2101 adapter->rxcsum = true; 2102 adapter->jumbo_frame = true; 2103 adapter->lro = true; 2104 2105 if (dma64) { 2106 netdev->features |= NETIF_F_HIGHDMA; 2107 printk(" highDMA"); 2108 } 2109 2110 netdev->vlan_features = netdev->features; 2111 printk("\n"); 2112} 2113 2114 2115static void 2116vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) 2117{ 2118 u32 tmp; 2119 2120 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL); 2121 *(u32 *)mac = tmp; 2122 2123 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH); 2124 mac[4] = tmp & 0xff; 2125 mac[5] = (tmp >> 8) & 0xff; 2126} 2127 2128 2129static void 2130vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) 2131{ 2132 u32 cfg; 2133 2134 /* intr settings */ 2135 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2136 VMXNET3_CMD_GET_CONF_INTR); 2137 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 2138 adapter->intr.type = cfg & 0x3; 2139 adapter->intr.mask_mode = (cfg >> 2) & 0x3; 2140 2141 if (adapter->intr.type == VMXNET3_IT_AUTO) { 2142 int err; 2143 2144#ifdef CONFIG_PCI_MSI 2145 adapter->intr.msix_entries[0].entry = 0; 2146 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, 2147 VMXNET3_LINUX_MAX_MSIX_VECT); 2148 if (!err) { 2149 adapter->intr.num_intrs = 1; 2150 adapter->intr.type = VMXNET3_IT_MSIX; 2151 return; 2152 } 2153#endif 2154 2155 err = pci_enable_msi(adapter->pdev); 2156 if (!err) { 2157 adapter->intr.num_intrs = 1; 2158 adapter->intr.type = VMXNET3_IT_MSI; 2159 return; 2160 } 2161 } 2162 2163 adapter->intr.type = VMXNET3_IT_INTX; 2164 2165 /* INT-X related setting */ 2166 adapter->intr.num_intrs = 1; 2167} 2168 2169 2170static void 2171vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter) 2172{ 2173 if (adapter->intr.type == VMXNET3_IT_MSIX) 2174 pci_disable_msix(adapter->pdev); 2175 else if (adapter->intr.type == VMXNET3_IT_MSI) 2176 pci_disable_msi(adapter->pdev); 2177 else 2178 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); 2179} 2180 2181 2182static void 2183vmxnet3_tx_timeout(struct net_device *netdev) 2184{ 2185 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2186 adapter->tx_timeout_count++; 2187 2188 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); 2189 schedule_work(&adapter->work); 2190} 2191 2192 2193static void 2194vmxnet3_reset_work(struct work_struct *data) 2195{ 2196 struct vmxnet3_adapter *adapter; 2197 2198 adapter = container_of(data, struct vmxnet3_adapter, work); 2199 2200 /* if another thread is resetting the device, no need to proceed */ 2201 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) 2202 return; 2203 2204 /* if the device is closed, we must leave it alone */ 2205 if (netif_running(adapter->netdev)) { 2206 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name); 2207 vmxnet3_quiesce_dev(adapter); 2208 vmxnet3_reset_dev(adapter); 2209 vmxnet3_activate_dev(adapter); 2210 } else { 2211 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name); 2212 } 2213 2214 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2215} 2216 2217 2218static int __devinit 2219vmxnet3_probe_device(struct pci_dev *pdev, 2220 const struct pci_device_id *id) 2221{ 2222 static const struct net_device_ops vmxnet3_netdev_ops = { 2223 .ndo_open = vmxnet3_open, 2224 .ndo_stop = vmxnet3_close, 2225 .ndo_start_xmit = vmxnet3_xmit_frame, 2226 .ndo_set_mac_address = vmxnet3_set_mac_addr, 2227 .ndo_change_mtu = vmxnet3_change_mtu, 2228 .ndo_get_stats = vmxnet3_get_stats, 2229 .ndo_tx_timeout = vmxnet3_tx_timeout, 2230 .ndo_set_multicast_list = vmxnet3_set_mc, 2231 .ndo_vlan_rx_register = vmxnet3_vlan_rx_register, 2232 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid, 2233 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid, 2234#ifdef CONFIG_NET_POLL_CONTROLLER 2235 .ndo_poll_controller = vmxnet3_netpoll, 2236#endif 2237 }; 2238 int err; 2239 bool dma64 = false; /* stupid gcc */ 2240 u32 ver; 2241 struct net_device *netdev; 2242 struct vmxnet3_adapter *adapter; 2243 u8 mac[ETH_ALEN]; 2244 2245 netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter)); 2246 if (!netdev) { 2247 printk(KERN_ERR "Failed to alloc ethernet device for adapter " 2248 "%s\n", pci_name(pdev)); 2249 return -ENOMEM; 2250 } 2251 2252 pci_set_drvdata(pdev, netdev); 2253 adapter = netdev_priv(netdev); 2254 adapter->netdev = netdev; 2255 adapter->pdev = pdev; 2256 2257 adapter->shared = pci_alloc_consistent(adapter->pdev, 2258 sizeof(struct Vmxnet3_DriverShared), 2259 &adapter->shared_pa); 2260 if (!adapter->shared) { 2261 printk(KERN_ERR "Failed to allocate memory for %s\n", 2262 pci_name(pdev)); 2263 err = -ENOMEM; 2264 goto err_alloc_shared; 2265 } 2266 2267 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, 2268 sizeof(struct Vmxnet3_TxQueueDesc) + 2269 sizeof(struct Vmxnet3_RxQueueDesc), 2270 &adapter->queue_desc_pa); 2271 2272 if (!adapter->tqd_start) { 2273 printk(KERN_ERR "Failed to allocate memory for %s\n", 2274 pci_name(pdev)); 2275 err = -ENOMEM; 2276 goto err_alloc_queue_desc; 2277 } 2278 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start 2279 + 1); 2280 2281 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); 2282 if (adapter->pm_conf == NULL) { 2283 printk(KERN_ERR "Failed to allocate memory for %s\n", 2284 pci_name(pdev)); 2285 err = -ENOMEM; 2286 goto err_alloc_pm; 2287 } 2288 2289 err = vmxnet3_alloc_pci_resources(adapter, &dma64); 2290 if (err < 0) 2291 goto err_alloc_pci; 2292 2293 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); 2294 if (ver & 1) { 2295 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); 2296 } else { 2297 printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter" 2298 " %s\n", ver, pci_name(pdev)); 2299 err = -EBUSY; 2300 goto err_ver; 2301 } 2302 2303 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS); 2304 if (ver & 1) { 2305 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1); 2306 } else { 2307 printk(KERN_ERR "Incompatible upt version (0x%x) for " 2308 "adapter %s\n", ver, pci_name(pdev)); 2309 err = -EBUSY; 2310 goto err_ver; 2311 } 2312 2313 vmxnet3_declare_features(adapter, dma64); 2314 2315 adapter->dev_number = atomic_read(&devices_found); 2316 vmxnet3_alloc_intr_resources(adapter); 2317 2318 vmxnet3_read_mac_addr(adapter, mac); 2319 memcpy(netdev->dev_addr, mac, netdev->addr_len); 2320 2321 netdev->netdev_ops = &vmxnet3_netdev_ops; 2322 netdev->watchdog_timeo = 5 * HZ; 2323 vmxnet3_set_ethtool_ops(netdev); 2324 2325 INIT_WORK(&adapter->work, vmxnet3_reset_work); 2326 2327 netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64); 2328 SET_NETDEV_DEV(netdev, &pdev->dev); 2329 err = register_netdev(netdev); 2330 2331 if (err) { 2332 printk(KERN_ERR "Failed to register adapter %s\n", 2333 pci_name(pdev)); 2334 goto err_register; 2335 } 2336 2337 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 2338 atomic_inc(&devices_found); 2339 return 0; 2340 2341err_register: 2342 vmxnet3_free_intr_resources(adapter); 2343err_ver: 2344 vmxnet3_free_pci_resources(adapter); 2345err_alloc_pci: 2346 kfree(adapter->pm_conf); 2347err_alloc_pm: 2348 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + 2349 sizeof(struct Vmxnet3_RxQueueDesc), 2350 adapter->tqd_start, adapter->queue_desc_pa); 2351err_alloc_queue_desc: 2352 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 2353 adapter->shared, adapter->shared_pa); 2354err_alloc_shared: 2355 pci_set_drvdata(pdev, NULL); 2356 free_netdev(netdev); 2357 return err; 2358} 2359 2360 2361static void __devexit 2362vmxnet3_remove_device(struct pci_dev *pdev) 2363{ 2364 struct net_device *netdev = pci_get_drvdata(pdev); 2365 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2366 2367 flush_scheduled_work(); 2368 2369 unregister_netdev(netdev); 2370 2371 vmxnet3_free_intr_resources(adapter); 2372 vmxnet3_free_pci_resources(adapter); 2373 kfree(adapter->pm_conf); 2374 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + 2375 sizeof(struct Vmxnet3_RxQueueDesc), 2376 adapter->tqd_start, adapter->queue_desc_pa); 2377 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 2378 adapter->shared, adapter->shared_pa); 2379 free_netdev(netdev); 2380} 2381 2382 2383#ifdef CONFIG_PM 2384 2385static int 2386vmxnet3_suspend(struct device *device) 2387{ 2388 struct pci_dev *pdev = to_pci_dev(device); 2389 struct net_device *netdev = pci_get_drvdata(pdev); 2390 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2391 struct Vmxnet3_PMConf *pmConf; 2392 struct ethhdr *ehdr; 2393 struct arphdr *ahdr; 2394 u8 *arpreq; 2395 struct in_device *in_dev; 2396 struct in_ifaddr *ifa; 2397 int i = 0; 2398 2399 if (!netif_running(netdev)) 2400 return 0; 2401 2402 vmxnet3_disable_all_intrs(adapter); 2403 vmxnet3_free_irqs(adapter); 2404 vmxnet3_free_intr_resources(adapter); 2405 2406 netif_device_detach(netdev); 2407 netif_stop_queue(netdev); 2408 2409 /* Create wake-up filters. */ 2410 pmConf = adapter->pm_conf; 2411 memset(pmConf, 0, sizeof(*pmConf)); 2412 2413 if (adapter->wol & WAKE_UCAST) { 2414 pmConf->filters[i].patternSize = ETH_ALEN; 2415 pmConf->filters[i].maskSize = 1; 2416 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); 2417 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ 2418 2419 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 2420 i++; 2421 } 2422 2423 if (adapter->wol & WAKE_ARP) { 2424 in_dev = in_dev_get(netdev); 2425 if (!in_dev) 2426 goto skip_arp; 2427 2428 ifa = (struct in_ifaddr *)in_dev->ifa_list; 2429 if (!ifa) 2430 goto skip_arp; 2431 2432 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/ 2433 sizeof(struct arphdr) + /* ARP header */ 2434 2 * ETH_ALEN + /* 2 Ethernet addresses*/ 2435 2 * sizeof(u32); /*2 IPv4 addresses */ 2436 pmConf->filters[i].maskSize = 2437 (pmConf->filters[i].patternSize - 1) / 8 + 1; 2438 2439 /* ETH_P_ARP in Ethernet header. */ 2440 ehdr = (struct ethhdr *)pmConf->filters[i].pattern; 2441 ehdr->h_proto = htons(ETH_P_ARP); 2442 2443 /* ARPOP_REQUEST in ARP header. */ 2444 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN]; 2445 ahdr->ar_op = htons(ARPOP_REQUEST); 2446 arpreq = (u8 *)(ahdr + 1); 2447 2448 /* The Unicast IPv4 address in 'tip' field. */ 2449 arpreq += 2 * ETH_ALEN + sizeof(u32); 2450 *(u32 *)arpreq = ifa->ifa_address; 2451 2452 /* The mask for the relevant bits. */ 2453 pmConf->filters[i].mask[0] = 0x00; 2454 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */ 2455 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */ 2456 pmConf->filters[i].mask[3] = 0x00; 2457 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */ 2458 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ 2459 in_dev_put(in_dev); 2460 2461 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 2462 i++; 2463 } 2464 2465skip_arp: 2466 if (adapter->wol & WAKE_MAGIC) 2467 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; 2468 2469 pmConf->numFilters = i; 2470 2471 adapter->shared->devRead.pmConfDesc.confVer = 1; 2472 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); 2473 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); 2474 2475 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2476 VMXNET3_CMD_UPDATE_PMCFG); 2477 2478 pci_save_state(pdev); 2479 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), 2480 adapter->wol); 2481 pci_disable_device(pdev); 2482 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND)); 2483 2484 return 0; 2485} 2486 2487 2488static int 2489vmxnet3_resume(struct device *device) 2490{ 2491 int err; 2492 struct pci_dev *pdev = to_pci_dev(device); 2493 struct net_device *netdev = pci_get_drvdata(pdev); 2494 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 2495 struct Vmxnet3_PMConf *pmConf; 2496 2497 if (!netif_running(netdev)) 2498 return 0; 2499 2500 /* Destroy wake-up filters. */ 2501 pmConf = adapter->pm_conf; 2502 memset(pmConf, 0, sizeof(*pmConf)); 2503 2504 adapter->shared->devRead.pmConfDesc.confVer = 1; 2505 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); 2506 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); 2507 2508 netif_device_attach(netdev); 2509 pci_set_power_state(pdev, PCI_D0); 2510 pci_restore_state(pdev); 2511 err = pci_enable_device_mem(pdev); 2512 if (err != 0) 2513 return err; 2514 2515 pci_enable_wake(pdev, PCI_D0, 0); 2516 2517 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2518 VMXNET3_CMD_UPDATE_PMCFG); 2519 vmxnet3_alloc_intr_resources(adapter); 2520 vmxnet3_request_irqs(adapter); 2521 vmxnet3_enable_all_intrs(adapter); 2522 2523 return 0; 2524} 2525 2526static struct dev_pm_ops vmxnet3_pm_ops = { 2527 .suspend = vmxnet3_suspend, 2528 .resume = vmxnet3_resume, 2529}; 2530#endif 2531 2532static struct pci_driver vmxnet3_driver = { 2533 .name = vmxnet3_driver_name, 2534 .id_table = vmxnet3_pciid_table, 2535 .probe = vmxnet3_probe_device, 2536 .remove = __devexit_p(vmxnet3_remove_device), 2537#ifdef CONFIG_PM 2538 .driver.pm = &vmxnet3_pm_ops, 2539#endif 2540}; 2541 2542 2543static int __init 2544vmxnet3_init_module(void) 2545{ 2546 printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC, 2547 VMXNET3_DRIVER_VERSION_REPORT); 2548 return pci_register_driver(&vmxnet3_driver); 2549} 2550 2551module_init(vmxnet3_init_module); 2552 2553 2554static void 2555vmxnet3_exit_module(void) 2556{ 2557 pci_unregister_driver(&vmxnet3_driver); 2558} 2559 2560module_exit(vmxnet3_exit_module); 2561 2562MODULE_AUTHOR("VMware, Inc."); 2563MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC); 2564MODULE_LICENSE("GPL v2"); 2565MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING); 2566