/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}
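
/**
 * e1000_up - bring the interface back up after a reset
 * @adapter: board private structure
 *
 * Reconfigures the hardware, re-enables NAPI and interrupts, restarts the
 * transmit queue, and fires a link-change interrupt so the watchdog task
 * picks up the current link state.
 **/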
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/*
	 * Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the Tx FIFO also stores 16 bytes of information about the Tx,
		 * but don't include the Ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/**
 * e1000_dump_eeprom - dump the EEPROM for users having checksum issues
 * @adapter: board private structure
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & NETIF_F_RXCSUM))
		return 0;

	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/*
	 * there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/*
		 * according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
			ioremap(pci_resource_start(pdev, BAR_1),
				pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_RX;
		netdev->features = NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_RXCSUM;
	netdev->hw_features |= NETIF_F_RXFCS;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);
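	/* Note: VLAN filtering is left disabled at probe time; it is turned
	 * on later by e1000_vlan_filter_on_off() once a VLAN id is actually
	 * registered (see the e1000_vlan_rx_add_vid()/e1000_vlan_used() path).
	 */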

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/

static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	mutex_init(&adapter->mutex);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/

static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/

static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
	}

	return true;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);
}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);

	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	ew32(RCTL, rctl);
}

/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
1856 **/ 1857 1858static void e1000_configure_rx(struct e1000_adapter *adapter) 1859{ 1860 u64 rdba; 1861 struct e1000_hw *hw = &adapter->hw; 1862 u32 rdlen, rctl, rxcsum; 1863 1864 if (adapter->netdev->mtu > ETH_DATA_LEN) { 1865 rdlen = adapter->rx_ring[0].count * 1866 sizeof(struct e1000_rx_desc); 1867 adapter->clean_rx = e1000_clean_jumbo_rx_irq; 1868 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; 1869 } else { 1870 rdlen = adapter->rx_ring[0].count * 1871 sizeof(struct e1000_rx_desc); 1872 adapter->clean_rx = e1000_clean_rx_irq; 1873 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 1874 } 1875 1876 /* disable receives while setting up the descriptors */ 1877 rctl = er32(RCTL); 1878 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1879 1880 /* set the Receive Delay Timer Register */ 1881 ew32(RDTR, adapter->rx_int_delay); 1882 1883 if (hw->mac_type >= e1000_82540) { 1884 ew32(RADV, adapter->rx_abs_int_delay); 1885 if (adapter->itr_setting != 0) 1886 ew32(ITR, 1000000000 / (adapter->itr * 256)); 1887 } 1888 1889 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1890 * the Base and Length of the Rx Descriptor Ring */ 1891 switch (adapter->num_rx_queues) { 1892 case 1: 1893 default: 1894 rdba = adapter->rx_ring[0].dma; 1895 ew32(RDLEN, rdlen); 1896 ew32(RDBAH, (rdba >> 32)); 1897 ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); 1898 ew32(RDT, 0); 1899 ew32(RDH, 0); 1900 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); 1901 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); 1902 break; 1903 } 1904 1905 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1906 if (hw->mac_type >= e1000_82543) { 1907 rxcsum = er32(RXCSUM); 1908 if (adapter->rx_csum) 1909 rxcsum |= E1000_RXCSUM_TUOFL; 1910 else 1911 /* don't need to clear IPPCSE as it defaults to 0 */ 1912 rxcsum &= ~E1000_RXCSUM_TUOFL; 1913 ew32(RXCSUM, rxcsum); 1914 } 1915 1916 /* Enable Receives */ 1917 ew32(RCTL, rctl | E1000_RCTL_EN); 1918} 1919 1920/** 1921 * e1000_free_tx_resources - Free Tx Resources per Queue 1922 * @adapter: board private structure 1923 * @tx_ring: Tx descriptor ring for a specific queue 1924 * 1925 * Free all transmit software resources 1926 **/ 1927 1928static void e1000_free_tx_resources(struct e1000_adapter *adapter, 1929 struct e1000_tx_ring *tx_ring) 1930{ 1931 struct pci_dev *pdev = adapter->pdev; 1932 1933 e1000_clean_tx_ring(adapter, tx_ring); 1934 1935 vfree(tx_ring->buffer_info); 1936 tx_ring->buffer_info = NULL; 1937 1938 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 1939 tx_ring->dma); 1940 1941 tx_ring->desc = NULL; 1942} 1943 1944/** 1945 * e1000_free_all_tx_resources - Free Tx Resources for All Queues 1946 * @adapter: board private structure 1947 * 1948 * Free all transmit software resources 1949 **/ 1950 1951void e1000_free_all_tx_resources(struct e1000_adapter *adapter) 1952{ 1953 int i; 1954 1955 for (i = 0; i < adapter->num_tx_queues; i++) 1956 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); 1957} 1958 1959static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1960 struct e1000_buffer *buffer_info) 1961{ 1962 if (buffer_info->dma) { 1963 if (buffer_info->mapped_as_page) 1964 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, 1965 buffer_info->length, DMA_TO_DEVICE); 1966 else 1967 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1968 buffer_info->length, 1969 DMA_TO_DEVICE); 1970 buffer_info->dma = 0; 1971 } 1972 if (buffer_info->skb) { 1973 
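		/* dev_kfree_skb_any() is used because this helper is called
		 * both from process context (ring cleanup) and from the Tx
		 * clean path in softirq context.
		 */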
dev_kfree_skb_any(buffer_info->skb); 1974 buffer_info->skb = NULL; 1975 } 1976 buffer_info->time_stamp = 0; 1977 /* buffer_info must be completely set up in the transmit path */ 1978} 1979 1980/** 1981 * e1000_clean_tx_ring - Free Tx Buffers 1982 * @adapter: board private structure 1983 * @tx_ring: ring to be cleaned 1984 **/ 1985 1986static void e1000_clean_tx_ring(struct e1000_adapter *adapter, 1987 struct e1000_tx_ring *tx_ring) 1988{ 1989 struct e1000_hw *hw = &adapter->hw; 1990 struct e1000_buffer *buffer_info; 1991 unsigned long size; 1992 unsigned int i; 1993 1994 /* Free all the Tx ring sk_buffs */ 1995 1996 for (i = 0; i < tx_ring->count; i++) { 1997 buffer_info = &tx_ring->buffer_info[i]; 1998 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 1999 } 2000 2001 size = sizeof(struct e1000_buffer) * tx_ring->count; 2002 memset(tx_ring->buffer_info, 0, size); 2003 2004 /* Zero out the descriptor ring */ 2005 2006 memset(tx_ring->desc, 0, tx_ring->size); 2007 2008 tx_ring->next_to_use = 0; 2009 tx_ring->next_to_clean = 0; 2010 tx_ring->last_tx_tso = false; 2011 2012 writel(0, hw->hw_addr + tx_ring->tdh); 2013 writel(0, hw->hw_addr + tx_ring->tdt); 2014} 2015 2016/** 2017 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues 2018 * @adapter: board private structure 2019 **/ 2020 2021static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) 2022{ 2023 int i; 2024 2025 for (i = 0; i < adapter->num_tx_queues; i++) 2026 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); 2027} 2028 2029/** 2030 * e1000_free_rx_resources - Free Rx Resources 2031 * @adapter: board private structure 2032 * @rx_ring: ring to clean the resources from 2033 * 2034 * Free all receive software resources 2035 **/ 2036 2037static void e1000_free_rx_resources(struct e1000_adapter *adapter, 2038 struct e1000_rx_ring *rx_ring) 2039{ 2040 struct pci_dev *pdev = adapter->pdev; 2041 2042 e1000_clean_rx_ring(adapter, rx_ring); 2043 2044 vfree(rx_ring->buffer_info); 2045 rx_ring->buffer_info = NULL; 2046 2047 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2048 rx_ring->dma); 2049 2050 rx_ring->desc = NULL; 2051} 2052 2053/** 2054 * e1000_free_all_rx_resources - Free Rx Resources for All Queues 2055 * @adapter: board private structure 2056 * 2057 * Free all receive software resources 2058 **/ 2059 2060void e1000_free_all_rx_resources(struct e1000_adapter *adapter) 2061{ 2062 int i; 2063 2064 for (i = 0; i < adapter->num_rx_queues; i++) 2065 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); 2066} 2067 2068/** 2069 * e1000_clean_rx_ring - Free Rx Buffers per Queue 2070 * @adapter: board private structure 2071 * @rx_ring: ring to free buffers from 2072 **/ 2073 2074static void e1000_clean_rx_ring(struct e1000_adapter *adapter, 2075 struct e1000_rx_ring *rx_ring) 2076{ 2077 struct e1000_hw *hw = &adapter->hw; 2078 struct e1000_buffer *buffer_info; 2079 struct pci_dev *pdev = adapter->pdev; 2080 unsigned long size; 2081 unsigned int i; 2082 2083 /* Free all the Rx ring sk_buffs */ 2084 for (i = 0; i < rx_ring->count; i++) { 2085 buffer_info = &rx_ring->buffer_info[i]; 2086 if (buffer_info->dma && 2087 adapter->clean_rx == e1000_clean_rx_irq) { 2088 dma_unmap_single(&pdev->dev, buffer_info->dma, 2089 buffer_info->length, 2090 DMA_FROM_DEVICE); 2091 } else if (buffer_info->dma && 2092 adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 2093 dma_unmap_page(&pdev->dev, buffer_info->dma, 2094 buffer_info->length, 2095 DMA_FROM_DEVICE); 2096 } 2097 2098 buffer_info->dma = 0; 2099 if 
(buffer_info->page) { 2100 put_page(buffer_info->page); 2101 buffer_info->page = NULL; 2102 } 2103 if (buffer_info->skb) { 2104 dev_kfree_skb(buffer_info->skb); 2105 buffer_info->skb = NULL; 2106 } 2107 } 2108 2109 /* there also may be some cached data from a chained receive */ 2110 if (rx_ring->rx_skb_top) { 2111 dev_kfree_skb(rx_ring->rx_skb_top); 2112 rx_ring->rx_skb_top = NULL; 2113 } 2114 2115 size = sizeof(struct e1000_buffer) * rx_ring->count; 2116 memset(rx_ring->buffer_info, 0, size); 2117 2118 /* Zero out the descriptor ring */ 2119 memset(rx_ring->desc, 0, rx_ring->size); 2120 2121 rx_ring->next_to_clean = 0; 2122 rx_ring->next_to_use = 0; 2123 2124 writel(0, hw->hw_addr + rx_ring->rdh); 2125 writel(0, hw->hw_addr + rx_ring->rdt); 2126} 2127 2128/** 2129 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues 2130 * @adapter: board private structure 2131 **/ 2132 2133static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) 2134{ 2135 int i; 2136 2137 for (i = 0; i < adapter->num_rx_queues; i++) 2138 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); 2139} 2140 2141/* The 82542 2.0 (revision 2) needs to have the receive unit in reset 2142 * and memory write and invalidate disabled for certain operations 2143 */ 2144static void e1000_enter_82542_rst(struct e1000_adapter *adapter) 2145{ 2146 struct e1000_hw *hw = &adapter->hw; 2147 struct net_device *netdev = adapter->netdev; 2148 u32 rctl; 2149 2150 e1000_pci_clear_mwi(hw); 2151 2152 rctl = er32(RCTL); 2153 rctl |= E1000_RCTL_RST; 2154 ew32(RCTL, rctl); 2155 E1000_WRITE_FLUSH(); 2156 mdelay(5); 2157 2158 if (netif_running(netdev)) 2159 e1000_clean_all_rx_rings(adapter); 2160} 2161 2162static void e1000_leave_82542_rst(struct e1000_adapter *adapter) 2163{ 2164 struct e1000_hw *hw = &adapter->hw; 2165 struct net_device *netdev = adapter->netdev; 2166 u32 rctl; 2167 2168 rctl = er32(RCTL); 2169 rctl &= ~E1000_RCTL_RST; 2170 ew32(RCTL, rctl); 2171 E1000_WRITE_FLUSH(); 2172 mdelay(5); 2173 2174 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) 2175 e1000_pci_set_mwi(hw); 2176 2177 if (netif_running(netdev)) { 2178 /* No need to loop, because 82542 supports only 1 queue */ 2179 struct e1000_rx_ring *ring = &adapter->rx_ring[0]; 2180 e1000_configure_rx(adapter); 2181 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); 2182 } 2183} 2184 2185/** 2186 * e1000_set_mac - Change the Ethernet Address of the NIC 2187 * @netdev: network interface device structure 2188 * @p: pointer to an address structure 2189 * 2190 * Returns 0 on success, negative on failure 2191 **/ 2192 2193static int e1000_set_mac(struct net_device *netdev, void *p) 2194{ 2195 struct e1000_adapter *adapter = netdev_priv(netdev); 2196 struct e1000_hw *hw = &adapter->hw; 2197 struct sockaddr *addr = p; 2198 2199 if (!is_valid_ether_addr(addr->sa_data)) 2200 return -EADDRNOTAVAIL; 2201 2202 /* 82542 2.0 needs to be in reset to write receive address registers */ 2203 2204 if (hw->mac_type == e1000_82542_rev2_0) 2205 e1000_enter_82542_rst(adapter); 2206 2207 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2208 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); 2209 2210 e1000_rar_set(hw, hw->mac_addr, 0); 2211 2212 if (hw->mac_type == e1000_82542_rev2_0) 2213 e1000_leave_82542_rst(adapter); 2214 2215 return 0; 2216} 2217 2218/** 2219 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2220 * @netdev: network interface device structure 2221 * 2222 * The set_rx_mode entry point is called whenever the unicast or 
multicast 2223 * address lists or the network interface flags are updated. This routine is 2224 * responsible for configuring the hardware for proper unicast, multicast, 2225 * promiscuous mode, and all-multi behavior. 2226 **/ 2227 2228static void e1000_set_rx_mode(struct net_device *netdev) 2229{ 2230 struct e1000_adapter *adapter = netdev_priv(netdev); 2231 struct e1000_hw *hw = &adapter->hw; 2232 struct netdev_hw_addr *ha; 2233 bool use_uc = false; 2234 u32 rctl; 2235 u32 hash_value; 2236 int i, rar_entries = E1000_RAR_ENTRIES; 2237 int mta_reg_count = E1000_NUM_MTA_REGISTERS; 2238 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2239 2240 if (!mcarray) { 2241 e_err(probe, "memory allocation failed\n"); 2242 return; 2243 } 2244 2245 /* Check for Promiscuous and All Multicast modes */ 2246 2247 rctl = er32(RCTL); 2248 2249 if (netdev->flags & IFF_PROMISC) { 2250 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2251 rctl &= ~E1000_RCTL_VFE; 2252 } else { 2253 if (netdev->flags & IFF_ALLMULTI) 2254 rctl |= E1000_RCTL_MPE; 2255 else 2256 rctl &= ~E1000_RCTL_MPE; 2257 /* Enable VLAN filter if there is a VLAN */ 2258 if (e1000_vlan_used(adapter)) 2259 rctl |= E1000_RCTL_VFE; 2260 } 2261 2262 if (netdev_uc_count(netdev) > rar_entries - 1) { 2263 rctl |= E1000_RCTL_UPE; 2264 } else if (!(netdev->flags & IFF_PROMISC)) { 2265 rctl &= ~E1000_RCTL_UPE; 2266 use_uc = true; 2267 } 2268 2269 ew32(RCTL, rctl); 2270 2271 /* 82542 2.0 needs to be in reset to write receive address registers */ 2272 2273 if (hw->mac_type == e1000_82542_rev2_0) 2274 e1000_enter_82542_rst(adapter); 2275 2276 /* load the first 14 addresses into the exact filters 1-14. Unicast 2277 * addresses take precedence to avoid disabling unicast filtering 2278 * when possible. 2279 * 2280 * RAR 0 is used for the station MAC address 2281 * if there are not 14 addresses, go ahead and clear the filters 2282 */ 2283 i = 1; 2284 if (use_uc) 2285 netdev_for_each_uc_addr(ha, netdev) { 2286 if (i == rar_entries) 2287 break; 2288 e1000_rar_set(hw, ha->addr, i++); 2289 } 2290 2291 netdev_for_each_mc_addr(ha, netdev) { 2292 if (i == rar_entries) { 2293 /* load any remaining addresses into the hash table */ 2294 u32 hash_reg, hash_bit, mta; 2295 hash_value = e1000_hash_mc_addr(hw, ha->addr); 2296 hash_reg = (hash_value >> 5) & 0x7F; 2297 hash_bit = hash_value & 0x1F; 2298 mta = (1 << hash_bit); 2299 mcarray[hash_reg] |= mta; 2300 } else { 2301 e1000_rar_set(hw, ha->addr, i++); 2302 } 2303 } 2304 2305 for (; i < rar_entries; i++) { 2306 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); 2307 E1000_WRITE_FLUSH(); 2308 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); 2309 E1000_WRITE_FLUSH(); 2310 } 2311 2312 /* write the hash table completely, write from bottom to avoid 2313 * both stupid write combining chipsets, and flushing each write */ 2314 for (i = mta_reg_count - 1; i >= 0 ; i--) { 2315 /* 2316 * If we are on an 82544 has an errata where writing odd 2317 * offsets overwrites the previous even offset, but writing 2318 * backwards over the range solves the issue by always 2319 * writing the odd offset first 2320 */ 2321 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); 2322 } 2323 E1000_WRITE_FLUSH(); 2324 2325 if (hw->mac_type == e1000_82542_rev2_0) 2326 e1000_leave_82542_rst(adapter); 2327 2328 kfree(mcarray); 2329} 2330 2331/** 2332 * e1000_update_phy_info_task - get phy info 2333 * @work: work struct contained inside adapter struct 2334 * 2335 * Need to wait a few seconds after link up to get diagnostic information from 2336 * the phy 2337 */ 
2338static void e1000_update_phy_info_task(struct work_struct *work) 2339{ 2340 struct e1000_adapter *adapter = container_of(work, 2341 struct e1000_adapter, 2342 phy_info_task.work); 2343 if (test_bit(__E1000_DOWN, &adapter->flags)) 2344 return; 2345 mutex_lock(&adapter->mutex); 2346 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 2347 mutex_unlock(&adapter->mutex); 2348} 2349 2350/** 2351 * e1000_82547_tx_fifo_stall_task - task to complete work 2352 * @work: work struct contained inside adapter struct 2353 **/ 2354static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) 2355{ 2356 struct e1000_adapter *adapter = container_of(work, 2357 struct e1000_adapter, 2358 fifo_stall_task.work); 2359 struct e1000_hw *hw = &adapter->hw; 2360 struct net_device *netdev = adapter->netdev; 2361 u32 tctl; 2362 2363 if (test_bit(__E1000_DOWN, &adapter->flags)) 2364 return; 2365 mutex_lock(&adapter->mutex); 2366 if (atomic_read(&adapter->tx_fifo_stall)) { 2367 if ((er32(TDT) == er32(TDH)) && 2368 (er32(TDFT) == er32(TDFH)) && 2369 (er32(TDFTS) == er32(TDFHS))) { 2370 tctl = er32(TCTL); 2371 ew32(TCTL, tctl & ~E1000_TCTL_EN); 2372 ew32(TDFT, adapter->tx_head_addr); 2373 ew32(TDFH, adapter->tx_head_addr); 2374 ew32(TDFTS, adapter->tx_head_addr); 2375 ew32(TDFHS, adapter->tx_head_addr); 2376 ew32(TCTL, tctl); 2377 E1000_WRITE_FLUSH(); 2378 2379 adapter->tx_fifo_head = 0; 2380 atomic_set(&adapter->tx_fifo_stall, 0); 2381 netif_wake_queue(netdev); 2382 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { 2383 schedule_delayed_work(&adapter->fifo_stall_task, 1); 2384 } 2385 } 2386 mutex_unlock(&adapter->mutex); 2387} 2388 2389bool e1000_has_link(struct e1000_adapter *adapter) 2390{ 2391 struct e1000_hw *hw = &adapter->hw; 2392 bool link_active = false; 2393 2394 /* get_link_status is set on LSC (link status) interrupt or rx 2395 * sequence error interrupt (except on intel ce4100). 
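	 * On the ce4100, get_link_status is forced on below instead, so the
	 * link is re-checked on every pass through this function.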
2396 * get_link_status will stay false until the 2397 * e1000_check_for_link establishes link for copper adapters 2398 * ONLY 2399 */ 2400 switch (hw->media_type) { 2401 case e1000_media_type_copper: 2402 if (hw->mac_type == e1000_ce4100) 2403 hw->get_link_status = 1; 2404 if (hw->get_link_status) { 2405 e1000_check_for_link(hw); 2406 link_active = !hw->get_link_status; 2407 } else { 2408 link_active = true; 2409 } 2410 break; 2411 case e1000_media_type_fiber: 2412 e1000_check_for_link(hw); 2413 link_active = !!(er32(STATUS) & E1000_STATUS_LU); 2414 break; 2415 case e1000_media_type_internal_serdes: 2416 e1000_check_for_link(hw); 2417 link_active = hw->serdes_has_link; 2418 break; 2419 default: 2420 break; 2421 } 2422 2423 return link_active; 2424} 2425 2426/** 2427 * e1000_watchdog - work function 2428 * @work: work struct contained inside adapter struct 2429 **/ 2430static void e1000_watchdog(struct work_struct *work) 2431{ 2432 struct e1000_adapter *adapter = container_of(work, 2433 struct e1000_adapter, 2434 watchdog_task.work); 2435 struct e1000_hw *hw = &adapter->hw; 2436 struct net_device *netdev = adapter->netdev; 2437 struct e1000_tx_ring *txdr = adapter->tx_ring; 2438 u32 link, tctl; 2439 2440 if (test_bit(__E1000_DOWN, &adapter->flags)) 2441 return; 2442 2443 mutex_lock(&adapter->mutex); 2444 link = e1000_has_link(adapter); 2445 if ((netif_carrier_ok(netdev)) && link) 2446 goto link_up; 2447 2448 if (link) { 2449 if (!netif_carrier_ok(netdev)) { 2450 u32 ctrl; 2451 bool txb2b = true; 2452 /* update snapshot of PHY registers on LSC */ 2453 e1000_get_speed_and_duplex(hw, 2454 &adapter->link_speed, 2455 &adapter->link_duplex); 2456 2457 ctrl = er32(CTRL); 2458 pr_info("%s NIC Link is Up %d Mbps %s, " 2459 "Flow Control: %s\n", 2460 netdev->name, 2461 adapter->link_speed, 2462 adapter->link_duplex == FULL_DUPLEX ? 2463 "Full Duplex" : "Half Duplex", 2464 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2465 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2466 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2467 E1000_CTRL_TFCE) ? "TX" : "None"))); 2468 2469 /* adjust timeout factor according to speed/duplex */ 2470 adapter->tx_timeout_factor = 1; 2471 switch (adapter->link_speed) { 2472 case SPEED_10: 2473 txb2b = false; 2474 adapter->tx_timeout_factor = 16; 2475 break; 2476 case SPEED_100: 2477 txb2b = false; 2478 /* maybe add some timeout factor ? 
*/ 2479 break; 2480 } 2481 2482 /* enable transmits in the hardware */ 2483 tctl = er32(TCTL); 2484 tctl |= E1000_TCTL_EN; 2485 ew32(TCTL, tctl); 2486 2487 netif_carrier_on(netdev); 2488 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2489 schedule_delayed_work(&adapter->phy_info_task, 2490 2 * HZ); 2491 adapter->smartspeed = 0; 2492 } 2493 } else { 2494 if (netif_carrier_ok(netdev)) { 2495 adapter->link_speed = 0; 2496 adapter->link_duplex = 0; 2497 pr_info("%s NIC Link is Down\n", 2498 netdev->name); 2499 netif_carrier_off(netdev); 2500 2501 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2502 schedule_delayed_work(&adapter->phy_info_task, 2503 2 * HZ); 2504 } 2505 2506 e1000_smartspeed(adapter); 2507 } 2508 2509link_up: 2510 e1000_update_stats(adapter); 2511 2512 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2513 adapter->tpt_old = adapter->stats.tpt; 2514 hw->collision_delta = adapter->stats.colc - adapter->colc_old; 2515 adapter->colc_old = adapter->stats.colc; 2516 2517 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; 2518 adapter->gorcl_old = adapter->stats.gorcl; 2519 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; 2520 adapter->gotcl_old = adapter->stats.gotcl; 2521 2522 e1000_update_adaptive(hw); 2523 2524 if (!netif_carrier_ok(netdev)) { 2525 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2526 /* We've lost link, so the controller stops DMA, 2527 * but we've got queued Tx work that's never going 2528 * to get done, so reset controller to flush Tx. 2529 * (Do the reset outside of interrupt context). */ 2530 adapter->tx_timeout_count++; 2531 schedule_work(&adapter->reset_task); 2532 /* exit immediately since reset is imminent */ 2533 goto unlock; 2534 } 2535 } 2536 2537 /* Simple mode for Interrupt Throttle Rate (ITR) */ 2538 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { 2539 /* 2540 * Symmetric Tx/Rx gets a reduced ITR=2000; 2541 * Total asymmetrical Tx or Rx gets ITR=8000; 2542 * everyone else is between 2000-8000. 2543 */ 2544 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; 2545 u32 dif = (adapter->gotcl > adapter->gorcl ? 2546 adapter->gotcl - adapter->gorcl : 2547 adapter->gorcl - adapter->gotcl) / 10000; 2548 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 2549 2550 ew32(ITR, 1000000000 / (itr * 256)); 2551 } 2552 2553 /* Cause software interrupt to ensure rx ring is cleaned */ 2554 ew32(ICS, E1000_ICS_RXDMT0); 2555 2556 /* Force detection of hung controller every watchdog period */ 2557 adapter->detect_tx_hung = true; 2558 2559 /* Reschedule the task */ 2560 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2561 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); 2562 2563unlock: 2564 mutex_unlock(&adapter->mutex); 2565} 2566 2567enum latency_range { 2568 lowest_latency = 0, 2569 low_latency = 1, 2570 bulk_latency = 2, 2571 latency_invalid = 255 2572}; 2573 2574/** 2575 * e1000_update_itr - update the dynamic ITR value based on statistics 2576 * @adapter: pointer to adapter 2577 * @itr_setting: current adapter->itr 2578 * @packets: the number of packets during this measurement interval 2579 * @bytes: the number of bytes during this measurement interval 2580 * 2581 * Stores a new ITR value based on packets and byte 2582 * counts during the last interrupt. The advantage of per interrupt 2583 * computation is faster updates and more accurate ITR for the current 2584 * traffic pattern. 
Constants in this function were computed 2585 * based on theoretical maximum wire speed and thresholds were set based 2586 * on testing data as well as attempting to minimize response time 2587 * while increasing bulk throughput. 2588 * this functionality is controlled by the InterruptThrottleRate module 2589 * parameter (see e1000_param.c) 2590 **/ 2591static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2592 u16 itr_setting, int packets, int bytes) 2593{ 2594 unsigned int retval = itr_setting; 2595 struct e1000_hw *hw = &adapter->hw; 2596 2597 if (unlikely(hw->mac_type < e1000_82540)) 2598 goto update_itr_done; 2599 2600 if (packets == 0) 2601 goto update_itr_done; 2602 2603 switch (itr_setting) { 2604 case lowest_latency: 2605 /* jumbo frames get bulk treatment*/ 2606 if (bytes/packets > 8000) 2607 retval = bulk_latency; 2608 else if ((packets < 5) && (bytes > 512)) 2609 retval = low_latency; 2610 break; 2611 case low_latency: /* 50 usec aka 20000 ints/s */ 2612 if (bytes > 10000) { 2613 /* jumbo frames need bulk latency setting */ 2614 if (bytes/packets > 8000) 2615 retval = bulk_latency; 2616 else if ((packets < 10) || ((bytes/packets) > 1200)) 2617 retval = bulk_latency; 2618 else if ((packets > 35)) 2619 retval = lowest_latency; 2620 } else if (bytes/packets > 2000) 2621 retval = bulk_latency; 2622 else if (packets <= 2 && bytes < 512) 2623 retval = lowest_latency; 2624 break; 2625 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2626 if (bytes > 25000) { 2627 if (packets > 35) 2628 retval = low_latency; 2629 } else if (bytes < 6000) { 2630 retval = low_latency; 2631 } 2632 break; 2633 } 2634 2635update_itr_done: 2636 return retval; 2637} 2638 2639static void e1000_set_itr(struct e1000_adapter *adapter) 2640{ 2641 struct e1000_hw *hw = &adapter->hw; 2642 u16 current_itr; 2643 u32 new_itr = adapter->itr; 2644 2645 if (unlikely(hw->mac_type < e1000_82540)) 2646 return; 2647 2648 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 2649 if (unlikely(adapter->link_speed != SPEED_1000)) { 2650 current_itr = 0; 2651 new_itr = 4000; 2652 goto set_itr_now; 2653 } 2654 2655 adapter->tx_itr = e1000_update_itr(adapter, 2656 adapter->tx_itr, 2657 adapter->total_tx_packets, 2658 adapter->total_tx_bytes); 2659 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2660 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) 2661 adapter->tx_itr = low_latency; 2662 2663 adapter->rx_itr = e1000_update_itr(adapter, 2664 adapter->rx_itr, 2665 adapter->total_rx_packets, 2666 adapter->total_rx_bytes); 2667 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2668 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) 2669 adapter->rx_itr = low_latency; 2670 2671 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2672 2673 switch (current_itr) { 2674 /* counts and packets in update_itr are dependent on these numbers */ 2675 case lowest_latency: 2676 new_itr = 70000; 2677 break; 2678 case low_latency: 2679 new_itr = 20000; /* aka hwitr = ~200 */ 2680 break; 2681 case bulk_latency: 2682 new_itr = 4000; 2683 break; 2684 default: 2685 break; 2686 } 2687 2688set_itr_now: 2689 if (new_itr != adapter->itr) { 2690 /* this attempts to bias the interrupt rate towards Bulk 2691 * by adding intermediate steps when interrupt rate is 2692 * increasing */ 2693 new_itr = new_itr > adapter->itr ? 
2694 min(adapter->itr + (new_itr >> 2), new_itr) : 2695 new_itr; 2696 adapter->itr = new_itr; 2697 ew32(ITR, 1000000000 / (new_itr * 256)); 2698 } 2699} 2700 2701#define E1000_TX_FLAGS_CSUM 0x00000001 2702#define E1000_TX_FLAGS_VLAN 0x00000002 2703#define E1000_TX_FLAGS_TSO 0x00000004 2704#define E1000_TX_FLAGS_IPV4 0x00000008 2705#define E1000_TX_FLAGS_NO_FCS 0x00000010 2706#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 2707#define E1000_TX_FLAGS_VLAN_SHIFT 16 2708 2709static int e1000_tso(struct e1000_adapter *adapter, 2710 struct e1000_tx_ring *tx_ring, struct sk_buff *skb) 2711{ 2712 struct e1000_context_desc *context_desc; 2713 struct e1000_buffer *buffer_info; 2714 unsigned int i; 2715 u32 cmd_length = 0; 2716 u16 ipcse = 0, tucse, mss; 2717 u8 ipcss, ipcso, tucss, tucso, hdr_len; 2718 int err; 2719 2720 if (skb_is_gso(skb)) { 2721 if (skb_header_cloned(skb)) { 2722 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2723 if (err) 2724 return err; 2725 } 2726 2727 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2728 mss = skb_shinfo(skb)->gso_size; 2729 if (skb->protocol == htons(ETH_P_IP)) { 2730 struct iphdr *iph = ip_hdr(skb); 2731 iph->tot_len = 0; 2732 iph->check = 0; 2733 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2734 iph->daddr, 0, 2735 IPPROTO_TCP, 2736 0); 2737 cmd_length = E1000_TXD_CMD_IP; 2738 ipcse = skb_transport_offset(skb) - 1; 2739 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2740 ipv6_hdr(skb)->payload_len = 0; 2741 tcp_hdr(skb)->check = 2742 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2743 &ipv6_hdr(skb)->daddr, 2744 0, IPPROTO_TCP, 0); 2745 ipcse = 0; 2746 } 2747 ipcss = skb_network_offset(skb); 2748 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; 2749 tucss = skb_transport_offset(skb); 2750 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 2751 tucse = 0; 2752 2753 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2754 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 2755 2756 i = tx_ring->next_to_use; 2757 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2758 buffer_info = &tx_ring->buffer_info[i]; 2759 2760 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2761 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2762 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 2763 context_desc->upper_setup.tcp_fields.tucss = tucss; 2764 context_desc->upper_setup.tcp_fields.tucso = tucso; 2765 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); 2766 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); 2767 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2768 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2769 2770 buffer_info->time_stamp = jiffies; 2771 buffer_info->next_to_watch = i; 2772 2773 if (++i == tx_ring->count) i = 0; 2774 tx_ring->next_to_use = i; 2775 2776 return true; 2777 } 2778 return false; 2779} 2780 2781static bool e1000_tx_csum(struct e1000_adapter *adapter, 2782 struct e1000_tx_ring *tx_ring, struct sk_buff *skb) 2783{ 2784 struct e1000_context_desc *context_desc; 2785 struct e1000_buffer *buffer_info; 2786 unsigned int i; 2787 u8 css; 2788 u32 cmd_len = E1000_TXD_CMD_DEXT; 2789 2790 if (skb->ip_summed != CHECKSUM_PARTIAL) 2791 return false; 2792 2793 switch (skb->protocol) { 2794 case cpu_to_be16(ETH_P_IP): 2795 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2796 cmd_len |= E1000_TXD_CMD_TCP; 2797 break; 2798 case cpu_to_be16(ETH_P_IPV6): 2799 /* XXX not handling all IPV6 headers */ 2800 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2801 cmd_len |= E1000_TXD_CMD_TCP; 2802 break; 
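	/* non-IP protocols: the generic checksum context below is still
	 * built from the checksum start/offset, but warn (rate-limited)
	 * that the protocol was not recognized */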
2803 default: 2804 if (unlikely(net_ratelimit())) 2805 e_warn(drv, "checksum_partial proto=%x!\n", 2806 skb->protocol); 2807 break; 2808 } 2809 2810 css = skb_checksum_start_offset(skb); 2811 2812 i = tx_ring->next_to_use; 2813 buffer_info = &tx_ring->buffer_info[i]; 2814 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2815 2816 context_desc->lower_setup.ip_config = 0; 2817 context_desc->upper_setup.tcp_fields.tucss = css; 2818 context_desc->upper_setup.tcp_fields.tucso = 2819 css + skb->csum_offset; 2820 context_desc->upper_setup.tcp_fields.tucse = 0; 2821 context_desc->tcp_seg_setup.data = 0; 2822 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 2823 2824 buffer_info->time_stamp = jiffies; 2825 buffer_info->next_to_watch = i; 2826 2827 if (unlikely(++i == tx_ring->count)) i = 0; 2828 tx_ring->next_to_use = i; 2829 2830 return true; 2831} 2832 2833#define E1000_MAX_TXD_PWR 12 2834#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) 2835 2836static int e1000_tx_map(struct e1000_adapter *adapter, 2837 struct e1000_tx_ring *tx_ring, 2838 struct sk_buff *skb, unsigned int first, 2839 unsigned int max_per_txd, unsigned int nr_frags, 2840 unsigned int mss) 2841{ 2842 struct e1000_hw *hw = &adapter->hw; 2843 struct pci_dev *pdev = adapter->pdev; 2844 struct e1000_buffer *buffer_info; 2845 unsigned int len = skb_headlen(skb); 2846 unsigned int offset = 0, size, count = 0, i; 2847 unsigned int f, bytecount, segs; 2848 2849 i = tx_ring->next_to_use; 2850 2851 while (len) { 2852 buffer_info = &tx_ring->buffer_info[i]; 2853 size = min(len, max_per_txd); 2854 /* Workaround for Controller erratum -- 2855 * descriptor for non-tso packet in a linear SKB that follows a 2856 * tso gets written back prematurely before the data is fully 2857 * DMA'd to the controller */ 2858 if (!skb->data_len && tx_ring->last_tx_tso && 2859 !skb_is_gso(skb)) { 2860 tx_ring->last_tx_tso = false; 2861 size -= 4; 2862 } 2863 2864 /* Workaround for premature desc write-backs 2865 * in TSO mode. Append 4-byte sentinel desc */ 2866 if (unlikely(mss && !nr_frags && size == len && size > 8)) 2867 size -= 4; 2868 /* work-around for errata 10 and it applies 2869 * to all controllers in PCI-X mode 2870 * The fix is to make sure that the first descriptor of a 2871 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 2872 */ 2873 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 2874 (size > 2015) && count == 0)) 2875 size = 2015; 2876 2877 /* Workaround for potential 82544 hang in PCI-X. Avoid 2878 * terminating buffers within evenly-aligned dwords. 
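	 * If the buffer would end on an evenly-aligned dword (bit 2 of the
	 * last byte's address clear), trim 4 bytes so it ends in the odd
	 * dword instead; the trimmed bytes are carried into the next
	 * descriptor.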
*/ 2879 if (unlikely(adapter->pcix_82544 && 2880 !((unsigned long)(skb->data + offset + size - 1) & 4) && 2881 size > 4)) 2882 size -= 4; 2883 2884 buffer_info->length = size; 2885 /* set time_stamp *before* dma to help avoid a possible race */ 2886 buffer_info->time_stamp = jiffies; 2887 buffer_info->mapped_as_page = false; 2888 buffer_info->dma = dma_map_single(&pdev->dev, 2889 skb->data + offset, 2890 size, DMA_TO_DEVICE); 2891 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2892 goto dma_error; 2893 buffer_info->next_to_watch = i; 2894 2895 len -= size; 2896 offset += size; 2897 count++; 2898 if (len) { 2899 i++; 2900 if (unlikely(i == tx_ring->count)) 2901 i = 0; 2902 } 2903 } 2904 2905 for (f = 0; f < nr_frags; f++) { 2906 const struct skb_frag_struct *frag; 2907 2908 frag = &skb_shinfo(skb)->frags[f]; 2909 len = skb_frag_size(frag); 2910 offset = 0; 2911 2912 while (len) { 2913 unsigned long bufend; 2914 i++; 2915 if (unlikely(i == tx_ring->count)) 2916 i = 0; 2917 2918 buffer_info = &tx_ring->buffer_info[i]; 2919 size = min(len, max_per_txd); 2920 /* Workaround for premature desc write-backs 2921 * in TSO mode. Append 4-byte sentinel desc */ 2922 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 2923 size -= 4; 2924 /* Workaround for potential 82544 hang in PCI-X. 2925 * Avoid terminating buffers within evenly-aligned 2926 * dwords. */ 2927 bufend = (unsigned long) 2928 page_to_phys(skb_frag_page(frag)); 2929 bufend += offset + size - 1; 2930 if (unlikely(adapter->pcix_82544 && 2931 !(bufend & 4) && 2932 size > 4)) 2933 size -= 4; 2934 2935 buffer_info->length = size; 2936 buffer_info->time_stamp = jiffies; 2937 buffer_info->mapped_as_page = true; 2938 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 2939 offset, size, DMA_TO_DEVICE); 2940 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2941 goto dma_error; 2942 buffer_info->next_to_watch = i; 2943 2944 len -= size; 2945 offset += size; 2946 count++; 2947 } 2948 } 2949 2950 segs = skb_shinfo(skb)->gso_segs ?: 1; 2951 /* multiply data chunks by size of headers */ 2952 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; 2953 2954 tx_ring->buffer_info[i].skb = skb; 2955 tx_ring->buffer_info[i].segs = segs; 2956 tx_ring->buffer_info[i].bytecount = bytecount; 2957 tx_ring->buffer_info[first].next_to_watch = i; 2958 2959 return count; 2960 2961dma_error: 2962 dev_err(&pdev->dev, "TX DMA map failed\n"); 2963 buffer_info->dma = 0; 2964 if (count) 2965 count--; 2966 2967 while (count--) { 2968 if (i==0) 2969 i += tx_ring->count; 2970 i--; 2971 buffer_info = &tx_ring->buffer_info[i]; 2972 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 2973 } 2974 2975 return 0; 2976} 2977 2978static void e1000_tx_queue(struct e1000_adapter *adapter, 2979 struct e1000_tx_ring *tx_ring, int tx_flags, 2980 int count) 2981{ 2982 struct e1000_hw *hw = &adapter->hw; 2983 struct e1000_tx_desc *tx_desc = NULL; 2984 struct e1000_buffer *buffer_info; 2985 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2986 unsigned int i; 2987 2988 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2989 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2990 E1000_TXD_CMD_TSE; 2991 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2992 2993 if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) 2994 txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2995 } 2996 2997 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { 2998 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; 2999 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 3000 } 3001 3002 if (unlikely(tx_flags & 
E1000_TX_FLAGS_VLAN)) { 3003 txd_lower |= E1000_TXD_CMD_VLE; 3004 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); 3005 } 3006 3007 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 3008 txd_lower &= ~(E1000_TXD_CMD_IFCS); 3009 3010 i = tx_ring->next_to_use; 3011 3012 while (count--) { 3013 buffer_info = &tx_ring->buffer_info[i]; 3014 tx_desc = E1000_TX_DESC(*tx_ring, i); 3015 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 3016 tx_desc->lower.data = 3017 cpu_to_le32(txd_lower | buffer_info->length); 3018 tx_desc->upper.data = cpu_to_le32(txd_upper); 3019 if (unlikely(++i == tx_ring->count)) i = 0; 3020 } 3021 3022 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 3023 3024 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ 3025 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 3026 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 3027 3028 /* Force memory writes to complete before letting h/w 3029 * know there are new descriptors to fetch. (Only 3030 * applicable for weak-ordered memory model archs, 3031 * such as IA-64). */ 3032 wmb(); 3033 3034 tx_ring->next_to_use = i; 3035 writel(i, hw->hw_addr + tx_ring->tdt); 3036 /* we need this if more than one processor can write to our tail 3037 * at a time, it syncronizes IO on IA64/Altix systems */ 3038 mmiowb(); 3039} 3040 3041/** 3042 * 82547 workaround to avoid controller hang in half-duplex environment. 3043 * The workaround is to avoid queuing a large packet that would span 3044 * the internal Tx FIFO ring boundary by notifying the stack to resend 3045 * the packet at a later time. This gives the Tx FIFO an opportunity to 3046 * flush all packets. When that occurs, we reset the Tx FIFO pointers 3047 * to the beginning of the Tx FIFO. 3048 **/ 3049 3050#define E1000_FIFO_HDR 0x10 3051#define E1000_82547_PAD_LEN 0x3E0 3052 3053static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, 3054 struct sk_buff *skb) 3055{ 3056 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 3057 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; 3058 3059 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 3060 3061 if (adapter->link_duplex != HALF_DUPLEX) 3062 goto no_fifo_stall_required; 3063 3064 if (atomic_read(&adapter->tx_fifo_stall)) 3065 return 1; 3066 3067 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { 3068 atomic_set(&adapter->tx_fifo_stall, 1); 3069 return 1; 3070 } 3071 3072no_fifo_stall_required: 3073 adapter->tx_fifo_head += skb_fifo_len; 3074 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 3075 adapter->tx_fifo_head -= adapter->tx_fifo_size; 3076 return 0; 3077} 3078 3079static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) 3080{ 3081 struct e1000_adapter *adapter = netdev_priv(netdev); 3082 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3083 3084 netif_stop_queue(netdev); 3085 /* Herbert's original patch had: 3086 * smp_mb__after_netif_stop_queue(); 3087 * but since that doesn't exist yet, just open code it. */ 3088 smp_mb(); 3089 3090 /* We need to check again in a case another CPU has just 3091 * made room available. */ 3092 if (likely(E1000_DESC_UNUSED(tx_ring) < size)) 3093 return -EBUSY; 3094 3095 /* A reprieve! 
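	 * -- the Tx clean path made room after the queue was stopped above,
	 * so wake the queue back up and count the restart.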
*/ 3096 netif_start_queue(netdev); 3097 ++adapter->restart_queue; 3098 return 0; 3099} 3100 3101static int e1000_maybe_stop_tx(struct net_device *netdev, 3102 struct e1000_tx_ring *tx_ring, int size) 3103{ 3104 if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) 3105 return 0; 3106 return __e1000_maybe_stop_tx(netdev, size); 3107} 3108 3109#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 3110static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 3111 struct net_device *netdev) 3112{ 3113 struct e1000_adapter *adapter = netdev_priv(netdev); 3114 struct e1000_hw *hw = &adapter->hw; 3115 struct e1000_tx_ring *tx_ring; 3116 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 3117 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 3118 unsigned int tx_flags = 0; 3119 unsigned int len = skb_headlen(skb); 3120 unsigned int nr_frags; 3121 unsigned int mss; 3122 int count = 0; 3123 int tso; 3124 unsigned int f; 3125 3126 /* This goes back to the question of how to logically map a tx queue 3127 * to a flow. Right now, performance is impacted slightly negatively 3128 * if using multiple tx queues. If the stack breaks away from a 3129 * single qdisc implementation, we can look at this again. */ 3130 tx_ring = adapter->tx_ring; 3131 3132 if (unlikely(skb->len <= 0)) { 3133 dev_kfree_skb_any(skb); 3134 return NETDEV_TX_OK; 3135 } 3136 3137 mss = skb_shinfo(skb)->gso_size; 3138 /* The controller does a simple calculation to 3139 * make sure there is enough room in the FIFO before 3140 * initiating the DMA for each buffer. The calc is: 3141 * 4 = ceil(buffer len/mss). To make sure we don't 3142 * overrun the FIFO, adjust the max buffer len if mss 3143 * drops. */ 3144 if (mss) { 3145 u8 hdr_len; 3146 max_per_txd = min(mss << 2, max_per_txd); 3147 max_txd_pwr = fls(max_per_txd) - 1; 3148 3149 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3150 if (skb->data_len && hdr_len == len) { 3151 switch (hw->mac_type) { 3152 unsigned int pull_size; 3153 case e1000_82544: 3154 /* Make sure we have room to chop off 4 bytes, 3155 * and that the end alignment will work out to 3156 * this hardware's requirements 3157 * NOTE: this is a TSO only workaround 3158 * if end byte alignment not correct move us 3159 * into the next dword */ 3160 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) 3161 break; 3162 /* fall through */ 3163 pull_size = min((unsigned int)4, skb->data_len); 3164 if (!__pskb_pull_tail(skb, pull_size)) { 3165 e_err(drv, "__pskb_pull_tail " 3166 "failed.\n"); 3167 dev_kfree_skb_any(skb); 3168 return NETDEV_TX_OK; 3169 } 3170 len = skb_headlen(skb); 3171 break; 3172 default: 3173 /* do nothing */ 3174 break; 3175 } 3176 } 3177 } 3178 3179 /* reserve a descriptor for the offload context */ 3180 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3181 count++; 3182 count++; 3183 3184 /* Controller Erratum workaround */ 3185 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3186 count++; 3187 3188 count += TXD_USE_COUNT(len, max_txd_pwr); 3189 3190 if (adapter->pcix_82544) 3191 count++; 3192 3193 /* work-around for errata 10 and it applies to all controllers 3194 * in PCI-X mode, so add one more descriptor to the count 3195 */ 3196 if (unlikely((hw->bus_type == e1000_bus_type_pcix) && 3197 (len > 2015))) 3198 count++; 3199 3200 nr_frags = skb_shinfo(skb)->nr_frags; 3201 for (f = 0; f < nr_frags; f++) 3202 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 3203 max_txd_pwr); 3204 if (adapter->pcix_82544) 3205 count += nr_frags; 3206 3207 /* need: count + 2 desc gap to keep 
tail from touching 3208 * head, otherwise try next time */ 3209 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) 3210 return NETDEV_TX_BUSY; 3211 3212 if (unlikely((hw->mac_type == e1000_82547) && 3213 (e1000_82547_fifo_workaround(adapter, skb)))) { 3214 netif_stop_queue(netdev); 3215 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3216 schedule_delayed_work(&adapter->fifo_stall_task, 1); 3217 return NETDEV_TX_BUSY; 3218 } 3219 3220 if (vlan_tx_tag_present(skb)) { 3221 tx_flags |= E1000_TX_FLAGS_VLAN; 3222 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 3223 } 3224 3225 first = tx_ring->next_to_use; 3226 3227 tso = e1000_tso(adapter, tx_ring, skb); 3228 if (tso < 0) { 3229 dev_kfree_skb_any(skb); 3230 return NETDEV_TX_OK; 3231 } 3232 3233 if (likely(tso)) { 3234 if (likely(hw->mac_type != e1000_82544)) 3235 tx_ring->last_tx_tso = true; 3236 tx_flags |= E1000_TX_FLAGS_TSO; 3237 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) 3238 tx_flags |= E1000_TX_FLAGS_CSUM; 3239 3240 if (likely(skb->protocol == htons(ETH_P_IP))) 3241 tx_flags |= E1000_TX_FLAGS_IPV4; 3242 3243 if (unlikely(skb->no_fcs)) 3244 tx_flags |= E1000_TX_FLAGS_NO_FCS; 3245 3246 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, 3247 nr_frags, mss); 3248 3249 if (count) { 3250 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3251 /* Make sure there is space in the ring for the next send. */ 3252 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3253 3254 } else { 3255 dev_kfree_skb_any(skb); 3256 tx_ring->buffer_info[first].time_stamp = 0; 3257 tx_ring->next_to_use = first; 3258 } 3259 3260 return NETDEV_TX_OK; 3261} 3262 3263#define NUM_REGS 38 /* 1 based count */ 3264static void e1000_regdump(struct e1000_adapter *adapter) 3265{ 3266 struct e1000_hw *hw = &adapter->hw; 3267 u32 regs[NUM_REGS]; 3268 u32 *regs_buff = regs; 3269 int i = 0; 3270 3271 static const char * const reg_name[] = { 3272 "CTRL", "STATUS", 3273 "RCTL", "RDLEN", "RDH", "RDT", "RDTR", 3274 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", 3275 "TIDV", "TXDCTL", "TADV", "TARC0", 3276 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", 3277 "TXDCTL1", "TARC1", 3278 "CTRL_EXT", "ERT", "RDBAL", "RDBAH", 3279 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", 3280 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" 3281 }; 3282 3283 regs_buff[0] = er32(CTRL); 3284 regs_buff[1] = er32(STATUS); 3285 3286 regs_buff[2] = er32(RCTL); 3287 regs_buff[3] = er32(RDLEN); 3288 regs_buff[4] = er32(RDH); 3289 regs_buff[5] = er32(RDT); 3290 regs_buff[6] = er32(RDTR); 3291 3292 regs_buff[7] = er32(TCTL); 3293 regs_buff[8] = er32(TDBAL); 3294 regs_buff[9] = er32(TDBAH); 3295 regs_buff[10] = er32(TDLEN); 3296 regs_buff[11] = er32(TDH); 3297 regs_buff[12] = er32(TDT); 3298 regs_buff[13] = er32(TIDV); 3299 regs_buff[14] = er32(TXDCTL); 3300 regs_buff[15] = er32(TADV); 3301 regs_buff[16] = er32(TARC0); 3302 3303 regs_buff[17] = er32(TDBAL1); 3304 regs_buff[18] = er32(TDBAH1); 3305 regs_buff[19] = er32(TDLEN1); 3306 regs_buff[20] = er32(TDH1); 3307 regs_buff[21] = er32(TDT1); 3308 regs_buff[22] = er32(TXDCTL1); 3309 regs_buff[23] = er32(TARC1); 3310 regs_buff[24] = er32(CTRL_EXT); 3311 regs_buff[25] = er32(ERT); 3312 regs_buff[26] = er32(RDBAL0); 3313 regs_buff[27] = er32(RDBAH0); 3314 regs_buff[28] = er32(TDFH); 3315 regs_buff[29] = er32(TDFT); 3316 regs_buff[30] = er32(TDFHS); 3317 regs_buff[31] = er32(TDFTS); 3318 regs_buff[32] = er32(TDFPC); 3319 regs_buff[33] = er32(RDFH); 3320 regs_buff[34] = er32(RDFT); 3321 regs_buff[35] = er32(RDFHS); 3322 
regs_buff[36] = er32(RDFTS); 3323 regs_buff[37] = er32(RDFPC); 3324 3325 pr_info("Register dump\n"); 3326 for (i = 0; i < NUM_REGS; i++) 3327 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); 3328} 3329 3330/* 3331 * e1000_dump: Print registers, tx ring and rx ring 3332 */ 3333static void e1000_dump(struct e1000_adapter *adapter) 3334{ 3335 /* this code doesn't handle multiple rings */ 3336 struct e1000_tx_ring *tx_ring = adapter->tx_ring; 3337 struct e1000_rx_ring *rx_ring = adapter->rx_ring; 3338 int i; 3339 3340 if (!netif_msg_hw(adapter)) 3341 return; 3342 3343 /* Print Registers */ 3344 e1000_regdump(adapter); 3345 3346 /* 3347 * transmit dump 3348 */ 3349 pr_info("TX Desc ring0 dump\n"); 3350 3351 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) 3352 * 3353 * Legacy Transmit Descriptor 3354 * +--------------------------------------------------------------+ 3355 * 0 | Buffer Address [63:0] (Reserved on Write Back) | 3356 * +--------------------------------------------------------------+ 3357 * 8 | Special | CSS | Status | CMD | CSO | Length | 3358 * +--------------------------------------------------------------+ 3359 * 63 48 47 36 35 32 31 24 23 16 15 0 3360 * 3361 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload 3362 * 63 48 47 40 39 32 31 16 15 8 7 0 3363 * +----------------------------------------------------------------+ 3364 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | 3365 * +----------------------------------------------------------------+ 3366 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | 3367 * +----------------------------------------------------------------+ 3368 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3369 * 3370 * Extended Data Descriptor (DTYP=0x1) 3371 * +----------------------------------------------------------------+ 3372 * 0 | Buffer Address [63:0] | 3373 * +----------------------------------------------------------------+ 3374 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | 3375 * +----------------------------------------------------------------+ 3376 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 3377 */ 3378 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3379 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); 3380 3381 if (!netif_msg_tx_done(adapter)) 3382 goto rx_ring_summary; 3383 3384 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 3385 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 3386 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i]; 3387 struct my_u { __le64 a; __le64 b; }; 3388 struct my_u *u = (struct my_u *)tx_desc; 3389 const char *type; 3390 3391 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) 3392 type = "NTC/U"; 3393 else if (i == tx_ring->next_to_use) 3394 type = "NTU"; 3395 else if (i == tx_ring->next_to_clean) 3396 type = "NTC"; 3397 else 3398 type = ""; 3399 3400 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", 3401 ((le64_to_cpu(u->b) & (1<<20)) ? 
'd' : 'c'), i, 3402 le64_to_cpu(u->a), le64_to_cpu(u->b), 3403 (u64)buffer_info->dma, buffer_info->length, 3404 buffer_info->next_to_watch, 3405 (u64)buffer_info->time_stamp, buffer_info->skb, type); 3406 } 3407 3408rx_ring_summary: 3409 /* 3410 * receive dump 3411 */ 3412 pr_info("\nRX Desc ring dump\n"); 3413 3414 /* Legacy Receive Descriptor Format 3415 * 3416 * +-----------------------------------------------------+ 3417 * | Buffer Address [63:0] | 3418 * +-----------------------------------------------------+ 3419 * | VLAN Tag | Errors | Status 0 | Packet csum | Length | 3420 * +-----------------------------------------------------+ 3421 * 63 48 47 40 39 32 31 16 15 0 3422 */ 3423 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); 3424 3425 if (!netif_msg_rx_status(adapter)) 3426 goto exit; 3427 3428 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { 3429 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 3430 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i]; 3431 struct my_u { __le64 a; __le64 b; }; 3432 struct my_u *u = (struct my_u *)rx_desc; 3433 const char *type; 3434 3435 if (i == rx_ring->next_to_use) 3436 type = "NTU"; 3437 else if (i == rx_ring->next_to_clean) 3438 type = "NTC"; 3439 else 3440 type = ""; 3441 3442 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", 3443 i, le64_to_cpu(u->a), le64_to_cpu(u->b), 3444 (u64)buffer_info->dma, buffer_info->skb, type); 3445 } /* for */ 3446 3447 /* dump the descriptor caches */ 3448 /* rx */ 3449 pr_info("Rx descriptor cache in 64bit format\n"); 3450 for (i = 0x6000; i <= 0x63FF ; i += 0x10) { 3451 pr_info("R%04X: %08X|%08X %08X|%08X\n", 3452 i, 3453 readl(adapter->hw.hw_addr + i+4), 3454 readl(adapter->hw.hw_addr + i), 3455 readl(adapter->hw.hw_addr + i+12), 3456 readl(adapter->hw.hw_addr + i+8)); 3457 } 3458 /* tx */ 3459 pr_info("Tx descriptor cache in 64bit format\n"); 3460 for (i = 0x7000; i <= 0x73FF ; i += 0x10) { 3461 pr_info("T%04X: %08X|%08X %08X|%08X\n", 3462 i, 3463 readl(adapter->hw.hw_addr + i+4), 3464 readl(adapter->hw.hw_addr + i), 3465 readl(adapter->hw.hw_addr + i+12), 3466 readl(adapter->hw.hw_addr + i+8)); 3467 } 3468exit: 3469 return; 3470} 3471 3472/** 3473 * e1000_tx_timeout - Respond to a Tx Hang 3474 * @netdev: network interface device structure 3475 **/ 3476 3477static void e1000_tx_timeout(struct net_device *netdev) 3478{ 3479 struct e1000_adapter *adapter = netdev_priv(netdev); 3480 3481 /* Do the reset outside of interrupt context */ 3482 adapter->tx_timeout_count++; 3483 schedule_work(&adapter->reset_task); 3484} 3485 3486static void e1000_reset_task(struct work_struct *work) 3487{ 3488 struct e1000_adapter *adapter = 3489 container_of(work, struct e1000_adapter, reset_task); 3490 3491 if (test_bit(__E1000_DOWN, &adapter->flags)) 3492 return; 3493 e_err(drv, "Reset adapter\n"); 3494 e1000_reinit_safe(adapter); 3495} 3496 3497/** 3498 * e1000_get_stats - Get System Network Statistics 3499 * @netdev: network interface device structure 3500 * 3501 * Returns the address of the device statistics structure. 3502 * The statistics are actually updated from the watchdog. 
3503 **/ 3504 3505static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 3506{ 3507 /* only return the current stats */ 3508 return &netdev->stats; 3509} 3510 3511/** 3512 * e1000_change_mtu - Change the Maximum Transfer Unit 3513 * @netdev: network interface device structure 3514 * @new_mtu: new value for maximum frame size 3515 * 3516 * Returns 0 on success, negative on failure 3517 **/ 3518 3519static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 3520{ 3521 struct e1000_adapter *adapter = netdev_priv(netdev); 3522 struct e1000_hw *hw = &adapter->hw; 3523 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3524 3525 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3526 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3527 e_err(probe, "Invalid MTU setting\n"); 3528 return -EINVAL; 3529 } 3530 3531 /* Adapter-specific max frame size limits. */ 3532 switch (hw->mac_type) { 3533 case e1000_undefined ... e1000_82542_rev2_1: 3534 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3535 e_err(probe, "Jumbo Frames not supported.\n"); 3536 return -EINVAL; 3537 } 3538 break; 3539 default: 3540 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ 3541 break; 3542 } 3543 3544 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 3545 msleep(1); 3546 /* e1000_down has a dependency on max_frame_size */ 3547 hw->max_frame_size = max_frame; 3548 if (netif_running(netdev)) 3549 e1000_down(adapter); 3550 3551 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3552 * means we reserve 2 more, this pushes us to allocate from the next 3553 * larger slab size. 3554 * i.e. RXBUFFER_2048 --> size-4096 slab 3555 * however with the new *_jumbo_rx* routines, jumbo receives will use 3556 * fragmented skbs */ 3557 3558 if (max_frame <= E1000_RXBUFFER_2048) 3559 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 3560 else 3561#if (PAGE_SIZE >= E1000_RXBUFFER_16384) 3562 adapter->rx_buffer_len = E1000_RXBUFFER_16384; 3563#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) 3564 adapter->rx_buffer_len = PAGE_SIZE; 3565#endif 3566 3567 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3568 if (!hw->tbi_compatibility_on && 3569 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || 3570 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3571 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3572 3573 pr_info("%s changing MTU from %d to %d\n", 3574 netdev->name, netdev->mtu, new_mtu); 3575 netdev->mtu = new_mtu; 3576 3577 if (netif_running(netdev)) 3578 e1000_up(adapter); 3579 else 3580 e1000_reset(adapter); 3581 3582 clear_bit(__E1000_RESETTING, &adapter->flags); 3583 3584 return 0; 3585} 3586 3587/** 3588 * e1000_update_stats - Update the board statistics counters 3589 * @adapter: board private structure 3590 **/ 3591 3592void e1000_update_stats(struct e1000_adapter *adapter) 3593{ 3594 struct net_device *netdev = adapter->netdev; 3595 struct e1000_hw *hw = &adapter->hw; 3596 struct pci_dev *pdev = adapter->pdev; 3597 unsigned long flags; 3598 u16 phy_tmp; 3599 3600#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3601 3602 /* 3603 * Prevent stats update while adapter is being reset, or if the pci 3604 * connection is down. 
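	 * A link_speed of zero also covers the interface-down case, since
	 * the watchdog clears it when carrier is lost.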
3605 */ 3606 if (adapter->link_speed == 0) 3607 return; 3608 if (pci_channel_offline(pdev)) 3609 return; 3610 3611 spin_lock_irqsave(&adapter->stats_lock, flags); 3612 3613 /* these counters are modified from e1000_tbi_adjust_stats, 3614 * called from the interrupt context, so they must only 3615 * be written while holding adapter->stats_lock 3616 */ 3617 3618 adapter->stats.crcerrs += er32(CRCERRS); 3619 adapter->stats.gprc += er32(GPRC); 3620 adapter->stats.gorcl += er32(GORCL); 3621 adapter->stats.gorch += er32(GORCH); 3622 adapter->stats.bprc += er32(BPRC); 3623 adapter->stats.mprc += er32(MPRC); 3624 adapter->stats.roc += er32(ROC); 3625 3626 adapter->stats.prc64 += er32(PRC64); 3627 adapter->stats.prc127 += er32(PRC127); 3628 adapter->stats.prc255 += er32(PRC255); 3629 adapter->stats.prc511 += er32(PRC511); 3630 adapter->stats.prc1023 += er32(PRC1023); 3631 adapter->stats.prc1522 += er32(PRC1522); 3632 3633 adapter->stats.symerrs += er32(SYMERRS); 3634 adapter->stats.mpc += er32(MPC); 3635 adapter->stats.scc += er32(SCC); 3636 adapter->stats.ecol += er32(ECOL); 3637 adapter->stats.mcc += er32(MCC); 3638 adapter->stats.latecol += er32(LATECOL); 3639 adapter->stats.dc += er32(DC); 3640 adapter->stats.sec += er32(SEC); 3641 adapter->stats.rlec += er32(RLEC); 3642 adapter->stats.xonrxc += er32(XONRXC); 3643 adapter->stats.xontxc += er32(XONTXC); 3644 adapter->stats.xoffrxc += er32(XOFFRXC); 3645 adapter->stats.xofftxc += er32(XOFFTXC); 3646 adapter->stats.fcruc += er32(FCRUC); 3647 adapter->stats.gptc += er32(GPTC); 3648 adapter->stats.gotcl += er32(GOTCL); 3649 adapter->stats.gotch += er32(GOTCH); 3650 adapter->stats.rnbc += er32(RNBC); 3651 adapter->stats.ruc += er32(RUC); 3652 adapter->stats.rfc += er32(RFC); 3653 adapter->stats.rjc += er32(RJC); 3654 adapter->stats.torl += er32(TORL); 3655 adapter->stats.torh += er32(TORH); 3656 adapter->stats.totl += er32(TOTL); 3657 adapter->stats.toth += er32(TOTH); 3658 adapter->stats.tpr += er32(TPR); 3659 3660 adapter->stats.ptc64 += er32(PTC64); 3661 adapter->stats.ptc127 += er32(PTC127); 3662 adapter->stats.ptc255 += er32(PTC255); 3663 adapter->stats.ptc511 += er32(PTC511); 3664 adapter->stats.ptc1023 += er32(PTC1023); 3665 adapter->stats.ptc1522 += er32(PTC1522); 3666 3667 adapter->stats.mptc += er32(MPTC); 3668 adapter->stats.bptc += er32(BPTC); 3669 3670 /* used for adaptive IFS */ 3671 3672 hw->tx_packet_delta = er32(TPT); 3673 adapter->stats.tpt += hw->tx_packet_delta; 3674 hw->collision_delta = er32(COLC); 3675 adapter->stats.colc += hw->collision_delta; 3676 3677 if (hw->mac_type >= e1000_82543) { 3678 adapter->stats.algnerrc += er32(ALGNERRC); 3679 adapter->stats.rxerrc += er32(RXERRC); 3680 adapter->stats.tncrs += er32(TNCRS); 3681 adapter->stats.cexterr += er32(CEXTERR); 3682 adapter->stats.tsctc += er32(TSCTC); 3683 adapter->stats.tsctfc += er32(TSCTFC); 3684 } 3685 3686 /* Fill out the OS statistics structure */ 3687 netdev->stats.multicast = adapter->stats.mprc; 3688 netdev->stats.collisions = adapter->stats.colc; 3689 3690 /* Rx Errors */ 3691 3692 /* RLEC on some newer hardware can be incorrect so build 3693 * our own version based on RUC and ROC */ 3694 netdev->stats.rx_errors = adapter->stats.rxerrc + 3695 adapter->stats.crcerrs + adapter->stats.algnerrc + 3696 adapter->stats.ruc + adapter->stats.roc + 3697 adapter->stats.cexterr; 3698 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3699 netdev->stats.rx_length_errors = adapter->stats.rlerrc; 3700 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 3701 
netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 3702 netdev->stats.rx_missed_errors = adapter->stats.mpc; 3703 3704 /* Tx Errors */ 3705 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3706 netdev->stats.tx_errors = adapter->stats.txerrc; 3707 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 3708 netdev->stats.tx_window_errors = adapter->stats.latecol; 3709 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 3710 if (hw->bad_tx_carr_stats_fd && 3711 adapter->link_duplex == FULL_DUPLEX) { 3712 netdev->stats.tx_carrier_errors = 0; 3713 adapter->stats.tncrs = 0; 3714 } 3715 3716 /* Tx Dropped needs to be maintained elsewhere */ 3717 3718 /* Phy Stats */ 3719 if (hw->media_type == e1000_media_type_copper) { 3720 if ((adapter->link_speed == SPEED_1000) && 3721 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { 3722 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3723 adapter->phy_stats.idle_errors += phy_tmp; 3724 } 3725 3726 if ((hw->mac_type <= e1000_82546) && 3727 (hw->phy_type == e1000_phy_m88) && 3728 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) 3729 adapter->phy_stats.receive_errors += phy_tmp; 3730 } 3731 3732 /* Management Stats */ 3733 if (hw->has_smbus) { 3734 adapter->stats.mgptc += er32(MGTPTC); 3735 adapter->stats.mgprc += er32(MGTPRC); 3736 adapter->stats.mgpdc += er32(MGTPDC); 3737 } 3738 3739 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3740} 3741 3742/** 3743 * e1000_intr - Interrupt Handler 3744 * @irq: interrupt number 3745 * @data: pointer to a network interface device structure 3746 **/ 3747 3748static irqreturn_t e1000_intr(int irq, void *data) 3749{ 3750 struct net_device *netdev = data; 3751 struct e1000_adapter *adapter = netdev_priv(netdev); 3752 struct e1000_hw *hw = &adapter->hw; 3753 u32 icr = er32(ICR); 3754 3755 if (unlikely((!icr))) 3756 return IRQ_NONE; /* Not our interrupt */ 3757 3758 /* 3759 * we might have caused the interrupt, but the above 3760 * read cleared it, and just in case the driver is 3761 * down there is nothing to do so return handled 3762 */ 3763 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) 3764 return IRQ_HANDLED; 3765 3766 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3767 hw->get_link_status = 1; 3768 /* guard against interrupt when we're going down */ 3769 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3770 schedule_delayed_work(&adapter->watchdog_task, 1); 3771 } 3772 3773 /* disable interrupts, without the synchronize_irq bit */ 3774 ew32(IMC, ~0); 3775 E1000_WRITE_FLUSH(); 3776 3777 if (likely(napi_schedule_prep(&adapter->napi))) { 3778 adapter->total_tx_bytes = 0; 3779 adapter->total_tx_packets = 0; 3780 adapter->total_rx_bytes = 0; 3781 adapter->total_rx_packets = 0; 3782 __napi_schedule(&adapter->napi); 3783 } else { 3784 /* this really should not happen! 
if it does it is basically a 3785 * bug, but not a hard error, so enable ints and continue */ 3786 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3787 e1000_irq_enable(adapter); 3788 } 3789 3790 return IRQ_HANDLED; 3791} 3792 3793/** 3794 * e1000_clean - NAPI Rx polling callback 3795 * @adapter: board private structure 3796 **/ 3797static int e1000_clean(struct napi_struct *napi, int budget) 3798{ 3799 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 3800 int tx_clean_complete = 0, work_done = 0; 3801 3802 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); 3803 3804 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); 3805 3806 if (!tx_clean_complete) 3807 work_done = budget; 3808 3809 /* If budget not fully consumed, exit the polling mode */ 3810 if (work_done < budget) { 3811 if (likely(adapter->itr_setting & 3)) 3812 e1000_set_itr(adapter); 3813 napi_complete(napi); 3814 if (!test_bit(__E1000_DOWN, &adapter->flags)) 3815 e1000_irq_enable(adapter); 3816 } 3817 3818 return work_done; 3819} 3820 3821/** 3822 * e1000_clean_tx_irq - Reclaim resources after transmit completes 3823 * @adapter: board private structure 3824 **/ 3825static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, 3826 struct e1000_tx_ring *tx_ring) 3827{ 3828 struct e1000_hw *hw = &adapter->hw; 3829 struct net_device *netdev = adapter->netdev; 3830 struct e1000_tx_desc *tx_desc, *eop_desc; 3831 struct e1000_buffer *buffer_info; 3832 unsigned int i, eop; 3833 unsigned int count = 0; 3834 unsigned int total_tx_bytes=0, total_tx_packets=0; 3835 3836 i = tx_ring->next_to_clean; 3837 eop = tx_ring->buffer_info[i].next_to_watch; 3838 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3839 3840 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3841 (count < tx_ring->count)) { 3842 bool cleaned = false; 3843 rmb(); /* read buffer_info after eop_desc */ 3844 for ( ; !cleaned; count++) { 3845 tx_desc = E1000_TX_DESC(*tx_ring, i); 3846 buffer_info = &tx_ring->buffer_info[i]; 3847 cleaned = (i == eop); 3848 3849 if (cleaned) { 3850 total_tx_packets += buffer_info->segs; 3851 total_tx_bytes += buffer_info->bytecount; 3852 } 3853 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 3854 tx_desc->upper.data = 0; 3855 3856 if (unlikely(++i == tx_ring->count)) i = 0; 3857 } 3858 3859 eop = tx_ring->buffer_info[i].next_to_watch; 3860 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3861 } 3862 3863 tx_ring->next_to_clean = i; 3864 3865#define TX_WAKE_THRESHOLD 32 3866 if (unlikely(count && netif_carrier_ok(netdev) && 3867 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 3868 /* Make sure that anybody stopping the queue after this 3869 * sees the new next_to_clean. 
3870 */ 3871 smp_mb(); 3872 3873 if (netif_queue_stopped(netdev) && 3874 !(test_bit(__E1000_DOWN, &adapter->flags))) { 3875 netif_wake_queue(netdev); 3876 ++adapter->restart_queue; 3877 } 3878 } 3879 3880 if (adapter->detect_tx_hung) { 3881 /* Detect a transmit hang in hardware, this serializes the 3882 * check with the clearing of time_stamp and movement of i */ 3883 adapter->detect_tx_hung = false; 3884 if (tx_ring->buffer_info[eop].time_stamp && 3885 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 3886 (adapter->tx_timeout_factor * HZ)) && 3887 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3888 3889 /* detected Tx unit hang */ 3890 e_err(drv, "Detected Tx Unit Hang\n" 3891 " Tx Queue <%lu>\n" 3892 " TDH <%x>\n" 3893 " TDT <%x>\n" 3894 " next_to_use <%x>\n" 3895 " next_to_clean <%x>\n" 3896 "buffer_info[next_to_clean]\n" 3897 " time_stamp <%lx>\n" 3898 " next_to_watch <%x>\n" 3899 " jiffies <%lx>\n" 3900 " next_to_watch.status <%x>\n", 3901 (unsigned long)((tx_ring - adapter->tx_ring) / 3902 sizeof(struct e1000_tx_ring)), 3903 readl(hw->hw_addr + tx_ring->tdh), 3904 readl(hw->hw_addr + tx_ring->tdt), 3905 tx_ring->next_to_use, 3906 tx_ring->next_to_clean, 3907 tx_ring->buffer_info[eop].time_stamp, 3908 eop, 3909 jiffies, 3910 eop_desc->upper.fields.status); 3911 e1000_dump(adapter); 3912 netif_stop_queue(netdev); 3913 } 3914 } 3915 adapter->total_tx_bytes += total_tx_bytes; 3916 adapter->total_tx_packets += total_tx_packets; 3917 netdev->stats.tx_bytes += total_tx_bytes; 3918 netdev->stats.tx_packets += total_tx_packets; 3919 return count < tx_ring->count; 3920} 3921 3922/** 3923 * e1000_rx_checksum - Receive Checksum Offload for 82543 3924 * @adapter: board private structure 3925 * @status_err: receive descriptor status and error fields 3926 * @csum: receive descriptor csum field 3927 * @sk_buff: socket buffer with received data 3928 **/ 3929 3930static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, 3931 u32 csum, struct sk_buff *skb) 3932{ 3933 struct e1000_hw *hw = &adapter->hw; 3934 u16 status = (u16)status_err; 3935 u8 errors = (u8)(status_err >> 24); 3936 3937 skb_checksum_none_assert(skb); 3938 3939 /* 82543 or newer only */ 3940 if (unlikely(hw->mac_type < e1000_82543)) return; 3941 /* Ignore Checksum bit is set */ 3942 if (unlikely(status & E1000_RXD_STAT_IXSM)) return; 3943 /* TCP/UDP checksum error bit is set */ 3944 if (unlikely(errors & E1000_RXD_ERR_TCPE)) { 3945 /* let the stack verify checksum errors */ 3946 adapter->hw_csum_err++; 3947 return; 3948 } 3949 /* TCP/UDP Checksum has not been calculated */ 3950 if (!(status & E1000_RXD_STAT_TCPCS)) 3951 return; 3952 3953 /* It must be a TCP or UDP packet with a valid checksum */ 3954 if (likely(status & E1000_RXD_STAT_TCPCS)) { 3955 /* TCP checksum is good */ 3956 skb->ip_summed = CHECKSUM_UNNECESSARY; 3957 } 3958 adapter->hw_csum_good++; 3959} 3960 3961/** 3962 * e1000_consume_page - helper function 3963 **/ 3964static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, 3965 u16 length) 3966{ 3967 bi->page = NULL; 3968 skb->len += length; 3969 skb->data_len += length; 3970 skb->truesize += PAGE_SIZE; 3971} 3972 3973/** 3974 * e1000_receive_skb - helper function to handle rx indications 3975 * @adapter: board private structure 3976 * @status: descriptor status field as written by hardware 3977 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 3978 * @skb: pointer to sk_buff to be indicated to stack 3979 */ 3980static void e1000_receive_skb(struct 
e1000_adapter *adapter, u8 status, 3981 __le16 vlan, struct sk_buff *skb) 3982{ 3983 skb->protocol = eth_type_trans(skb, adapter->netdev); 3984 3985 if (status & E1000_RXD_STAT_VP) { 3986 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 3987 3988 __vlan_hwaccel_put_tag(skb, vid); 3989 } 3990 napi_gro_receive(&adapter->napi, skb); 3991} 3992 3993/** 3994 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy 3995 * @adapter: board private structure 3996 * @rx_ring: ring to clean 3997 * @work_done: amount of napi work completed this call 3998 * @work_to_do: max amount of work allowed for this call to do 3999 * 4000 * the return value indicates whether actual cleaning was done, there 4001 * is no guarantee that everything was cleaned 4002 */ 4003static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, 4004 struct e1000_rx_ring *rx_ring, 4005 int *work_done, int work_to_do) 4006{ 4007 struct e1000_hw *hw = &adapter->hw; 4008 struct net_device *netdev = adapter->netdev; 4009 struct pci_dev *pdev = adapter->pdev; 4010 struct e1000_rx_desc *rx_desc, *next_rxd; 4011 struct e1000_buffer *buffer_info, *next_buffer; 4012 unsigned long irq_flags; 4013 u32 length; 4014 unsigned int i; 4015 int cleaned_count = 0; 4016 bool cleaned = false; 4017 unsigned int total_rx_bytes=0, total_rx_packets=0; 4018 4019 i = rx_ring->next_to_clean; 4020 rx_desc = E1000_RX_DESC(*rx_ring, i); 4021 buffer_info = &rx_ring->buffer_info[i]; 4022 4023 while (rx_desc->status & E1000_RXD_STAT_DD) { 4024 struct sk_buff *skb; 4025 u8 status; 4026 4027 if (*work_done >= work_to_do) 4028 break; 4029 (*work_done)++; 4030 rmb(); /* read descriptor and rx_buffer_info after status DD */ 4031 4032 status = rx_desc->status; 4033 skb = buffer_info->skb; 4034 buffer_info->skb = NULL; 4035 4036 if (++i == rx_ring->count) i = 0; 4037 next_rxd = E1000_RX_DESC(*rx_ring, i); 4038 prefetch(next_rxd); 4039 4040 next_buffer = &rx_ring->buffer_info[i]; 4041 4042 cleaned = true; 4043 cleaned_count++; 4044 dma_unmap_page(&pdev->dev, buffer_info->dma, 4045 buffer_info->length, DMA_FROM_DEVICE); 4046 buffer_info->dma = 0; 4047 4048 length = le16_to_cpu(rx_desc->length); 4049 4050 /* errors is only valid for DD + EOP descriptors */ 4051 if (unlikely((status & E1000_RXD_STAT_EOP) && 4052 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { 4053 u8 last_byte = *(skb->data + length - 1); 4054 if (TBI_ACCEPT(hw, status, rx_desc->errors, length, 4055 last_byte)) { 4056 spin_lock_irqsave(&adapter->stats_lock, 4057 irq_flags); 4058 e1000_tbi_adjust_stats(hw, &adapter->stats, 4059 length, skb->data); 4060 spin_unlock_irqrestore(&adapter->stats_lock, 4061 irq_flags); 4062 length--; 4063 } else { 4064 /* recycle both page and skb */ 4065 buffer_info->skb = skb; 4066 /* an error means any chain goes out the window 4067 * too */ 4068 if (rx_ring->rx_skb_top) 4069 dev_kfree_skb(rx_ring->rx_skb_top); 4070 rx_ring->rx_skb_top = NULL; 4071 goto next_desc; 4072 } 4073 } 4074 4075#define rxtop rx_ring->rx_skb_top 4076 if (!(status & E1000_RXD_STAT_EOP)) { 4077 /* this descriptor is only the beginning (or middle) */ 4078 if (!rxtop) { 4079 /* this is the beginning of a chain */ 4080 rxtop = skb; 4081 skb_fill_page_desc(rxtop, 0, buffer_info->page, 4082 0, length); 4083 } else { 4084 /* this is the middle of a chain */ 4085 skb_fill_page_desc(rxtop, 4086 skb_shinfo(rxtop)->nr_frags, 4087 buffer_info->page, 0, length); 4088 /* re-use the skb, only consumed the page */ 4089 buffer_info->skb = skb; 4090 } 4091 
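			/* Either way the page is now owned by the chain head;
			 * account its length against rxtop and keep collecting
			 * fragments until a descriptor with EOP arrives.
			 */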
e1000_consume_page(buffer_info, rxtop, length); 4092 goto next_desc; 4093 } else { 4094 if (rxtop) { 4095 /* end of the chain */ 4096 skb_fill_page_desc(rxtop, 4097 skb_shinfo(rxtop)->nr_frags, 4098 buffer_info->page, 0, length); 4099 /* re-use the current skb, we only consumed the 4100 * page */ 4101 buffer_info->skb = skb; 4102 skb = rxtop; 4103 rxtop = NULL; 4104 e1000_consume_page(buffer_info, skb, length); 4105 } else { 4106 /* no chain, got EOP, this buf is the packet 4107 * copybreak to save the put_page/alloc_page */ 4108 if (length <= copybreak && 4109 skb_tailroom(skb) >= length) { 4110 u8 *vaddr; 4111 vaddr = kmap_atomic(buffer_info->page); 4112 memcpy(skb_tail_pointer(skb), vaddr, length); 4113 kunmap_atomic(vaddr); 4114 /* re-use the page, so don't erase 4115 * buffer_info->page */ 4116 skb_put(skb, length); 4117 } else { 4118 skb_fill_page_desc(skb, 0, 4119 buffer_info->page, 0, 4120 length); 4121 e1000_consume_page(buffer_info, skb, 4122 length); 4123 } 4124 } 4125 } 4126 4127 /* Receive Checksum Offload XXX recompute due to CRC strip? */ 4128 e1000_rx_checksum(adapter, 4129 (u32)(status) | 4130 ((u32)(rx_desc->errors) << 24), 4131 le16_to_cpu(rx_desc->csum), skb); 4132 4133 total_rx_bytes += (skb->len - 4); /* don't count FCS */ 4134 if (likely(!(netdev->features & NETIF_F_RXFCS))) 4135 pskb_trim(skb, skb->len - 4); 4136 total_rx_packets++; 4137 4138 /* eth type trans needs skb->data to point to something */ 4139 if (!pskb_may_pull(skb, ETH_HLEN)) { 4140 e_err(drv, "pskb_may_pull failed.\n"); 4141 dev_kfree_skb(skb); 4142 goto next_desc; 4143 } 4144 4145 e1000_receive_skb(adapter, status, rx_desc->special, skb); 4146 4147next_desc: 4148 rx_desc->status = 0; 4149 4150 /* return some buffers to hardware, one at a time is too slow */ 4151 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { 4152 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4153 cleaned_count = 0; 4154 } 4155 4156 /* use prefetched values */ 4157 rx_desc = next_rxd; 4158 buffer_info = next_buffer; 4159 } 4160 rx_ring->next_to_clean = i; 4161 4162 cleaned_count = E1000_DESC_UNUSED(rx_ring); 4163 if (cleaned_count) 4164 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4165 4166 adapter->total_rx_packets += total_rx_packets; 4167 adapter->total_rx_bytes += total_rx_bytes; 4168 netdev->stats.rx_bytes += total_rx_bytes; 4169 netdev->stats.rx_packets += total_rx_packets; 4170 return cleaned; 4171} 4172 4173/* 4174 * this should improve performance for small packets with large amounts 4175 * of reassembly being done in the stack 4176 */ 4177static void e1000_check_copybreak(struct net_device *netdev, 4178 struct e1000_buffer *buffer_info, 4179 u32 length, struct sk_buff **skb) 4180{ 4181 struct sk_buff *new_skb; 4182 4183 if (length > copybreak) 4184 return; 4185 4186 new_skb = netdev_alloc_skb_ip_align(netdev, length); 4187 if (!new_skb) 4188 return; 4189 4190 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, 4191 (*skb)->data - NET_IP_ALIGN, 4192 length + NET_IP_ALIGN); 4193 /* save the skb in buffer_info as good */ 4194 buffer_info->skb = *skb; 4195 *skb = new_skb; 4196} 4197 4198/** 4199 * e1000_clean_rx_irq - Send received data up the network stack; legacy 4200 * @adapter: board private structure 4201 * @rx_ring: ring to clean 4202 * @work_done: amount of napi work completed this call 4203 * @work_to_do: max amount of work allowed for this call to do 4204 */ 4205static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 4206 struct e1000_rx_ring *rx_ring, 4207 int *work_done, int 
			       work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case we need to toss it.  In fact, we
		 * are to toss every packet with the EOP bit clear and the next
		 * frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

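	/* Hand any remaining unused descriptors back to the hardware before
	 * leaving the poll, in case fewer than E1000_RX_BUFFER_WRITE buffers
	 * were cleaned since the last batched refill above.
	 */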
cleaned_count = E1000_DESC_UNUSED(rx_ring); 4326 if (cleaned_count) 4327 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); 4328 4329 adapter->total_rx_packets += total_rx_packets; 4330 adapter->total_rx_bytes += total_rx_bytes; 4331 netdev->stats.rx_bytes += total_rx_bytes; 4332 netdev->stats.rx_packets += total_rx_packets; 4333 return cleaned; 4334} 4335 4336/** 4337 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers 4338 * @adapter: address of board private structure 4339 * @rx_ring: pointer to receive ring structure 4340 * @cleaned_count: number of buffers to allocate this pass 4341 **/ 4342 4343static void 4344e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, 4345 struct e1000_rx_ring *rx_ring, int cleaned_count) 4346{ 4347 struct net_device *netdev = adapter->netdev; 4348 struct pci_dev *pdev = adapter->pdev; 4349 struct e1000_rx_desc *rx_desc; 4350 struct e1000_buffer *buffer_info; 4351 struct sk_buff *skb; 4352 unsigned int i; 4353 unsigned int bufsz = 256 - 16 /*for skb_reserve */ ; 4354 4355 i = rx_ring->next_to_use; 4356 buffer_info = &rx_ring->buffer_info[i]; 4357 4358 while (cleaned_count--) { 4359 skb = buffer_info->skb; 4360 if (skb) { 4361 skb_trim(skb, 0); 4362 goto check_page; 4363 } 4364 4365 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4366 if (unlikely(!skb)) { 4367 /* Better luck next round */ 4368 adapter->alloc_rx_buff_failed++; 4369 break; 4370 } 4371 4372 /* Fix for errata 23, can't cross 64kB boundary */ 4373 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4374 struct sk_buff *oldskb = skb; 4375 e_err(rx_err, "skb align check failed: %u bytes at " 4376 "%p\n", bufsz, skb->data); 4377 /* Try again, without freeing the previous */ 4378 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4379 /* Failed allocation, critical failure */ 4380 if (!skb) { 4381 dev_kfree_skb(oldskb); 4382 adapter->alloc_rx_buff_failed++; 4383 break; 4384 } 4385 4386 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4387 /* give up */ 4388 dev_kfree_skb(skb); 4389 dev_kfree_skb(oldskb); 4390 break; /* while (cleaned_count--) */ 4391 } 4392 4393 /* Use new allocation */ 4394 dev_kfree_skb(oldskb); 4395 } 4396 buffer_info->skb = skb; 4397 buffer_info->length = adapter->rx_buffer_len; 4398check_page: 4399 /* allocate a new page if necessary */ 4400 if (!buffer_info->page) { 4401 buffer_info->page = alloc_page(GFP_ATOMIC); 4402 if (unlikely(!buffer_info->page)) { 4403 adapter->alloc_rx_buff_failed++; 4404 break; 4405 } 4406 } 4407 4408 if (!buffer_info->dma) { 4409 buffer_info->dma = dma_map_page(&pdev->dev, 4410 buffer_info->page, 0, 4411 buffer_info->length, 4412 DMA_FROM_DEVICE); 4413 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4414 put_page(buffer_info->page); 4415 dev_kfree_skb(skb); 4416 buffer_info->page = NULL; 4417 buffer_info->skb = NULL; 4418 buffer_info->dma = 0; 4419 adapter->alloc_rx_buff_failed++; 4420 break; /* while !buffer_info->skb */ 4421 } 4422 } 4423 4424 rx_desc = E1000_RX_DESC(*rx_ring, i); 4425 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4426 4427 if (unlikely(++i == rx_ring->count)) 4428 i = 0; 4429 buffer_info = &rx_ring->buffer_info[i]; 4430 } 4431 4432 if (likely(rx_ring->next_to_use != i)) { 4433 rx_ring->next_to_use = i; 4434 if (unlikely(i-- == 0)) 4435 i = (rx_ring->count - 1); 4436 4437 /* Force memory writes to complete before letting h/w 4438 * know there are new descriptors to fetch. (Only 4439 * applicable for weak-ordered memory model archs, 4440 * such as IA-64). 
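		 * The writel() to rx_ring->rdt below publishes the new tail
		 * index so the hardware can start using the freshly mapped
		 * buffers.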
*/ 4441 wmb(); 4442 writel(i, adapter->hw.hw_addr + rx_ring->rdt); 4443 } 4444} 4445 4446/** 4447 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 4448 * @adapter: address of board private structure 4449 **/ 4450 4451static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 4452 struct e1000_rx_ring *rx_ring, 4453 int cleaned_count) 4454{ 4455 struct e1000_hw *hw = &adapter->hw; 4456 struct net_device *netdev = adapter->netdev; 4457 struct pci_dev *pdev = adapter->pdev; 4458 struct e1000_rx_desc *rx_desc; 4459 struct e1000_buffer *buffer_info; 4460 struct sk_buff *skb; 4461 unsigned int i; 4462 unsigned int bufsz = adapter->rx_buffer_len; 4463 4464 i = rx_ring->next_to_use; 4465 buffer_info = &rx_ring->buffer_info[i]; 4466 4467 while (cleaned_count--) { 4468 skb = buffer_info->skb; 4469 if (skb) { 4470 skb_trim(skb, 0); 4471 goto map_skb; 4472 } 4473 4474 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4475 if (unlikely(!skb)) { 4476 /* Better luck next round */ 4477 adapter->alloc_rx_buff_failed++; 4478 break; 4479 } 4480 4481 /* Fix for errata 23, can't cross 64kB boundary */ 4482 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4483 struct sk_buff *oldskb = skb; 4484 e_err(rx_err, "skb align check failed: %u bytes at " 4485 "%p\n", bufsz, skb->data); 4486 /* Try again, without freeing the previous */ 4487 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4488 /* Failed allocation, critical failure */ 4489 if (!skb) { 4490 dev_kfree_skb(oldskb); 4491 adapter->alloc_rx_buff_failed++; 4492 break; 4493 } 4494 4495 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4496 /* give up */ 4497 dev_kfree_skb(skb); 4498 dev_kfree_skb(oldskb); 4499 adapter->alloc_rx_buff_failed++; 4500 break; /* while !buffer_info->skb */ 4501 } 4502 4503 /* Use new allocation */ 4504 dev_kfree_skb(oldskb); 4505 } 4506 buffer_info->skb = skb; 4507 buffer_info->length = adapter->rx_buffer_len; 4508map_skb: 4509 buffer_info->dma = dma_map_single(&pdev->dev, 4510 skb->data, 4511 buffer_info->length, 4512 DMA_FROM_DEVICE); 4513 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 4514 dev_kfree_skb(skb); 4515 buffer_info->skb = NULL; 4516 buffer_info->dma = 0; 4517 adapter->alloc_rx_buff_failed++; 4518 break; /* while !buffer_info->skb */ 4519 } 4520 4521 /* 4522 * XXX if it was allocated cleanly it will never map to a 4523 * boundary crossing 4524 */ 4525 4526 /* Fix for errata 23, can't cross 64kB boundary */ 4527 if (!e1000_check_64k_bound(adapter, 4528 (void *)(unsigned long)buffer_info->dma, 4529 adapter->rx_buffer_len)) { 4530 e_err(rx_err, "dma align check failed: %u bytes at " 4531 "%p\n", adapter->rx_buffer_len, 4532 (void *)(unsigned long)buffer_info->dma); 4533 dev_kfree_skb(skb); 4534 buffer_info->skb = NULL; 4535 4536 dma_unmap_single(&pdev->dev, buffer_info->dma, 4537 adapter->rx_buffer_len, 4538 DMA_FROM_DEVICE); 4539 buffer_info->dma = 0; 4540 4541 adapter->alloc_rx_buff_failed++; 4542 break; /* while !buffer_info->skb */ 4543 } 4544 rx_desc = E1000_RX_DESC(*rx_ring, i); 4545 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4546 4547 if (unlikely(++i == rx_ring->count)) 4548 i = 0; 4549 buffer_info = &rx_ring->buffer_info[i]; 4550 } 4551 4552 if (likely(rx_ring->next_to_use != i)) { 4553 rx_ring->next_to_use = i; 4554 if (unlikely(i-- == 0)) 4555 i = (rx_ring->count - 1); 4556 4557 /* Force memory writes to complete before letting h/w 4558 * know there are new descriptors to fetch. 
(Only 4559 * applicable for weak-ordered memory model archs, 4560 * such as IA-64). */ 4561 wmb(); 4562 writel(i, hw->hw_addr + rx_ring->rdt); 4563 } 4564} 4565 4566/** 4567 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 4568 * @adapter: 4569 **/ 4570 4571static void e1000_smartspeed(struct e1000_adapter *adapter) 4572{ 4573 struct e1000_hw *hw = &adapter->hw; 4574 u16 phy_status; 4575 u16 phy_ctrl; 4576 4577 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || 4578 !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) 4579 return; 4580 4581 if (adapter->smartspeed == 0) { 4582 /* If Master/Slave config fault is asserted twice, 4583 * we assume back-to-back */ 4584 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4585 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4586 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4587 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; 4588 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4589 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4590 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4591 e1000_write_phy_reg(hw, PHY_1000T_CTRL, 4592 phy_ctrl); 4593 adapter->smartspeed++; 4594 if (!e1000_phy_setup_autoneg(hw) && 4595 !e1000_read_phy_reg(hw, PHY_CTRL, 4596 &phy_ctrl)) { 4597 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4598 MII_CR_RESTART_AUTO_NEG); 4599 e1000_write_phy_reg(hw, PHY_CTRL, 4600 phy_ctrl); 4601 } 4602 } 4603 return; 4604 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4605 /* If still no link, perhaps using 2/3 pair cable */ 4606 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4607 phy_ctrl |= CR_1000T_MS_ENABLE; 4608 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4609 if (!e1000_phy_setup_autoneg(hw) && 4610 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { 4611 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4612 MII_CR_RESTART_AUTO_NEG); 4613 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); 4614 } 4615 } 4616 /* Restart process after E1000_SMARTSPEED_MAX iterations */ 4617 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4618 adapter->smartspeed = 0; 4619} 4620 4621/** 4622 * e1000_ioctl - 4623 * @netdev: 4624 * @ifreq: 4625 * @cmd: 4626 **/ 4627 4628static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 4629{ 4630 switch (cmd) { 4631 case SIOCGMIIPHY: 4632 case SIOCGMIIREG: 4633 case SIOCSMIIREG: 4634 return e1000_mii_ioctl(netdev, ifr, cmd); 4635 default: 4636 return -EOPNOTSUPP; 4637 } 4638} 4639 4640/** 4641 * e1000_mii_ioctl - 4642 * @netdev: 4643 * @ifreq: 4644 * @cmd: 4645 **/ 4646 4647static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 4648 int cmd) 4649{ 4650 struct e1000_adapter *adapter = netdev_priv(netdev); 4651 struct e1000_hw *hw = &adapter->hw; 4652 struct mii_ioctl_data *data = if_mii(ifr); 4653 int retval; 4654 u16 mii_reg; 4655 unsigned long flags; 4656 4657 if (hw->media_type != e1000_media_type_copper) 4658 return -EOPNOTSUPP; 4659 4660 switch (cmd) { 4661 case SIOCGMIIPHY: 4662 data->phy_id = hw->phy_addr; 4663 break; 4664 case SIOCGMIIREG: 4665 spin_lock_irqsave(&adapter->stats_lock, flags); 4666 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, 4667 &data->val_out)) { 4668 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4669 return -EIO; 4670 } 4671 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4672 break; 4673 case SIOCSMIIREG: 4674 if (data->reg_num & ~(0x1F)) 4675 return -EFAULT; 4676 mii_reg = data->val_in; 4677 spin_lock_irqsave(&adapter->stats_lock, flags); 4678 if (e1000_write_phy_reg(hw, data->reg_num, 4679 mii_reg)) { 4680 
spin_unlock_irqrestore(&adapter->stats_lock, flags); 4681 return -EIO; 4682 } 4683 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4684 if (hw->media_type == e1000_media_type_copper) { 4685 switch (data->reg_num) { 4686 case PHY_CTRL: 4687 if (mii_reg & MII_CR_POWER_DOWN) 4688 break; 4689 if (mii_reg & MII_CR_AUTO_NEG_EN) { 4690 hw->autoneg = 1; 4691 hw->autoneg_advertised = 0x2F; 4692 } else { 4693 u32 speed; 4694 if (mii_reg & 0x40) 4695 speed = SPEED_1000; 4696 else if (mii_reg & 0x2000) 4697 speed = SPEED_100; 4698 else 4699 speed = SPEED_10; 4700 retval = e1000_set_spd_dplx( 4701 adapter, speed, 4702 ((mii_reg & 0x100) 4703 ? DUPLEX_FULL : 4704 DUPLEX_HALF)); 4705 if (retval) 4706 return retval; 4707 } 4708 if (netif_running(adapter->netdev)) 4709 e1000_reinit_locked(adapter); 4710 else 4711 e1000_reset(adapter); 4712 break; 4713 case M88E1000_PHY_SPEC_CTRL: 4714 case M88E1000_EXT_PHY_SPEC_CTRL: 4715 if (e1000_phy_reset(hw)) 4716 return -EIO; 4717 break; 4718 } 4719 } else { 4720 switch (data->reg_num) { 4721 case PHY_CTRL: 4722 if (mii_reg & MII_CR_POWER_DOWN) 4723 break; 4724 if (netif_running(adapter->netdev)) 4725 e1000_reinit_locked(adapter); 4726 else 4727 e1000_reset(adapter); 4728 break; 4729 } 4730 } 4731 break; 4732 default: 4733 return -EOPNOTSUPP; 4734 } 4735 return E1000_SUCCESS; 4736} 4737 4738void e1000_pci_set_mwi(struct e1000_hw *hw) 4739{ 4740 struct e1000_adapter *adapter = hw->back; 4741 int ret_val = pci_set_mwi(adapter->pdev); 4742 4743 if (ret_val) 4744 e_err(probe, "Error in setting MWI\n"); 4745} 4746 4747void e1000_pci_clear_mwi(struct e1000_hw *hw) 4748{ 4749 struct e1000_adapter *adapter = hw->back; 4750 4751 pci_clear_mwi(adapter->pdev); 4752} 4753 4754int e1000_pcix_get_mmrbc(struct e1000_hw *hw) 4755{ 4756 struct e1000_adapter *adapter = hw->back; 4757 return pcix_get_mmrbc(adapter->pdev); 4758} 4759 4760void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) 4761{ 4762 struct e1000_adapter *adapter = hw->back; 4763 pcix_set_mmrbc(adapter->pdev, mmrbc); 4764} 4765 4766void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) 4767{ 4768 outl(value, port); 4769} 4770 4771static bool e1000_vlan_used(struct e1000_adapter *adapter) 4772{ 4773 u16 vid; 4774 4775 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 4776 return true; 4777 return false; 4778} 4779 4780static void __e1000_vlan_mode(struct e1000_adapter *adapter, 4781 netdev_features_t features) 4782{ 4783 struct e1000_hw *hw = &adapter->hw; 4784 u32 ctrl; 4785 4786 ctrl = er32(CTRL); 4787 if (features & NETIF_F_HW_VLAN_RX) { 4788 /* enable VLAN tag insert/strip */ 4789 ctrl |= E1000_CTRL_VME; 4790 } else { 4791 /* disable VLAN tag insert/strip */ 4792 ctrl &= ~E1000_CTRL_VME; 4793 } 4794 ew32(CTRL, ctrl); 4795} 4796static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, 4797 bool filter_on) 4798{ 4799 struct e1000_hw *hw = &adapter->hw; 4800 u32 rctl; 4801 4802 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4803 e1000_irq_disable(adapter); 4804 4805 __e1000_vlan_mode(adapter, adapter->netdev->features); 4806 if (filter_on) { 4807 /* enable VLAN receive filtering */ 4808 rctl = er32(RCTL); 4809 rctl &= ~E1000_RCTL_CFIEN; 4810 if (!(adapter->netdev->flags & IFF_PROMISC)) 4811 rctl |= E1000_RCTL_VFE; 4812 ew32(RCTL, rctl); 4813 e1000_update_mng_vlan(adapter); 4814 } else { 4815 /* disable VLAN receive filtering */ 4816 rctl = er32(RCTL); 4817 rctl &= ~E1000_RCTL_VFE; 4818 ew32(RCTL, rctl); 4819 } 4820 4821 if (!test_bit(__E1000_DOWN, &adapter->flags)) 4822 
		e1000_irq_enable(adapter);
}

static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}

int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device
*netdev = pci_get_drvdata(pdev); 4949 struct e1000_adapter *adapter = netdev_priv(netdev); 4950 struct e1000_hw *hw = &adapter->hw; 4951 u32 ctrl, ctrl_ext, rctl, status; 4952 u32 wufc = adapter->wol; 4953#ifdef CONFIG_PM 4954 int retval = 0; 4955#endif 4956 4957 netif_device_detach(netdev); 4958 4959 if (netif_running(netdev)) { 4960 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); 4961 e1000_down(adapter); 4962 } 4963 4964#ifdef CONFIG_PM 4965 retval = pci_save_state(pdev); 4966 if (retval) 4967 return retval; 4968#endif 4969 4970 status = er32(STATUS); 4971 if (status & E1000_STATUS_LU) 4972 wufc &= ~E1000_WUFC_LNKC; 4973 4974 if (wufc) { 4975 e1000_setup_rctl(adapter); 4976 e1000_set_rx_mode(netdev); 4977 4978 rctl = er32(RCTL); 4979 4980 /* turn on all-multi mode if wake on multicast is enabled */ 4981 if (wufc & E1000_WUFC_MC) 4982 rctl |= E1000_RCTL_MPE; 4983 4984 /* enable receives in the hardware */ 4985 ew32(RCTL, rctl | E1000_RCTL_EN); 4986 4987 if (hw->mac_type >= e1000_82540) { 4988 ctrl = er32(CTRL); 4989 /* advertise wake from D3Cold */ 4990 #define E1000_CTRL_ADVD3WUC 0x00100000 4991 /* phy power management enable */ 4992 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 4993 ctrl |= E1000_CTRL_ADVD3WUC | 4994 E1000_CTRL_EN_PHY_PWR_MGMT; 4995 ew32(CTRL, ctrl); 4996 } 4997 4998 if (hw->media_type == e1000_media_type_fiber || 4999 hw->media_type == e1000_media_type_internal_serdes) { 5000 /* keep the laser running in D3 */ 5001 ctrl_ext = er32(CTRL_EXT); 5002 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 5003 ew32(CTRL_EXT, ctrl_ext); 5004 } 5005 5006 ew32(WUC, E1000_WUC_PME_EN); 5007 ew32(WUFC, wufc); 5008 } else { 5009 ew32(WUC, 0); 5010 ew32(WUFC, 0); 5011 } 5012 5013 e1000_release_manageability(adapter); 5014 5015 *enable_wake = !!wufc; 5016 5017 /* make sure adapter isn't asleep if manageability is enabled */ 5018 if (adapter->en_mng_pt) 5019 *enable_wake = true; 5020 5021 if (netif_running(netdev)) 5022 e1000_free_irq(adapter); 5023 5024 pci_disable_device(pdev); 5025 5026 return 0; 5027} 5028 5029#ifdef CONFIG_PM 5030static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) 5031{ 5032 int retval; 5033 bool wake; 5034 5035 retval = __e1000_shutdown(pdev, &wake); 5036 if (retval) 5037 return retval; 5038 5039 if (wake) { 5040 pci_prepare_to_sleep(pdev); 5041 } else { 5042 pci_wake_from_d3(pdev, false); 5043 pci_set_power_state(pdev, PCI_D3hot); 5044 } 5045 5046 return 0; 5047} 5048 5049static int e1000_resume(struct pci_dev *pdev) 5050{ 5051 struct net_device *netdev = pci_get_drvdata(pdev); 5052 struct e1000_adapter *adapter = netdev_priv(netdev); 5053 struct e1000_hw *hw = &adapter->hw; 5054 u32 err; 5055 5056 pci_set_power_state(pdev, PCI_D0); 5057 pci_restore_state(pdev); 5058 pci_save_state(pdev); 5059 5060 if (adapter->need_ioport) 5061 err = pci_enable_device(pdev); 5062 else 5063 err = pci_enable_device_mem(pdev); 5064 if (err) { 5065 pr_err("Cannot enable PCI device from suspend\n"); 5066 return err; 5067 } 5068 pci_set_master(pdev); 5069 5070 pci_enable_wake(pdev, PCI_D3hot, 0); 5071 pci_enable_wake(pdev, PCI_D3cold, 0); 5072 5073 if (netif_running(netdev)) { 5074 err = e1000_request_irq(adapter); 5075 if (err) 5076 return err; 5077 } 5078 5079 e1000_power_up_phy(adapter); 5080 e1000_reset(adapter); 5081 ew32(WUS, ~0); 5082 5083 e1000_init_manageability(adapter); 5084 5085 if (netif_running(netdev)) 5086 e1000_up(adapter); 5087 5088 netif_device_attach(netdev); 5089 5090 return 0; 5091} 5092#endif 5093 5094static void e1000_shutdown(struct pci_dev *pdev) 
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

/* e1000_main.c */