/* e1000_82575.c — revision 6cb7674bf2bc05c6219e33115ec76b7b401cd082 */
1/******************************************************************************* 2 3 Intel(R) Gigabit Ethernet Linux driver 4 Copyright(c) 2007-2013 Intel Corporation. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Contact Information: 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 25 26*******************************************************************************/ 27 28/* e1000_82575 29 * e1000_82576 30 */ 31 32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 33 34#include <linux/types.h> 35#include <linux/if_ether.h> 36#include <linux/i2c.h> 37 38#include "e1000_mac.h" 39#include "e1000_82575.h" 40#include "e1000_i210.h" 41 42static s32 igb_get_invariants_82575(struct e1000_hw *); 43static s32 igb_acquire_phy_82575(struct e1000_hw *); 44static void igb_release_phy_82575(struct e1000_hw *); 45static s32 igb_acquire_nvm_82575(struct e1000_hw *); 46static void igb_release_nvm_82575(struct e1000_hw *); 47static s32 igb_check_for_link_82575(struct e1000_hw *); 48static s32 igb_get_cfg_done_82575(struct e1000_hw *); 49static s32 igb_init_hw_82575(struct e1000_hw *); 50static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); 51static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); 52static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); 53static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); 54static s32 igb_reset_hw_82575(struct e1000_hw *); 55static s32 igb_reset_hw_82580(struct e1000_hw *); 56static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); 57static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); 58static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); 59static s32 igb_setup_copper_link_82575(struct e1000_hw *); 60static s32 igb_setup_serdes_link_82575(struct e1000_hw *); 61static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); 62static void igb_clear_hw_cntrs_82575(struct e1000_hw *); 63static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); 64static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, 65 u16 *); 66static s32 igb_get_phy_id_82575(struct e1000_hw *); 67static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); 68static bool igb_sgmii_active_82575(struct e1000_hw *); 
69static s32 igb_reset_init_script_82575(struct e1000_hw *); 70static s32 igb_read_mac_addr_82575(struct e1000_hw *); 71static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); 72static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); 73static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); 74static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); 75static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); 76static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); 77static const u16 e1000_82580_rxpbs_table[] = 78 { 36, 72, 144, 1, 2, 4, 8, 16, 79 35, 70, 140 }; 80#define E1000_82580_RXPBS_TABLE_SIZE \ 81 (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) 82 83/** 84 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO 85 * @hw: pointer to the HW structure 86 * 87 * Called to determine if the I2C pins are being used for I2C or as an 88 * external MDIO interface since the two options are mutually exclusive. 89 **/ 90static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) 91{ 92 u32 reg = 0; 93 bool ext_mdio = false; 94 95 switch (hw->mac.type) { 96 case e1000_82575: 97 case e1000_82576: 98 reg = rd32(E1000_MDIC); 99 ext_mdio = !!(reg & E1000_MDIC_DEST); 100 break; 101 case e1000_82580: 102 case e1000_i350: 103 case e1000_i354: 104 case e1000_i210: 105 case e1000_i211: 106 reg = rd32(E1000_MDICNFG); 107 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); 108 break; 109 default: 110 break; 111 } 112 return ext_mdio; 113} 114 115/** 116 * igb_init_phy_params_82575 - Init PHY func ptrs. 
/**
 * igb_init_phy_params_82575 - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 *
 * Sets up PHY function pointers, configures I2C vs. MDIO access for
 * SGMII, and identifies the attached PHY.  Non-copper media has no PHY,
 * so the PHY type is set to none and all further setup is skipped.
 *
 * Returns 0 on success, -E1000_ERR_PHY for an unrecognized PHY id, or
 * the error from igb_get_phy_id_82575().
 **/
static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u32 ctrl_ext;

	if (hw->phy.media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		goto out;
	}

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us = 100;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	if (igb_sgmii_active_82575(hw)) {
		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
		/* SGMII-attached PHYs are reached over the I2C interface */
		ctrl_ext |= E1000_CTRL_I2C_ENA;
	} else {
		phy->ops.reset = igb_phy_hw_reset;
		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
	}

	wr32(E1000_CTRL_EXT, ctrl_ext);
	igb_reset_mdicnfg_82580(hw);

	/* SGMII without external MDIO uses the I2C-based register
	 * accessors; everything else goes through MDIO-style access.
	 */
	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
	} else {
		switch (hw->mac.type) {
		case e1000_82580:
		case e1000_i350:
		case e1000_i354:
			phy->ops.read_reg = igb_read_phy_reg_82580;
			phy->ops.write_reg = igb_write_phy_reg_82580;
			break;
		case e1000_i210:
		case e1000_i211:
			phy->ops.read_reg = igb_read_phy_reg_gs40g;
			phy->ops.write_reg = igb_write_phy_reg_gs40g;
			break;
		default:
			phy->ops.read_reg = igb_read_phy_reg_igp;
			phy->ops.write_reg = igb_write_phy_reg_igp;
		}
	}

	/* set lan id */
	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
			E1000_STATUS_FUNC_SHIFT;

	/* Set phy->phy_addr and phy->id. */
	ret_val = igb_get_phy_id_82575(hw);
	if (ret_val)
		return ret_val;

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case M88E1545_E_PHY_ID:
	case I347AT4_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1111_I_PHY_ID:
		phy->type = e1000_phy_m88;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		/* M88E1111 predates the gen2 cable-length method */
		if (phy->id != M88E1111_I_PHY_ID)
			phy->ops.get_cable_length =
				 igb_get_cable_length_m88_gen2;
		else
			phy->ops.get_cable_length = igb_get_cable_length_m88;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->ops.get_phy_info = igb_get_phy_info_igp;
		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
		break;
	case I82580_I_PHY_ID:
	case I350_I_PHY_ID:
		phy->type = e1000_phy_82580;
		phy->ops.force_speed_duplex =
			 igb_phy_force_speed_duplex_82580;
		phy->ops.get_cable_length = igb_get_cable_length_82580;
		phy->ops.get_phy_info = igb_get_phy_info_82580;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		break;
	case I210_I_PHY_ID:
		phy->type = e1000_phy_i210;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
/**
 * igb_init_nvm_params_82575 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 *
 * Derives the NVM word size from the EECD register and installs the
 * per-MAC NVM acquire/release/read/write/validate/update function
 * pointers.  Always returns 0.
 **/
static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);
	u16 size;

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
		     E1000_EECD_SIZE_EX_SHIFT);
	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported
	 */
	if (size > 15)
		size = 15;

	nvm->word_size = 1 << size;
	if (hw->mac.type < e1000_i210) {
		/* pre-i210 parts use an external SPI EEPROM */
		nvm->opcode_bits = 8;
		nvm->delay_usec = 1;

		switch (nvm->override) {
		case e1000_nvm_override_spi_large:
			nvm->page_size = 32;
			nvm->address_bits = 16;
			break;
		case e1000_nvm_override_spi_small:
			nvm->page_size = 8;
			nvm->address_bits = 8;
			break;
		default:
			/* EECD_ADDR_BITS set indicates a large SPI device */
			nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
			nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
					    16 : 8;
			break;
		}
		if (nvm->word_size == (1 << 15))
			nvm->page_size = 128;

		nvm->type = e1000_nvm_eeprom_spi;
	} else {
		nvm->type = e1000_nvm_flash_hw;
	}

	/* NVM Function Pointers */
	switch (hw->mac.type) {
	case e1000_82580:
		nvm->ops.validate = igb_validate_nvm_checksum_82580;
		nvm->ops.update = igb_update_nvm_checksum_82580;
		nvm->ops.acquire = igb_acquire_nvm_82575;
		nvm->ops.release = igb_release_nvm_82575;
		/* EERD cannot address words beyond the 32K boundary */
		if (nvm->word_size < (1 << 15))
			nvm->ops.read = igb_read_nvm_eerd;
		else
			nvm->ops.read = igb_read_nvm_spi;
		nvm->ops.write = igb_write_nvm_spi;
		break;
	case e1000_i354:
	case e1000_i350:
		nvm->ops.validate = igb_validate_nvm_checksum_i350;
		nvm->ops.update = igb_update_nvm_checksum_i350;
		nvm->ops.acquire = igb_acquire_nvm_82575;
		nvm->ops.release = igb_release_nvm_82575;
		if (nvm->word_size < (1 << 15))
			nvm->ops.read = igb_read_nvm_eerd;
		else
			nvm->ops.read = igb_read_nvm_spi;
		nvm->ops.write = igb_write_nvm_spi;
		break;
	case e1000_i210:
		nvm->ops.validate = igb_validate_nvm_checksum_i210;
		nvm->ops.update = igb_update_nvm_checksum_i210;
		nvm->ops.acquire = igb_acquire_nvm_i210;
		nvm->ops.release = igb_release_nvm_i210;
		nvm->ops.read = igb_read_nvm_srrd_i210;
		nvm->ops.write = igb_write_nvm_srwr_i210;
		nvm->ops.valid_led_default = igb_valid_led_default_i210;
		break;
	case e1000_i211:
		/* i211 is flash-less: the shadow RAM is read-only */
		nvm->ops.acquire = igb_acquire_nvm_i210;
		nvm->ops.release = igb_release_nvm_i210;
		nvm->ops.read = igb_read_nvm_i211;
		nvm->ops.valid_led_default = igb_valid_led_default_i210;
		nvm->ops.validate = NULL;
		nvm->ops.update = NULL;
		nvm->ops.write = NULL;
		break;
	default:
		nvm->ops.validate = igb_validate_nvm_checksum;
		nvm->ops.update = igb_update_nvm_checksum;
		nvm->ops.acquire = igb_acquire_nvm_82575;
		nvm->ops.release = igb_release_nvm_82575;
		if (nvm->word_size < (1 << 15))
			nvm->ops.read = igb_read_nvm_eerd;
		else
			nvm->ops.read = igb_read_nvm_spi;
		nvm->ops.write = igb_write_nvm_spi;
		break;
	}

	return 0;
}
/**
 * igb_init_mac_params_82575 - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 *
 * Sets MAC capabilities (RAR and MTA table sizes), installs the reset
 * and SW/FW-semaphore ops for the detected MAC generation, and selects
 * the physical-interface setup routine from the media type.  Always
 * returns 0.
 **/
static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	switch (mac->type) {
	case e1000_82576:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
		break;
	case e1000_82580:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
		break;
	case e1000_i350:
	case e1000_i354:
		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
		break;
	default:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
		break;
	}
	/* reset */
	if (mac->type >= e1000_82580)
		mac->ops.reset_hw = igb_reset_hw_82580;
	else
		mac->ops.reset_hw = igb_reset_hw_82575;

	/* i210 and later have their own SW/FW semaphore implementation */
	if (mac->type >= e1000_i210) {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;

	} else {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
	}

	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid =
		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
			? true : false;
	/* enable EEE on i350 parts and later parts */
	if (mac->type >= e1000_i350)
		dev_spec->eee_disable = false;
	else
		dev_spec->eee_disable = true;
	/* Allow a single clear of the SW semaphore on I210 and newer */
	if (mac->type >= e1000_i210)
		dev_spec->clear_semaphore_once = true;
	/* physical interface link setup */
	mac->ops.setup_physical_interface =
		(hw->phy.media_type == e1000_media_type_copper)
			? igb_setup_copper_link_82575
			: igb_setup_serdes_link_82575;

	return 0;
}
true : false; 387 /* enable EEE on i350 parts and later parts */ 388 if (mac->type >= e1000_i350) 389 dev_spec->eee_disable = false; 390 else 391 dev_spec->eee_disable = true; 392 /* Allow a single clear of the SW semaphore on I210 and newer */ 393 if (mac->type >= e1000_i210) 394 dev_spec->clear_semaphore_once = true; 395 /* physical interface link setup */ 396 mac->ops.setup_physical_interface = 397 (hw->phy.media_type == e1000_media_type_copper) 398 ? igb_setup_copper_link_82575 399 : igb_setup_serdes_link_82575; 400 401 return 0; 402} 403 404static s32 igb_get_invariants_82575(struct e1000_hw *hw) 405{ 406 struct e1000_mac_info *mac = &hw->mac; 407 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575; 408 s32 ret_val; 409 u32 ctrl_ext = 0; 410 411 switch (hw->device_id) { 412 case E1000_DEV_ID_82575EB_COPPER: 413 case E1000_DEV_ID_82575EB_FIBER_SERDES: 414 case E1000_DEV_ID_82575GB_QUAD_COPPER: 415 mac->type = e1000_82575; 416 break; 417 case E1000_DEV_ID_82576: 418 case E1000_DEV_ID_82576_NS: 419 case E1000_DEV_ID_82576_NS_SERDES: 420 case E1000_DEV_ID_82576_FIBER: 421 case E1000_DEV_ID_82576_SERDES: 422 case E1000_DEV_ID_82576_QUAD_COPPER: 423 case E1000_DEV_ID_82576_QUAD_COPPER_ET2: 424 case E1000_DEV_ID_82576_SERDES_QUAD: 425 mac->type = e1000_82576; 426 break; 427 case E1000_DEV_ID_82580_COPPER: 428 case E1000_DEV_ID_82580_FIBER: 429 case E1000_DEV_ID_82580_QUAD_FIBER: 430 case E1000_DEV_ID_82580_SERDES: 431 case E1000_DEV_ID_82580_SGMII: 432 case E1000_DEV_ID_82580_COPPER_DUAL: 433 case E1000_DEV_ID_DH89XXCC_SGMII: 434 case E1000_DEV_ID_DH89XXCC_SERDES: 435 case E1000_DEV_ID_DH89XXCC_BACKPLANE: 436 case E1000_DEV_ID_DH89XXCC_SFP: 437 mac->type = e1000_82580; 438 break; 439 case E1000_DEV_ID_I350_COPPER: 440 case E1000_DEV_ID_I350_FIBER: 441 case E1000_DEV_ID_I350_SERDES: 442 case E1000_DEV_ID_I350_SGMII: 443 mac->type = e1000_i350; 444 break; 445 case E1000_DEV_ID_I210_COPPER: 446 case E1000_DEV_ID_I210_COPPER_OEM1: 447 case 
E1000_DEV_ID_I210_COPPER_IT: 448 case E1000_DEV_ID_I210_FIBER: 449 case E1000_DEV_ID_I210_SERDES: 450 case E1000_DEV_ID_I210_SGMII: 451 mac->type = e1000_i210; 452 break; 453 case E1000_DEV_ID_I211_COPPER: 454 mac->type = e1000_i211; 455 break; 456 case E1000_DEV_ID_I354_BACKPLANE_1GBPS: 457 case E1000_DEV_ID_I354_SGMII: 458 case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: 459 mac->type = e1000_i354; 460 break; 461 default: 462 return -E1000_ERR_MAC_INIT; 463 break; 464 } 465 466 /* Set media type */ 467 /* The 82575 uses bits 22:23 for link mode. The mode can be changed 468 * based on the EEPROM. We cannot rely upon device ID. There 469 * is no distinguishable difference between fiber and internal 470 * SerDes mode on the 82575. There can be an external PHY attached 471 * on the SGMII interface. For this, we'll set sgmii_active to true. 472 */ 473 hw->phy.media_type = e1000_media_type_copper; 474 dev_spec->sgmii_active = false; 475 476 ctrl_ext = rd32(E1000_CTRL_EXT); 477 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 478 case E1000_CTRL_EXT_LINK_MODE_SGMII: 479 dev_spec->sgmii_active = true; 480 break; 481 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 482 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: 483 hw->phy.media_type = e1000_media_type_internal_serdes; 484 break; 485 default: 486 break; 487 } 488 489 /* mac initialization and operations */ 490 ret_val = igb_init_mac_params_82575(hw); 491 if (ret_val) 492 goto out; 493 494 /* NVM initialization */ 495 ret_val = igb_init_nvm_params_82575(hw); 496 if (ret_val) 497 goto out; 498 499 /* if part supports SR-IOV then initialize mailbox parameters */ 500 switch (mac->type) { 501 case e1000_82576: 502 case e1000_i350: 503 igb_init_mbx_params_pf(hw); 504 break; 505 default: 506 break; 507 } 508 509 /* setup PHY parameters */ 510 ret_val = igb_init_phy_params_82575(hw); 511 512out: 513 return ret_val; 514} 515 516/** 517 * igb_acquire_phy_82575 - Acquire rights to access PHY 518 * @hw: pointer to the HW structure 519 * 520 * 
Acquire access rights to the correct PHY. This is a 521 * function pointer entry point called by the api module. 522 **/ 523static s32 igb_acquire_phy_82575(struct e1000_hw *hw) 524{ 525 u16 mask = E1000_SWFW_PHY0_SM; 526 527 if (hw->bus.func == E1000_FUNC_1) 528 mask = E1000_SWFW_PHY1_SM; 529 else if (hw->bus.func == E1000_FUNC_2) 530 mask = E1000_SWFW_PHY2_SM; 531 else if (hw->bus.func == E1000_FUNC_3) 532 mask = E1000_SWFW_PHY3_SM; 533 534 return hw->mac.ops.acquire_swfw_sync(hw, mask); 535} 536 537/** 538 * igb_release_phy_82575 - Release rights to access PHY 539 * @hw: pointer to the HW structure 540 * 541 * A wrapper to release access rights to the correct PHY. This is a 542 * function pointer entry point called by the api module. 543 **/ 544static void igb_release_phy_82575(struct e1000_hw *hw) 545{ 546 u16 mask = E1000_SWFW_PHY0_SM; 547 548 if (hw->bus.func == E1000_FUNC_1) 549 mask = E1000_SWFW_PHY1_SM; 550 else if (hw->bus.func == E1000_FUNC_2) 551 mask = E1000_SWFW_PHY2_SM; 552 else if (hw->bus.func == E1000_FUNC_3) 553 mask = E1000_SWFW_PHY3_SM; 554 555 hw->mac.ops.release_swfw_sync(hw, mask); 556} 557 558/** 559 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii 560 * @hw: pointer to the HW structure 561 * @offset: register offset to be read 562 * @data: pointer to the read data 563 * 564 * Reads the PHY register at offset using the serial gigabit media independent 565 * interface and stores the retrieved information in data. 
566 **/ 567static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 568 u16 *data) 569{ 570 s32 ret_val = -E1000_ERR_PARAM; 571 572 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 573 hw_dbg("PHY Address %u is out of range\n", offset); 574 goto out; 575 } 576 577 ret_val = hw->phy.ops.acquire(hw); 578 if (ret_val) 579 goto out; 580 581 ret_val = igb_read_phy_reg_i2c(hw, offset, data); 582 583 hw->phy.ops.release(hw); 584 585out: 586 return ret_val; 587} 588 589/** 590 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii 591 * @hw: pointer to the HW structure 592 * @offset: register offset to write to 593 * @data: data to write at register offset 594 * 595 * Writes the data to PHY register at the offset using the serial gigabit 596 * media independent interface. 597 **/ 598static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 599 u16 data) 600{ 601 s32 ret_val = -E1000_ERR_PARAM; 602 603 604 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 605 hw_dbg("PHY Address %d is out of range\n", offset); 606 goto out; 607 } 608 609 ret_val = hw->phy.ops.acquire(hw); 610 if (ret_val) 611 goto out; 612 613 ret_val = igb_write_phy_reg_i2c(hw, offset, data); 614 615 hw->phy.ops.release(hw); 616 617out: 618 return ret_val; 619} 620 621/** 622 * igb_get_phy_id_82575 - Retrieve PHY addr and id 623 * @hw: pointer to the HW structure 624 * 625 * Retrieves the PHY address and ID for both PHY's which do and do not use 626 * sgmi interface. 627 **/ 628static s32 igb_get_phy_id_82575(struct e1000_hw *hw) 629{ 630 struct e1000_phy_info *phy = &hw->phy; 631 s32 ret_val = 0; 632 u16 phy_id; 633 u32 ctrl_ext; 634 u32 mdic; 635 636 /* For SGMII PHYs, we try the list of possible addresses until 637 * we find one that works. For non-SGMII PHYs 638 * (e.g. integrated copper PHYs), an address of 1 should 639 * work. The result of this function should mean phy->phy_addr 640 * and phy->id are set correctly. 
641 */ 642 if (!(igb_sgmii_active_82575(hw))) { 643 phy->addr = 1; 644 ret_val = igb_get_phy_id(hw); 645 goto out; 646 } 647 648 if (igb_sgmii_uses_mdio_82575(hw)) { 649 switch (hw->mac.type) { 650 case e1000_82575: 651 case e1000_82576: 652 mdic = rd32(E1000_MDIC); 653 mdic &= E1000_MDIC_PHY_MASK; 654 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; 655 break; 656 case e1000_82580: 657 case e1000_i350: 658 case e1000_i354: 659 case e1000_i210: 660 case e1000_i211: 661 mdic = rd32(E1000_MDICNFG); 662 mdic &= E1000_MDICNFG_PHY_MASK; 663 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; 664 break; 665 default: 666 ret_val = -E1000_ERR_PHY; 667 goto out; 668 break; 669 } 670 ret_val = igb_get_phy_id(hw); 671 goto out; 672 } 673 674 /* Power on sgmii phy if it is disabled */ 675 ctrl_ext = rd32(E1000_CTRL_EXT); 676 wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); 677 wrfl(); 678 msleep(300); 679 680 /* The address field in the I2CCMD register is 3 bits and 0 is invalid. 681 * Therefore, we need to test 1-7 682 */ 683 for (phy->addr = 1; phy->addr < 8; phy->addr++) { 684 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); 685 if (ret_val == 0) { 686 hw_dbg("Vendor ID 0x%08X read at address %u\n", 687 phy_id, phy->addr); 688 /* At the time of this writing, The M88 part is 689 * the only supported SGMII PHY product. 690 */ 691 if (phy_id == M88_VENDOR) 692 break; 693 } else { 694 hw_dbg("PHY address %u was unreadable\n", phy->addr); 695 } 696 } 697 698 /* A valid PHY type couldn't be found. */ 699 if (phy->addr == 8) { 700 phy->addr = 0; 701 ret_val = -E1000_ERR_PHY; 702 goto out; 703 } else { 704 ret_val = igb_get_phy_id(hw); 705 } 706 707 /* restore previous sfp cage power state */ 708 wr32(E1000_CTRL_EXT, ctrl_ext); 709 710out: 711 return ret_val; 712} 713 714/** 715 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset 716 * @hw: pointer to the HW structure 717 * 718 * Resets the PHY using the serial gigabit media independent interface. 
719 **/ 720static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) 721{ 722 s32 ret_val; 723 724 /* This isn't a true "hard" reset, but is the only reset 725 * available to us at this time. 726 */ 727 728 hw_dbg("Soft resetting SGMII attached PHY...\n"); 729 730 /* SFP documentation requires the following to configure the SPF module 731 * to work on SGMII. No further documentation is given. 732 */ 733 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); 734 if (ret_val) 735 goto out; 736 737 ret_val = igb_phy_sw_reset(hw); 738 739out: 740 return ret_val; 741} 742 743/** 744 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state 745 * @hw: pointer to the HW structure 746 * @active: true to enable LPLU, false to disable 747 * 748 * Sets the LPLU D0 state according to the active flag. When 749 * activating LPLU this function also disables smart speed 750 * and vice versa. LPLU will not be activated unless the 751 * device autonegotiation advertisement meets standards of 752 * either 10 or 10/100 or 10/100/1000 at all duplexes. 753 * This is a function pointer entry point only called by 754 * PHY setup routines. 
755 **/ 756static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) 757{ 758 struct e1000_phy_info *phy = &hw->phy; 759 s32 ret_val; 760 u16 data; 761 762 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); 763 if (ret_val) 764 goto out; 765 766 if (active) { 767 data |= IGP02E1000_PM_D0_LPLU; 768 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 769 data); 770 if (ret_val) 771 goto out; 772 773 /* When LPLU is enabled, we should disable SmartSpeed */ 774 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 775 &data); 776 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 777 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 778 data); 779 if (ret_val) 780 goto out; 781 } else { 782 data &= ~IGP02E1000_PM_D0_LPLU; 783 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 784 data); 785 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 786 * during Dx states where the power conservation is most 787 * important. During driver activity we should enable 788 * SmartSpeed, so performance is maintained. 789 */ 790 if (phy->smart_speed == e1000_smart_speed_on) { 791 ret_val = phy->ops.read_reg(hw, 792 IGP01E1000_PHY_PORT_CONFIG, &data); 793 if (ret_val) 794 goto out; 795 796 data |= IGP01E1000_PSCFR_SMART_SPEED; 797 ret_val = phy->ops.write_reg(hw, 798 IGP01E1000_PHY_PORT_CONFIG, data); 799 if (ret_val) 800 goto out; 801 } else if (phy->smart_speed == e1000_smart_speed_off) { 802 ret_val = phy->ops.read_reg(hw, 803 IGP01E1000_PHY_PORT_CONFIG, &data); 804 if (ret_val) 805 goto out; 806 807 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 808 ret_val = phy->ops.write_reg(hw, 809 IGP01E1000_PHY_PORT_CONFIG, data); 810 if (ret_val) 811 goto out; 812 } 813 } 814 815out: 816 return ret_val; 817} 818 819/** 820 * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state 821 * @hw: pointer to the HW structure 822 * @active: true to enable LPLU, false to disable 823 * 824 * Sets the LPLU D0 state according to the active flag. 
When 825 * activating LPLU this function also disables smart speed 826 * and vice versa. LPLU will not be activated unless the 827 * device autonegotiation advertisement meets standards of 828 * either 10 or 10/100 or 10/100/1000 at all duplexes. 829 * This is a function pointer entry point only called by 830 * PHY setup routines. 831 **/ 832static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) 833{ 834 struct e1000_phy_info *phy = &hw->phy; 835 s32 ret_val = 0; 836 u16 data; 837 838 data = rd32(E1000_82580_PHY_POWER_MGMT); 839 840 if (active) { 841 data |= E1000_82580_PM_D0_LPLU; 842 843 /* When LPLU is enabled, we should disable SmartSpeed */ 844 data &= ~E1000_82580_PM_SPD; 845 } else { 846 data &= ~E1000_82580_PM_D0_LPLU; 847 848 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 849 * during Dx states where the power conservation is most 850 * important. During driver activity we should enable 851 * SmartSpeed, so performance is maintained. 852 */ 853 if (phy->smart_speed == e1000_smart_speed_on) 854 data |= E1000_82580_PM_SPD; 855 else if (phy->smart_speed == e1000_smart_speed_off) 856 data &= ~E1000_82580_PM_SPD; } 857 858 wr32(E1000_82580_PHY_POWER_MGMT, data); 859 return ret_val; 860} 861 862/** 863 * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 864 * @hw: pointer to the HW structure 865 * @active: boolean used to enable/disable lplu 866 * 867 * Success returns 0, Failure returns 1 868 * 869 * The low power link up (lplu) state is set to the power management level D3 870 * and SmartSpeed is disabled when active is true, else clear lplu for D3 871 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU 872 * is used during Dx states where the power conservation is most important. 873 * During driver activity, SmartSpeed should be enabled so performance is 874 * maintained. 
875 **/ 876static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) 877{ 878 struct e1000_phy_info *phy = &hw->phy; 879 s32 ret_val = 0; 880 u16 data; 881 882 data = rd32(E1000_82580_PHY_POWER_MGMT); 883 884 if (!active) { 885 data &= ~E1000_82580_PM_D3_LPLU; 886 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 887 * during Dx states where the power conservation is most 888 * important. During driver activity we should enable 889 * SmartSpeed, so performance is maintained. 890 */ 891 if (phy->smart_speed == e1000_smart_speed_on) 892 data |= E1000_82580_PM_SPD; 893 else if (phy->smart_speed == e1000_smart_speed_off) 894 data &= ~E1000_82580_PM_SPD; 895 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || 896 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || 897 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { 898 data |= E1000_82580_PM_D3_LPLU; 899 /* When LPLU is enabled, we should disable SmartSpeed */ 900 data &= ~E1000_82580_PM_SPD; 901 } 902 903 wr32(E1000_82580_PHY_POWER_MGMT, data); 904 return ret_val; 905} 906 907/** 908 * igb_acquire_nvm_82575 - Request for access to EEPROM 909 * @hw: pointer to the HW structure 910 * 911 * Acquire the necessary semaphores for exclusive access to the EEPROM. 912 * Set the EEPROM access request bit and wait for EEPROM access grant bit. 913 * Return successful if access grant bit set, else clear the request for 914 * EEPROM access and return -E1000_ERR_NVM (-1). 
915 **/ 916static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) 917{ 918 s32 ret_val; 919 920 ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM); 921 if (ret_val) 922 goto out; 923 924 ret_val = igb_acquire_nvm(hw); 925 926 if (ret_val) 927 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); 928 929out: 930 return ret_val; 931} 932 933/** 934 * igb_release_nvm_82575 - Release exclusive access to EEPROM 935 * @hw: pointer to the HW structure 936 * 937 * Stop any current commands to the EEPROM and clear the EEPROM request bit, 938 * then release the semaphores acquired. 939 **/ 940static void igb_release_nvm_82575(struct e1000_hw *hw) 941{ 942 igb_release_nvm(hw); 943 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); 944} 945 946/** 947 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore 948 * @hw: pointer to the HW structure 949 * @mask: specifies which semaphore to acquire 950 * 951 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask 952 * will also specify which port we're acquiring the lock for. 
/**
 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 * will also specify which port we're acquiring the lock for.
 *
 * Protocol: take the HW semaphore, check that neither software (low
 * 16 bits) nor firmware (high 16 bits) currently owns the resource,
 * set our ownership bit, then drop the HW semaphore.  Retries every
 * 5 ms up to the timeout.
 **/
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;	/* firmware owns the upper 16 bits */
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		if (igb_get_hw_semaphore(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* still holding the HW semaphore here - claim the resource */
	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);

out:
	return ret_val;
}

/**
 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 * will also specify which port we're releasing the lock for.
 **/
static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* spin until the HW semaphore is ours; release must not fail */
	while (igb_get_hw_semaphore(hw) != 0)
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}
If we were to return with error, EEPROM-less silicon 1025 * would not be able to be reset or change link. 1026 **/ 1027static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) 1028{ 1029 s32 timeout = PHY_CFG_TIMEOUT; 1030 s32 ret_val = 0; 1031 u32 mask = E1000_NVM_CFG_DONE_PORT_0; 1032 1033 if (hw->bus.func == 1) 1034 mask = E1000_NVM_CFG_DONE_PORT_1; 1035 else if (hw->bus.func == E1000_FUNC_2) 1036 mask = E1000_NVM_CFG_DONE_PORT_2; 1037 else if (hw->bus.func == E1000_FUNC_3) 1038 mask = E1000_NVM_CFG_DONE_PORT_3; 1039 1040 while (timeout) { 1041 if (rd32(E1000_EEMNGCTL) & mask) 1042 break; 1043 msleep(1); 1044 timeout--; 1045 } 1046 if (!timeout) 1047 hw_dbg("MNG configuration cycle has not completed.\n"); 1048 1049 /* If EEPROM is not marked present, init the PHY manually */ 1050 if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && 1051 (hw->phy.type == e1000_phy_igp_3)) 1052 igb_phy_init_script_igp3(hw); 1053 1054 return ret_val; 1055} 1056 1057/** 1058 * igb_check_for_link_82575 - Check for link 1059 * @hw: pointer to the HW structure 1060 * 1061 * If sgmii is enabled, then use the pcs register to determine link, otherwise 1062 * use the generic interface for determining link. 1063 **/ 1064static s32 igb_check_for_link_82575(struct e1000_hw *hw) 1065{ 1066 s32 ret_val; 1067 u16 speed, duplex; 1068 1069 if (hw->phy.media_type != e1000_media_type_copper) { 1070 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 1071 &duplex); 1072 /* Use this flag to determine if link needs to be checked or 1073 * not. If we have link clear the flag so that we do not 1074 * continue to check for link. 1075 */ 1076 hw->mac.get_link_status = !hw->mac.serdes_has_link; 1077 1078 /* Configure Flow Control now that Auto-Neg has completed. 1079 * First, we need to restore the desired flow control 1080 * settings because we may have had to re-autoneg with a 1081 * different link partner. 
		 */
		ret_val = igb_config_fc_after_link_up(hw);
		if (ret_val)
			hw_dbg("Error configuring flow control\n");
	} else {
		ret_val = igb_check_for_copper_link(hw);
	}

	return ret_val;
}

/**
 *  igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
 *  @hw: pointer to the HW structure
 **/
void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	/* Nothing to do unless the link is serdes or sgmii */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = rd32(E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	wr32(E1000_PCS_CFG0, reg);

	/* Power up the laser */
	reg = rd32(E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	wrfl();
	msleep(1);
}

/**
 *  igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Using the physical coding sub-layer (PCS), retrieve the current speed and
 *  duplex, then store the values in the pointers provided.
 **/
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
					      u16 *duplex)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 pcs;

	/* Set up defaults for the return values of this function */
	mac->serdes_has_link = false;
	*speed = 0;
	*duplex = 0;

	/* Read the PCS Status register for link state. For non-copper mode,
	 * the status register is not accurate. The PCS status register is
	 * used instead.
	 */
	pcs = rd32(E1000_PCS_LSTAT);

	/* The link up bit determines when link is up on autoneg. The sync ok
	 * gets set once both sides sync up and agree upon link.
Stable link
	 * can be determined by checking for both link up and link sync ok
	 */
	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
		mac->serdes_has_link = true;

		/* Detect and store PCS speed */
		if (pcs & E1000_PCS_LSTS_SPEED_1000) {
			*speed = SPEED_1000;
		} else if (pcs & E1000_PCS_LSTS_SPEED_100) {
			*speed = SPEED_100;
		} else {
			*speed = SPEED_10;
		}

		/* Detect and store PCS duplex */
		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
			*duplex = FULL_DUPLEX;
		} else {
			*duplex = HALF_DUPLEX;
		}
	}

	return 0;
}

/**
 *  igb_shutdown_serdes_link_82575 - Remove link during power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of fiber serdes, shut down optics and PCS on driver unload
 *  when management pass thru is not enabled.
 **/
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	/* NOTE(review): this guard is asymmetric with
	 * igb_power_up_serdes_link_82575(), which bails out when the link
	 * is neither serdes nor sgmii - confirm the sgmii test here is
	 * intentional.
	 */
	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
	    igb_sgmii_active_82575(hw))
		return;

	if (!igb_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = rd32(E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		wr32(E1000_PCS_CFG0, reg);

		/* shutdown the laser */
		reg = rd32(E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		wr32(E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		wrfl();
		msleep(1);
	}
}

/**
 *  igb_reset_hw_82575 - Reset hardware
 *  @hw: pointer to the HW structure
 *
 *  This resets the hardware into a known state.  This is a
 *  function pointer entry point called by the api module.
 **/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl, icr;
	s32 ret_val;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* set the completion timeout for interface */
	ret_val = igb_set_pcie_completion_timeout(hw);
	if (ret_val) {
		hw_dbg("PCI-E Set completion timeout has failed.\n");
	}

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);

	/* Quiesce receive and transmit before issuing the reset */
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	msleep(10);

	ctrl = rd32(E1000_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error.  This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);	/* read to clear; value intentionally unused */

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	return ret_val;
}

/**
 *  igb_init_hw_82575 - Initialize hardware
 *  @hw: pointer to the HW structure
 *
 *  This inits the hardware readying it for operation.
 **/
static s32 igb_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	/* Initialize identification LED */
	ret_val = igb_id_led_init(hw);
	if (ret_val) {
		hw_dbg("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	hw_dbg("Initializing the IEEE VLAN\n");
	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
		igb_clear_vfta_i350(hw);
	else
		igb_clear_vfta(hw);

	/* Setup the receive address */
	igb_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igb_setup_link(hw);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igb_clear_hw_cntrs_82575(hw);
	return ret_val;
}

/**
 *  igb_setup_copper_link_82575 - Configure copper link settings
 *  @hw: pointer to the HW structure
 *
 *  Configures the link for auto-neg or forced speed and duplex.  Then we check
 *  for link, once link is established calls to configure collision distance
 *  and flow control are called.
 **/
static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u32 phpm_reg;

	/* Force link up; let the PHY provide speed/duplex (no MAC forcing) */
	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	wr32(E1000_CTRL, ctrl);

	/* Clear Go Link Disconnect bit */
	if (hw->mac.type >= e1000_82580) {
		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	ret_val = igb_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for SFP cage time to power up phy */
		msleep(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			hw_dbg("Error resetting the PHY.\n");
			goto out;
		}
	}
	/* Dispatch to the PHY-model-specific copper setup routine */
	switch (hw->phy.type) {
	case e1000_phy_i210:
	case e1000_phy_m88:
		switch (hw->phy.id) {
		case I347AT4_E_PHY_ID:
		case M88E1112_E_PHY_ID:
		case M88E1545_E_PHY_ID:
		case I210_I_PHY_ID:
			ret_val = igb_copper_link_setup_m88_gen2(hw);
			break;
		default:
			ret_val = igb_copper_link_setup_m88(hw);
			break;
		}
		break;
	case e1000_phy_igp_3:
		ret_val = igb_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = igb_copper_link_setup_82580(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	ret_val = igb_setup_copper_link(hw);
out:
	return ret_val;
}

/**
 *  igb_setup_serdes_link_82575 - Setup link for serdes
 *  @hw: pointer to the HW structure
 *
 *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
 *  used on copper connections where the serialized gigabit media independent
 *  interface (sgmii), or serdes fiber is being used.
Configures the link 1400 * for auto-negotiation or forces speed/duplex. 1401 **/ 1402static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) 1403{ 1404 u32 ctrl_ext, ctrl_reg, reg, anadv_reg; 1405 bool pcs_autoneg; 1406 s32 ret_val = E1000_SUCCESS; 1407 u16 data; 1408 1409 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 1410 !igb_sgmii_active_82575(hw)) 1411 return ret_val; 1412 1413 1414 /* On the 82575, SerDes loopback mode persists until it is 1415 * explicitly turned off or a power cycle is performed. A read to 1416 * the register does not indicate its status. Therefore, we ensure 1417 * loopback mode is disabled during initialization. 1418 */ 1419 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); 1420 1421 /* power on the sfp cage if present and turn on I2C */ 1422 ctrl_ext = rd32(E1000_CTRL_EXT); 1423 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; 1424 ctrl_ext |= E1000_CTRL_I2C_ENA; 1425 wr32(E1000_CTRL_EXT, ctrl_ext); 1426 1427 ctrl_reg = rd32(E1000_CTRL); 1428 ctrl_reg |= E1000_CTRL_SLU; 1429 1430 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { 1431 /* set both sw defined pins */ 1432 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; 1433 1434 /* Set switch control to serdes energy detect */ 1435 reg = rd32(E1000_CONNSW); 1436 reg |= E1000_CONNSW_ENRGSRC; 1437 wr32(E1000_CONNSW, reg); 1438 } 1439 1440 reg = rd32(E1000_PCS_LCTL); 1441 1442 /* default pcs_autoneg to the same setting as mac autoneg */ 1443 pcs_autoneg = hw->mac.autoneg; 1444 1445 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 1446 case E1000_CTRL_EXT_LINK_MODE_SGMII: 1447 /* sgmii mode lets the phy handle forcing speed/duplex */ 1448 pcs_autoneg = true; 1449 /* autoneg time out should be disabled for SGMII mode */ 1450 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); 1451 break; 1452 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 1453 /* disable PCS autoneg and support parallel detect only */ 1454 pcs_autoneg = false; 1455 default: 1456 if (hw->mac.type == 
e1000_82575 || 1457 hw->mac.type == e1000_82576) { 1458 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); 1459 if (ret_val) { 1460 printk(KERN_DEBUG "NVM Read Error\n\n"); 1461 return ret_val; 1462 } 1463 1464 if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) 1465 pcs_autoneg = false; 1466 } 1467 1468 /* non-SGMII modes only supports a speed of 1000/Full for the 1469 * link so it is best to just force the MAC and let the pcs 1470 * link either autoneg or be forced to 1000/Full 1471 */ 1472 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | 1473 E1000_CTRL_FD | E1000_CTRL_FRCDPX; 1474 1475 /* set speed of 1000/Full if speed/duplex is forced */ 1476 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; 1477 break; 1478 } 1479 1480 wr32(E1000_CTRL, ctrl_reg); 1481 1482 /* New SerDes mode allows for forcing speed or autonegotiating speed 1483 * at 1gb. Autoneg should be default set by most drivers. This is the 1484 * mode that will be compatible with older link partners and switches. 1485 * However, both are supported by the hardware and some drivers/tools. 
1486 */ 1487 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | 1488 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); 1489 1490 if (pcs_autoneg) { 1491 /* Set PCS register for autoneg */ 1492 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ 1493 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ 1494 1495 /* Disable force flow control for autoneg */ 1496 reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; 1497 1498 /* Configure flow control advertisement for autoneg */ 1499 anadv_reg = rd32(E1000_PCS_ANADV); 1500 anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); 1501 switch (hw->fc.requested_mode) { 1502 case e1000_fc_full: 1503 case e1000_fc_rx_pause: 1504 anadv_reg |= E1000_TXCW_ASM_DIR; 1505 anadv_reg |= E1000_TXCW_PAUSE; 1506 break; 1507 case e1000_fc_tx_pause: 1508 anadv_reg |= E1000_TXCW_ASM_DIR; 1509 break; 1510 default: 1511 break; 1512 } 1513 wr32(E1000_PCS_ANADV, anadv_reg); 1514 1515 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); 1516 } else { 1517 /* Set PCS register for forced link */ 1518 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ 1519 1520 /* Force flow control for forced link */ 1521 reg |= E1000_PCS_LCTL_FORCE_FCTRL; 1522 1523 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); 1524 } 1525 1526 wr32(E1000_PCS_LCTL, reg); 1527 1528 if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) 1529 igb_force_mac_fc(hw); 1530 1531 return ret_val; 1532} 1533 1534/** 1535 * igb_sgmii_active_82575 - Return sgmii state 1536 * @hw: pointer to the HW structure 1537 * 1538 * 82575 silicon has a serialized gigabit media independent interface (sgmii) 1539 * which can be enabled for use in the embedded applications. Simply 1540 * return the current state of the sgmii interface. 
 **/
static bool igb_sgmii_active_82575(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	return dev_spec->sgmii_active;
}

/**
 *  igb_reset_init_script_82575 - Inits HW defaults after reset
 *  @hw: pointer to the HW structure
 *
 *  Inits recommended HW defaults after a reset when there is no EEPROM
 *  detected. This is only for the 82575.
 **/
static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
{
	if (hw->mac.type == e1000_82575) {
		hw_dbg("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return 0;
}

/**
 *  igb_read_mac_addr_82575 - Read device MAC address
 *  @hw: pointer to the HW structure
 **/
static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* If there's an alternate MAC address place it in RAR0
	 * so that it will override the Si installed default perm
	 * address.
	 */
	ret_val = igb_check_alt_mac_addr(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_mac_addr(hw);

out:
	return ret_val;
}

/**
 *  igb_power_down_phy_copper_82575 - Remove link during PHY power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of a PHY power down to save power, or to turn off link during a
 *  driver unload, or wake on lan is not enabled, remove the link.
 **/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
		igb_power_down_phy_copper(hw);
}

/**
 *  igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the hardware counters by reading the counter registers.
 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	igb_clear_hw_cntrs_base(hw);

	/* Packet size-bucket receive/transmit counters */
	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	/* Error and transmit-status counters */
	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	/* Management packet counters */
	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	/* Interrupt assertion/cause counters */
	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}

/**
 *  igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 *  @hw: pointer to the HW structure
 *
 *  After rx enable if managability is enabled then there is likely some
 *  bad data at the start of the fifo and possibly in the DMA fifo.  This
 *  function clears the fifos and flushes any packets that came in as rx was
 *  being enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* Workaround applies only to 82575 with manageability enabled */
	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msleep(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.
Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = rd32(E1000_RFCTL);
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	/* Briefly re-enable RX with the reject-everything settings */
	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	msleep(2);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}

/**
 *  igb_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
 *  however the hardware default for these parts is 500us to 1ms which is less
 *  than the 10ms recommended by the pci-e spec.  To address this we need to
 *  increase the value to either 10ms to 200ms for capability version 1 config,
 *  or 16ms to 55ms for version 2.
 **/
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = rd32(E1000_GCR);
	s32 ret_val = 0;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/* if capabilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;	/* gcr is written back at "out" */
	}

	/* for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					&pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					 &pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	wr32(E1000_GCR, gcr);
	return ret_val;
}

/**
 *  igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
 *  @hw: pointer to the hardware struct
 *  @enable: state to enter, either enabled or disabled
 *  @pf: Physical Function pool - do not set anti-spoofing for the PF
 *
 *  enables/disables L2 switch anti-spoofing functionality.
 **/
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
{
	u32 reg_val, reg_offset;

	/* The switch-control register moved between MAC generations */
	switch (hw->mac.type) {
	case e1000_82576:
		reg_offset = E1000_DTXSWC;
		break;
	case e1000_i350:
	case e1000_i354:
		reg_offset = E1000_TXSWC;
		break;
	default:
		return;
	}

	reg_val = rd32(reg_offset);
	if (enable) {
		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
			   E1000_DTXSWC_VLAN_SPOOF_MASK);
		/* The PF can spoof - it has to in order to
		 * support emulation mode NICs
		 */
		reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
	} else {
		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
			    E1000_DTXSWC_VLAN_SPOOF_MASK);
	}
	wr32(reg_offset, reg_val);
}

/**
 *  igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
 *  @hw: pointer to the hardware struct
 *  @enable: state to enter, either enabled or disabled
 *
 *  enables/disables L2 switch loopback functionality.
 **/
void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
{
	u32 dtxswc;

	switch (hw->mac.type) {
	case e1000_82576:
		dtxswc = rd32(E1000_DTXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_DTXSWC, dtxswc);
		break;
	case e1000_i354:
	case e1000_i350:
		/* i350/i354 use TXSWC; the enable bit has the same value */
		dtxswc = rd32(E1000_TXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_TXSWC, dtxswc);
		break;
	default:
		/* Currently no other hardware supports loopback */
		break;
	}

}

/**
 *  igb_vmdq_set_replication_pf - enable or disable vmdq replication
 *  @hw: pointer to the hardware struct
 *  @enable: state to enter, either enabled or disabled
 *
 *  enables/disables replication of packets across multiple pools.
1886 **/ 1887void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) 1888{ 1889 u32 vt_ctl = rd32(E1000_VT_CTL); 1890 1891 if (enable) 1892 vt_ctl |= E1000_VT_CTL_VM_REPL_EN; 1893 else 1894 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; 1895 1896 wr32(E1000_VT_CTL, vt_ctl); 1897} 1898 1899/** 1900 * igb_read_phy_reg_82580 - Read 82580 MDI control register 1901 * @hw: pointer to the HW structure 1902 * @offset: register offset to be read 1903 * @data: pointer to the read data 1904 * 1905 * Reads the MDI control register in the PHY at offset and stores the 1906 * information read to data. 1907 **/ 1908static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) 1909{ 1910 s32 ret_val; 1911 1912 ret_val = hw->phy.ops.acquire(hw); 1913 if (ret_val) 1914 goto out; 1915 1916 ret_val = igb_read_phy_reg_mdic(hw, offset, data); 1917 1918 hw->phy.ops.release(hw); 1919 1920out: 1921 return ret_val; 1922} 1923 1924/** 1925 * igb_write_phy_reg_82580 - Write 82580 MDI control register 1926 * @hw: pointer to the HW structure 1927 * @offset: register offset to write to 1928 * @data: data to write to register at offset 1929 * 1930 * Writes data to MDI control register in the PHY at offset. 1931 **/ 1932static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) 1933{ 1934 s32 ret_val; 1935 1936 1937 ret_val = hw->phy.ops.acquire(hw); 1938 if (ret_val) 1939 goto out; 1940 1941 ret_val = igb_write_phy_reg_mdic(hw, offset, data); 1942 1943 hw->phy.ops.release(hw); 1944 1945out: 1946 return ret_val; 1947} 1948 1949/** 1950 * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits 1951 * @hw: pointer to the HW structure 1952 * 1953 * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on 1954 * the values found in the EEPROM. This addresses an issue in which these 1955 * bits are not restored from EEPROM after reset. 
 **/
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 mdicnfg;
	u16 nvm_data = 0;

	/* Only 82580 parts in sgmii mode are affected */
	if (hw->mac.type != e1000_82580)
		goto out;
	if (!igb_sgmii_active_82575(hw))
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				   &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* Re-apply the MDIO routing bits the EEPROM says should be set */
	mdicnfg = rd32(E1000_MDICNFG);
	if (nvm_data & NVM_WORD24_EXT_MDIO)
		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
	if (nvm_data & NVM_WORD24_COM_MDIO)
		mdicnfg |= E1000_MDICNFG_COM_MDIO;
	wr32(E1000_MDICNFG, mdicnfg);
out:
	return ret_val;
}

/**
 *  igb_reset_hw_82580 - Reset hardware
 *  @hw: pointer to the HW structure
 *
 *  This resets function or entire device (all ports, etc.)
 *  to a known state.
 **/
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl, icr;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	hw->dev_spec._82575.global_device_reset = false;

	/* due to hw errata, global device reset doesn't always
	 * work on 82580
	 */
	if (hw->mac.type == e1000_82580)
		global_device_reset = false;

	/* Get current control state. */
	ctrl = rd32(E1000_CTRL);

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	msleep(10);

	/* Determine whether or not a global dev reset is requested */
	if (global_device_reset &&
	    hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
		global_device_reset = false;

	/* Issue a device reset only if no other port already did */
	if (global_device_reset &&
	    !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);
	wrfl();

	/* Add delay to insure DEV_RST has time to complete */
	if (global_device_reset)
		msleep(5);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error.  This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* clear global device reset status bit */
	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);	/* read to clear; value intentionally unused */

	ret_val = igb_reset_mdicnfg_82580(hw);
	if (ret_val)
		hw_dbg("Could not reset MDICNFG based on EEPROM\n");

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	/* Release semaphore */
	if (global_device_reset)
		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);

	return ret_val;
}

/**
 *  igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
 *  @data: data received by reading RXPBS register
 *
 *  The 82580 uses a table based approach for packet buffer allocation sizes.
2081 * This function converts the retrieved value into the correct table value 2082 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 2083 * 0x0 36 72 144 1 2 4 8 16 2084 * 0x8 35 70 140 rsv rsv rsv rsv rsv 2085 */ 2086u16 igb_rxpbs_adjust_82580(u32 data) 2087{ 2088 u16 ret_val = 0; 2089 2090 if (data < E1000_82580_RXPBS_TABLE_SIZE) 2091 ret_val = e1000_82580_rxpbs_table[data]; 2092 2093 return ret_val; 2094} 2095 2096/** 2097 * igb_validate_nvm_checksum_with_offset - Validate EEPROM 2098 * checksum 2099 * @hw: pointer to the HW structure 2100 * @offset: offset in words of the checksum protected region 2101 * 2102 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM 2103 * and then verifies that the sum of the EEPROM is equal to 0xBABA. 2104 **/ 2105static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, 2106 u16 offset) 2107{ 2108 s32 ret_val = 0; 2109 u16 checksum = 0; 2110 u16 i, nvm_data; 2111 2112 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { 2113 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 2114 if (ret_val) { 2115 hw_dbg("NVM Read Error\n"); 2116 goto out; 2117 } 2118 checksum += nvm_data; 2119 } 2120 2121 if (checksum != (u16) NVM_SUM) { 2122 hw_dbg("NVM Checksum Invalid\n"); 2123 ret_val = -E1000_ERR_NVM; 2124 goto out; 2125 } 2126 2127out: 2128 return ret_val; 2129} 2130 2131/** 2132 * igb_update_nvm_checksum_with_offset - Update EEPROM 2133 * checksum 2134 * @hw: pointer to the HW structure 2135 * @offset: offset in words of the checksum protected region 2136 * 2137 * Updates the EEPROM checksum by reading/adding each word of the EEPROM 2138 * up to the checksum. Then calculates the EEPROM checksum and writes the 2139 * value to the EEPROM. 
 **/
static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Sum the data words only; the checksum word itself is excluded */
	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error while updating checksum.\n");
			goto out;
		}
		checksum += nvm_data;
	}
	/* Choose the checksum word so the whole region sums to NVM_SUM */
	checksum = (u16) NVM_SUM - checksum;
	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
				    &checksum);
	if (ret_val)
		hw_dbg("NVM Write Error while updating checksum.\n");

out:
	return ret_val;
}

/**
 *  igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM section checksum by reading/adding each word of
 *  the EEPROM and then verifies that the sum of the EEPROM is
 *  equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 eeprom_regions_count = 1;
	u16 j, nvm_data;
	u16 nvm_offset;

	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
		/* if checksums compatibility bit is set validate checksums
		 * for all 4 ports.
		 */
		eeprom_regions_count = 4;
	}

	for (j = 0; j < eeprom_regions_count; j++) {
		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
		ret_val = igb_validate_nvm_checksum_with_offset(hw,
								nvm_offset);
		if (ret_val != 0)
			goto out;
	}

out:
	return ret_val;
}

/**
 *  igb_update_nvm_checksum_82580 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM section checksums for all 4 ports by reading/adding
 *  each word of the EEPROM up to the checksum.
Then calculates the EEPROM 2211 * checksum and writes the value to the EEPROM. 2212 **/ 2213static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) 2214{ 2215 s32 ret_val; 2216 u16 j, nvm_data; 2217 u16 nvm_offset; 2218 2219 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2220 if (ret_val) { 2221 hw_dbg("NVM Read Error while updating checksum" 2222 " compatibility bit.\n"); 2223 goto out; 2224 } 2225 2226 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { 2227 /* set compatibility bit to validate checksums appropriately */ 2228 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; 2229 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, 2230 &nvm_data); 2231 if (ret_val) { 2232 hw_dbg("NVM Write Error while updating checksum" 2233 " compatibility bit.\n"); 2234 goto out; 2235 } 2236 } 2237 2238 for (j = 0; j < 4; j++) { 2239 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2240 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); 2241 if (ret_val) 2242 goto out; 2243 } 2244 2245out: 2246 return ret_val; 2247} 2248 2249/** 2250 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum 2251 * @hw: pointer to the HW structure 2252 * 2253 * Calculates the EEPROM section checksum by reading/adding each word of 2254 * the EEPROM and then verifies that the sum of the EEPROM is 2255 * equal to 0xBABA. 2256 **/ 2257static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) 2258{ 2259 s32 ret_val = 0; 2260 u16 j; 2261 u16 nvm_offset; 2262 2263 for (j = 0; j < 4; j++) { 2264 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2265 ret_val = igb_validate_nvm_checksum_with_offset(hw, 2266 nvm_offset); 2267 if (ret_val != 0) 2268 goto out; 2269 } 2270 2271out: 2272 return ret_val; 2273} 2274 2275/** 2276 * igb_update_nvm_checksum_i350 - Update EEPROM checksum 2277 * @hw: pointer to the HW structure 2278 * 2279 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2280 * each word of the EEPROM up to the checksum. 
Then calculates the EEPROM 2281 * checksum and writes the value to the EEPROM. 2282 **/ 2283static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) 2284{ 2285 s32 ret_val = 0; 2286 u16 j; 2287 u16 nvm_offset; 2288 2289 for (j = 0; j < 4; j++) { 2290 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2291 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); 2292 if (ret_val != 0) 2293 goto out; 2294 } 2295 2296out: 2297 return ret_val; 2298} 2299 2300/** 2301 * __igb_access_emi_reg - Read/write EMI register 2302 * @hw: pointer to the HW structure 2303 * @addr: EMI address to program 2304 * @data: pointer to value to read/write from/to the EMI address 2305 * @read: boolean flag to indicate read or write 2306 **/ 2307static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, 2308 u16 *data, bool read) 2309{ 2310 s32 ret_val = E1000_SUCCESS; 2311 2312 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); 2313 if (ret_val) 2314 return ret_val; 2315 2316 if (read) 2317 ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); 2318 else 2319 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); 2320 2321 return ret_val; 2322} 2323 2324/** 2325 * igb_read_emi_reg - Read Extended Management Interface register 2326 * @hw: pointer to the HW structure 2327 * @addr: EMI address to program 2328 * @data: value to be read from the EMI address 2329 **/ 2330s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) 2331{ 2332 return __igb_access_emi_reg(hw, addr, data, true); 2333} 2334 2335/** 2336 * igb_set_eee_i350 - Enable/disable EEE support 2337 * @hw: pointer to the HW structure 2338 * 2339 * Enable/disable EEE based on setting in dev_spec structure. 
2340 * 2341 **/ 2342s32 igb_set_eee_i350(struct e1000_hw *hw) 2343{ 2344 s32 ret_val = 0; 2345 u32 ipcnfg, eeer; 2346 2347 if ((hw->mac.type < e1000_i350) || 2348 (hw->phy.media_type != e1000_media_type_copper)) 2349 goto out; 2350 ipcnfg = rd32(E1000_IPCNFG); 2351 eeer = rd32(E1000_EEER); 2352 2353 /* enable or disable per user setting */ 2354 if (!(hw->dev_spec._82575.eee_disable)) { 2355 u32 eee_su = rd32(E1000_EEE_SU); 2356 2357 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); 2358 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | 2359 E1000_EEER_LPI_FC); 2360 2361 /* This bit should not be set in normal operation. */ 2362 if (eee_su & E1000_EEE_SU_LPI_CLK_STP) 2363 hw_dbg("LPI Clock Stop Bit should not be set!\n"); 2364 2365 } else { 2366 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | 2367 E1000_IPCNFG_EEE_100M_AN); 2368 eeer &= ~(E1000_EEER_TX_LPI_EN | 2369 E1000_EEER_RX_LPI_EN | 2370 E1000_EEER_LPI_FC); 2371 } 2372 wr32(E1000_IPCNFG, ipcnfg); 2373 wr32(E1000_EEER, eeer); 2374 rd32(E1000_IPCNFG); 2375 rd32(E1000_EEER); 2376out: 2377 2378 return ret_val; 2379} 2380 2381/** 2382 * igb_set_eee_i354 - Enable/disable EEE support 2383 * @hw: pointer to the HW structure 2384 * 2385 * Enable/disable EEE legacy mode based on setting in dev_spec structure. 2386 * 2387 **/ 2388s32 igb_set_eee_i354(struct e1000_hw *hw) 2389{ 2390 struct e1000_phy_info *phy = &hw->phy; 2391 s32 ret_val = 0; 2392 u16 phy_data; 2393 2394 if ((hw->phy.media_type != e1000_media_type_copper) || 2395 (phy->id != M88E1545_E_PHY_ID)) 2396 goto out; 2397 2398 if (!hw->dev_spec._82575.eee_disable) { 2399 /* Switch to PHY page 18. 
*/ 2400 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18); 2401 if (ret_val) 2402 goto out; 2403 2404 ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1, 2405 &phy_data); 2406 if (ret_val) 2407 goto out; 2408 2409 phy_data |= E1000_M88E1545_EEE_CTRL_1_MS; 2410 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1, 2411 phy_data); 2412 if (ret_val) 2413 goto out; 2414 2415 /* Return the PHY to page 0. */ 2416 ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0); 2417 if (ret_val) 2418 goto out; 2419 2420 /* Turn on EEE advertisement. */ 2421 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2422 E1000_EEE_ADV_DEV_I354, 2423 &phy_data); 2424 if (ret_val) 2425 goto out; 2426 2427 phy_data |= E1000_EEE_ADV_100_SUPPORTED | 2428 E1000_EEE_ADV_1000_SUPPORTED; 2429 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2430 E1000_EEE_ADV_DEV_I354, 2431 phy_data); 2432 } else { 2433 /* Turn off EEE advertisement. */ 2434 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2435 E1000_EEE_ADV_DEV_I354, 2436 &phy_data); 2437 if (ret_val) 2438 goto out; 2439 2440 phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | 2441 E1000_EEE_ADV_1000_SUPPORTED); 2442 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2443 E1000_EEE_ADV_DEV_I354, 2444 phy_data); 2445 } 2446 2447out: 2448 return ret_val; 2449} 2450 2451/** 2452 * igb_get_eee_status_i354 - Get EEE status 2453 * @hw: pointer to the HW structure 2454 * @status: EEE status 2455 * 2456 * Get EEE status by guessing based on whether Tx or Rx LPI indications have 2457 * been received. 2458 **/ 2459s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) 2460{ 2461 struct e1000_phy_info *phy = &hw->phy; 2462 s32 ret_val = 0; 2463 u16 phy_data; 2464 2465 /* Check if EEE is supported on this device. 
*/ 2466 if ((hw->phy.media_type != e1000_media_type_copper) || 2467 (phy->id != M88E1545_E_PHY_ID)) 2468 goto out; 2469 2470 ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, 2471 E1000_PCS_STATUS_DEV_I354, 2472 &phy_data); 2473 if (ret_val) 2474 goto out; 2475 2476 *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | 2477 E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; 2478 2479out: 2480 return ret_val; 2481} 2482 2483static const u8 e1000_emc_temp_data[4] = { 2484 E1000_EMC_INTERNAL_DATA, 2485 E1000_EMC_DIODE1_DATA, 2486 E1000_EMC_DIODE2_DATA, 2487 E1000_EMC_DIODE3_DATA 2488}; 2489static const u8 e1000_emc_therm_limit[4] = { 2490 E1000_EMC_INTERNAL_THERM_LIMIT, 2491 E1000_EMC_DIODE1_THERM_LIMIT, 2492 E1000_EMC_DIODE2_THERM_LIMIT, 2493 E1000_EMC_DIODE3_THERM_LIMIT 2494}; 2495 2496/** 2497 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data 2498 * @hw: pointer to hardware structure 2499 * 2500 * Updates the temperatures in mac.thermal_sensor_data 2501 **/ 2502s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) 2503{ 2504 s32 status = E1000_SUCCESS; 2505 u16 ets_offset; 2506 u16 ets_cfg; 2507 u16 ets_sensor; 2508 u8 num_sensors; 2509 u8 sensor_index; 2510 u8 sensor_location; 2511 u8 i; 2512 struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 2513 2514 if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) 2515 return E1000_NOT_IMPLEMENTED; 2516 2517 data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); 2518 2519 /* Return the internal sensor only if ETS is unsupported */ 2520 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2521 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2522 return status; 2523 2524 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2525 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2526 != NVM_ETS_TYPE_EMC) 2527 return E1000_NOT_IMPLEMENTED; 2528 2529 num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); 2530 if (num_sensors > E1000_MAX_SENSORS) 2531 num_sensors = 
E1000_MAX_SENSORS; 2532 2533 for (i = 1; i < num_sensors; i++) { 2534 hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); 2535 sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> 2536 NVM_ETS_DATA_INDEX_SHIFT); 2537 sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> 2538 NVM_ETS_DATA_LOC_SHIFT); 2539 2540 if (sensor_location != 0) 2541 hw->phy.ops.read_i2c_byte(hw, 2542 e1000_emc_temp_data[sensor_index], 2543 E1000_I2C_THERMAL_SENSOR_ADDR, 2544 &data->sensor[i].temp); 2545 } 2546 return status; 2547} 2548 2549/** 2550 * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds 2551 * @hw: pointer to hardware structure 2552 * 2553 * Sets the thermal sensor thresholds according to the NVM map 2554 * and save off the threshold and location values into mac.thermal_sensor_data 2555 **/ 2556s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) 2557{ 2558 s32 status = E1000_SUCCESS; 2559 u16 ets_offset; 2560 u16 ets_cfg; 2561 u16 ets_sensor; 2562 u8 low_thresh_delta; 2563 u8 num_sensors; 2564 u8 sensor_index; 2565 u8 sensor_location; 2566 u8 therm_limit; 2567 u8 i; 2568 struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 2569 2570 if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) 2571 return E1000_NOT_IMPLEMENTED; 2572 2573 memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); 2574 2575 data->sensor[0].location = 0x1; 2576 data->sensor[0].caution_thresh = 2577 (rd32(E1000_THHIGHTC) & 0xFF); 2578 data->sensor[0].max_op_thresh = 2579 (rd32(E1000_THLOWTC) & 0xFF); 2580 2581 /* Return the internal sensor only if ETS is unsupported */ 2582 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2583 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2584 return status; 2585 2586 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2587 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2588 != NVM_ETS_TYPE_EMC) 2589 return E1000_NOT_IMPLEMENTED; 2590 2591 low_thresh_delta = ((ets_cfg & 
NVM_ETS_LTHRES_DELTA_MASK) >> 2592 NVM_ETS_LTHRES_DELTA_SHIFT); 2593 num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); 2594 2595 for (i = 1; i <= num_sensors; i++) { 2596 hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); 2597 sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> 2598 NVM_ETS_DATA_INDEX_SHIFT); 2599 sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> 2600 NVM_ETS_DATA_LOC_SHIFT); 2601 therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; 2602 2603 hw->phy.ops.write_i2c_byte(hw, 2604 e1000_emc_therm_limit[sensor_index], 2605 E1000_I2C_THERMAL_SENSOR_ADDR, 2606 therm_limit); 2607 2608 if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { 2609 data->sensor[i].location = sensor_location; 2610 data->sensor[i].caution_thresh = therm_limit; 2611 data->sensor[i].max_op_thresh = therm_limit - 2612 low_thresh_delta; 2613 } 2614 } 2615 return status; 2616} 2617 2618static struct e1000_mac_operations e1000_mac_ops_82575 = { 2619 .init_hw = igb_init_hw_82575, 2620 .check_for_link = igb_check_for_link_82575, 2621 .rar_set = igb_rar_set, 2622 .read_mac_addr = igb_read_mac_addr_82575, 2623 .get_speed_and_duplex = igb_get_speed_and_duplex_copper, 2624#ifdef CONFIG_IGB_HWMON 2625 .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, 2626 .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, 2627#endif 2628}; 2629 2630static struct e1000_phy_operations e1000_phy_ops_82575 = { 2631 .acquire = igb_acquire_phy_82575, 2632 .get_cfg_done = igb_get_cfg_done_82575, 2633 .release = igb_release_phy_82575, 2634 .write_i2c_byte = igb_write_i2c_byte, 2635 .read_i2c_byte = igb_read_i2c_byte, 2636}; 2637 2638static struct e1000_nvm_operations e1000_nvm_ops_82575 = { 2639 .acquire = igb_acquire_nvm_82575, 2640 .read = igb_read_nvm_eerd, 2641 .release = igb_release_nvm_82575, 2642 .write = igb_write_nvm_spi, 2643}; 2644 2645const struct e1000_info e1000_82575_info = { 2646 .get_invariants = igb_get_invariants_82575, 2647 .mac_ops = 
&e1000_mac_ops_82575, 2648 .phy_ops = &e1000_phy_ops_82575, 2649 .nvm_ops = &e1000_nvm_ops_82575, 2650}; 2651 2652