hw.c revision 74e13060f11dbf4028b810e34c359f64929415f3
1/* 2 * Copyright (c) 2008-2011 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17#include <linux/io.h> 18#include <linux/slab.h> 19#include <linux/module.h> 20#include <asm/unaligned.h> 21 22#include "hw.h" 23#include "hw-ops.h" 24#include "rc.h" 25#include "ar9003_mac.h" 26#include "ar9003_mci.h" 27#include "ar9003_phy.h" 28#include "debug.h" 29#include "ath9k.h" 30 31static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); 32 33MODULE_AUTHOR("Atheros Communications"); 34MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); 35MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); 36MODULE_LICENSE("Dual BSD/GPL"); 37 38static int __init ath9k_init(void) 39{ 40 return 0; 41} 42module_init(ath9k_init); 43 44static void __exit ath9k_exit(void) 45{ 46 return; 47} 48module_exit(ath9k_exit); 49 50/* Private hardware callbacks */ 51 52static void ath9k_hw_init_cal_settings(struct ath_hw *ah) 53{ 54 ath9k_hw_private_ops(ah)->init_cal_settings(ah); 55} 56 57static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah, 58 struct ath9k_channel *chan) 59{ 60 return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan); 61} 62 63static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah) 64{ 65 if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs) 66 return; 67 68 ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah); 69} 70 71static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah) 72{ 73 /* You will not have this callback if using the old ANI */ 74 if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs) 75 return; 76 77 ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah); 78} 79 80/********************/ 81/* Helper Functions */ 82/********************/ 83 84#ifdef CONFIG_ATH9K_DEBUGFS 85 86void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause) 87{ 88 struct ath_softc *sc = common->priv; 89 if (sync_cause) 90 sc->debug.stats.istats.sync_cause_all++; 91 if (sync_cause & AR_INTR_SYNC_RTC_IRQ) 92 sc->debug.stats.istats.sync_rtc_irq++; 93 if (sync_cause & AR_INTR_SYNC_MAC_IRQ) 94 sc->debug.stats.istats.sync_mac_irq++; 95 if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS) 96 sc->debug.stats.istats.eeprom_illegal_access++; 97 if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT) 98 sc->debug.stats.istats.apb_timeout++; 99 if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT) 100 sc->debug.stats.istats.pci_mode_conflict++; 101 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) 102 sc->debug.stats.istats.host1_fatal++; 103 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) 104 sc->debug.stats.istats.host1_perr++; 105 if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR) 106 sc->debug.stats.istats.trcv_fifo_perr++; 107 if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP) 108 sc->debug.stats.istats.radm_cpl_ep++; 109 if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT) 110 
sc->debug.stats.istats.radm_cpl_dllp_abort++; 111 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT) 112 sc->debug.stats.istats.radm_cpl_tlp_abort++; 113 if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR) 114 sc->debug.stats.istats.radm_cpl_ecrc_err++; 115 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) 116 sc->debug.stats.istats.radm_cpl_timeout++; 117 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) 118 sc->debug.stats.istats.local_timeout++; 119 if (sync_cause & AR_INTR_SYNC_PM_ACCESS) 120 sc->debug.stats.istats.pm_access++; 121 if (sync_cause & AR_INTR_SYNC_MAC_AWAKE) 122 sc->debug.stats.istats.mac_awake++; 123 if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP) 124 sc->debug.stats.istats.mac_asleep++; 125 if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS) 126 sc->debug.stats.istats.mac_sleep_access++; 127} 128#endif 129 130 131static void ath9k_hw_set_clockrate(struct ath_hw *ah) 132{ 133 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 134 struct ath_common *common = ath9k_hw_common(ah); 135 unsigned int clockrate; 136 137 /* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */ 138 if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) 139 clockrate = 117; 140 else if (!ah->curchan) /* should really check for CCK instead */ 141 clockrate = ATH9K_CLOCK_RATE_CCK; 142 else if (conf->chandef.chan->band == IEEE80211_BAND_2GHZ) 143 clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM; 144 else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK) 145 clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM; 146 else 147 clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM; 148 149 if (conf_is_ht40(conf)) 150 clockrate *= 2; 151 152 if (ah->curchan) { 153 if (IS_CHAN_HALF_RATE(ah->curchan)) 154 clockrate /= 2; 155 if (IS_CHAN_QUARTER_RATE(ah->curchan)) 156 clockrate /= 4; 157 } 158 159 common->clockrate = clockrate; 160} 161 162static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs) 163{ 164 struct ath_common *common = ath9k_hw_common(ah); 165 166 return usecs * common->clockrate; 167} 168 169bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout) 170{ 171 int i; 172 173 BUG_ON(timeout < AH_TIME_QUANTUM); 174 175 for (i = 0; i < (timeout / AH_TIME_QUANTUM); i++) { 176 if ((REG_READ(ah, reg) & mask) == val) 177 return true; 178 179 udelay(AH_TIME_QUANTUM); 180 } 181 182 ath_dbg(ath9k_hw_common(ah), ANY, 183 "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n", 184 timeout, reg, REG_READ(ah, reg), mask, val); 185 186 return false; 187} 188EXPORT_SYMBOL(ath9k_hw_wait); 189 190void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan, 191 int hw_delay) 192{ 193 if (IS_CHAN_B(chan)) 194 hw_delay = (4 * hw_delay) / 22; 195 else 196 hw_delay /= 10; 197 198 if (IS_CHAN_HALF_RATE(chan)) 199 hw_delay *= 2; 200 else if (IS_CHAN_QUARTER_RATE(chan)) 201 hw_delay *= 4; 202 203 udelay(hw_delay + BASE_ACTIVATE_DELAY); 204} 205 206void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array, 207 int column, unsigned int *writecnt) 208{ 209 int r; 210 211 ENABLE_REGWRITE_BUFFER(ah); 212 for (r = 0; r < array->ia_rows; r++) { 213 REG_WRITE(ah, INI_RA(array, r, 0), 214 INI_RA(array, r, column)); 215 DO_DELAY(*writecnt); 216 } 217 REGWRITE_BUFFER_FLUSH(ah); 218} 219 220u32 ath9k_hw_reverse_bits(u32 val, u32 n) 221{ 222 u32 retval; 223 int i; 224 225 for (i = 0, retval = 0; i < n; i++) { 226 retval = (retval << 1) | (val & 1); 227 val >>= 1; 228 } 229 return retval; 230} 231 232u16 ath9k_hw_computetxtime(struct ath_hw *ah, 233 u8 phy, int kbps, 234 u32 frameLen, u16 rateix, 235 bool 
shortPreamble) 236{ 237 u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime; 238 239 if (kbps == 0) 240 return 0; 241 242 switch (phy) { 243 case WLAN_RC_PHY_CCK: 244 phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS; 245 if (shortPreamble) 246 phyTime >>= 1; 247 numBits = frameLen << 3; 248 txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps); 249 break; 250 case WLAN_RC_PHY_OFDM: 251 if (ah->curchan && IS_CHAN_QUARTER_RATE(ah->curchan)) { 252 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000; 253 numBits = OFDM_PLCP_BITS + (frameLen << 3); 254 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); 255 txTime = OFDM_SIFS_TIME_QUARTER 256 + OFDM_PREAMBLE_TIME_QUARTER 257 + (numSymbols * OFDM_SYMBOL_TIME_QUARTER); 258 } else if (ah->curchan && 259 IS_CHAN_HALF_RATE(ah->curchan)) { 260 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000; 261 numBits = OFDM_PLCP_BITS + (frameLen << 3); 262 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); 263 txTime = OFDM_SIFS_TIME_HALF + 264 OFDM_PREAMBLE_TIME_HALF 265 + (numSymbols * OFDM_SYMBOL_TIME_HALF); 266 } else { 267 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000; 268 numBits = OFDM_PLCP_BITS + (frameLen << 3); 269 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); 270 txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME 271 + (numSymbols * OFDM_SYMBOL_TIME); 272 } 273 break; 274 default: 275 ath_err(ath9k_hw_common(ah), 276 "Unknown phy %u (rate ix %u)\n", phy, rateix); 277 txTime = 0; 278 break; 279 } 280 281 return txTime; 282} 283EXPORT_SYMBOL(ath9k_hw_computetxtime); 284 285void ath9k_hw_get_channel_centers(struct ath_hw *ah, 286 struct ath9k_channel *chan, 287 struct chan_centers *centers) 288{ 289 int8_t extoff; 290 291 if (!IS_CHAN_HT40(chan)) { 292 centers->ctl_center = centers->ext_center = 293 centers->synth_center = chan->channel; 294 return; 295 } 296 297 if ((chan->chanmode == CHANNEL_A_HT40PLUS) || 298 (chan->chanmode == CHANNEL_G_HT40PLUS)) { 299 centers->synth_center = 300 chan->channel + HT40_CHANNEL_CENTER_SHIFT; 301 extoff = 1; 302 } else { 303 centers->synth_center = 304 chan->channel - HT40_CHANNEL_CENTER_SHIFT; 305 extoff = -1; 306 } 307 308 centers->ctl_center = 309 centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT); 310 /* 25 MHz spacing is supported by hw but not on upper layers */ 311 centers->ext_center = 312 centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT); 313} 314 315/******************/ 316/* Chip Revisions */ 317/******************/ 318 319static void ath9k_hw_read_revisions(struct ath_hw *ah) 320{ 321 u32 val; 322 323 switch (ah->hw_version.devid) { 324 case AR5416_AR9100_DEVID: 325 ah->hw_version.macVersion = AR_SREV_VERSION_9100; 326 break; 327 case AR9300_DEVID_AR9330: 328 ah->hw_version.macVersion = AR_SREV_VERSION_9330; 329 if (ah->get_mac_revision) { 330 ah->hw_version.macRev = ah->get_mac_revision(); 331 } else { 332 val = REG_READ(ah, AR_SREV); 333 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); 334 } 335 return; 336 case AR9300_DEVID_AR9340: 337 ah->hw_version.macVersion = AR_SREV_VERSION_9340; 338 val = REG_READ(ah, AR_SREV); 339 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); 340 return; 341 case AR9300_DEVID_QCA955X: 342 ah->hw_version.macVersion = AR_SREV_VERSION_9550; 343 return; 344 } 345 346 val = REG_READ(ah, AR_SREV) & AR_SREV_ID; 347 348 if (val == 0xFF) { 349 val = REG_READ(ah, AR_SREV); 350 ah->hw_version.macVersion = 351 (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; 352 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2); 353 354 if (AR_SREV_9462(ah) || 
AR_SREV_9565(ah)) 355 ah->is_pciexpress = true; 356 else 357 ah->is_pciexpress = (val & 358 AR_SREV_TYPE2_HOST_MODE) ? 0 : 1; 359 } else { 360 if (!AR_SREV_9100(ah)) 361 ah->hw_version.macVersion = MS(val, AR_SREV_VERSION); 362 363 ah->hw_version.macRev = val & AR_SREV_REVISION; 364 365 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE) 366 ah->is_pciexpress = true; 367 } 368} 369 370/************************************/ 371/* HW Attach, Detach, Init Routines */ 372/************************************/ 373 374static void ath9k_hw_disablepcie(struct ath_hw *ah) 375{ 376 if (!AR_SREV_5416(ah)) 377 return; 378 379 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); 380 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); 381 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029); 382 REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824); 383 REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579); 384 REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000); 385 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); 386 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); 387 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007); 388 389 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 390} 391 392/* This should work for all families including legacy */ 393static bool ath9k_hw_chip_test(struct ath_hw *ah) 394{ 395 struct ath_common *common = ath9k_hw_common(ah); 396 u32 regAddr[2] = { AR_STA_ID0 }; 397 u32 regHold[2]; 398 static const u32 patternData[4] = { 399 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999 400 }; 401 int i, j, loop_max; 402 403 if (!AR_SREV_9300_20_OR_LATER(ah)) { 404 loop_max = 2; 405 regAddr[1] = AR_PHY_BASE + (8 << 2); 406 } else 407 loop_max = 1; 408 409 for (i = 0; i < loop_max; i++) { 410 u32 addr = regAddr[i]; 411 u32 wrData, rdData; 412 413 regHold[i] = REG_READ(ah, addr); 414 for (j = 0; j < 0x100; j++) { 415 wrData = (j << 16) | j; 416 REG_WRITE(ah, addr, wrData); 417 rdData = REG_READ(ah, addr); 418 if (rdData != wrData) { 419 ath_err(common, 420 "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", 421 addr, wrData, rdData); 422 return false; 423 } 424 } 425 for (j = 0; j < 4; j++) { 426 wrData = patternData[j]; 427 REG_WRITE(ah, addr, wrData); 428 rdData = REG_READ(ah, addr); 429 if (wrData != rdData) { 430 ath_err(common, 431 "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", 432 addr, wrData, rdData); 433 return false; 434 } 435 } 436 REG_WRITE(ah, regAddr[i], regHold[i]); 437 } 438 udelay(100); 439 440 return true; 441} 442 443static void ath9k_hw_init_config(struct ath_hw *ah) 444{ 445 int i; 446 447 ah->config.dma_beacon_response_time = 1; 448 ah->config.sw_beacon_response_time = 6; 449 ah->config.additional_swba_backoff = 0; 450 ah->config.ack_6mb = 0x0; 451 ah->config.cwm_ignore_extcca = 0; 452 ah->config.pcie_clock_req = 0; 453 ah->config.pcie_waen = 0; 454 ah->config.analog_shiftreg = 1; 455 456 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 457 ah->config.spurchans[i][0] = AR_NO_SPUR; 458 ah->config.spurchans[i][1] = AR_NO_SPUR; 459 } 460 461 ah->config.rx_intr_mitigation = true; 462 ah->config.pcieSerDesWrite = true; 463 464 /* 465 * We need this for PCI devices only (Cardbus, PCI, miniPCI) 466 * _and_ if on non-uniprocessor systems (Multiprocessor/HT). 467 * This means we use it for all AR5416 devices, and the few 468 * minor PCI AR9280 devices out there. 469 * 470 * Serialization is required because these devices do not handle 471 * well the case of two concurrent reads/writes due to the latency 472 * involved. 
During one read/write another read/write can be issued 473 * on another CPU while the previous read/write may still be working 474 * on our hardware, if we hit this case the hardware poops in a loop. 475 * We prevent this by serializing reads and writes. 476 * 477 * This issue is not present on PCI-Express devices or pre-AR5416 478 * devices (legacy, 802.11abg). 479 */ 480 if (num_possible_cpus() > 1) 481 ah->config.serialize_regmode = SER_REG_MODE_AUTO; 482} 483 484static void ath9k_hw_init_defaults(struct ath_hw *ah) 485{ 486 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 487 488 regulatory->country_code = CTRY_DEFAULT; 489 regulatory->power_limit = MAX_RATE_POWER; 490 491 ah->hw_version.magic = AR5416_MAGIC; 492 ah->hw_version.subvendorid = 0; 493 494 ah->atim_window = 0; 495 ah->sta_id1_defaults = 496 AR_STA_ID1_CRPT_MIC_ENABLE | 497 AR_STA_ID1_MCAST_KSRCH; 498 if (AR_SREV_9100(ah)) 499 ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX; 500 ah->slottime = ATH9K_SLOT_TIME_9; 501 ah->globaltxtimeout = (u32) -1; 502 ah->power_mode = ATH9K_PM_UNDEFINED; 503 ah->htc_reset_init = true; 504} 505 506static int ath9k_hw_init_macaddr(struct ath_hw *ah) 507{ 508 struct ath_common *common = ath9k_hw_common(ah); 509 u32 sum; 510 int i; 511 u16 eeval; 512 static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW }; 513 514 sum = 0; 515 for (i = 0; i < 3; i++) { 516 eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]); 517 sum += eeval; 518 common->macaddr[2 * i] = eeval >> 8; 519 common->macaddr[2 * i + 1] = eeval & 0xff; 520 } 521 if (sum == 0 || sum == 0xffff * 3) 522 return -EADDRNOTAVAIL; 523 524 return 0; 525} 526 527static int ath9k_hw_post_init(struct ath_hw *ah) 528{ 529 struct ath_common *common = ath9k_hw_common(ah); 530 int ecode; 531 532 if (common->bus_ops->ath_bus_type != ATH_USB) { 533 if (!ath9k_hw_chip_test(ah)) 534 return -ENODEV; 535 } 536 537 if (!AR_SREV_9300_20_OR_LATER(ah)) { 538 ecode = ar9002_hw_rf_claim(ah); 539 if (ecode != 0) 540 return ecode; 541 } 542 543 ecode = ath9k_hw_eeprom_init(ah); 544 if (ecode != 0) 545 return ecode; 546 547 ath_dbg(ath9k_hw_common(ah), CONFIG, "Eeprom VER: %d, REV: %d\n", 548 ah->eep_ops->get_eeprom_ver(ah), 549 ah->eep_ops->get_eeprom_rev(ah)); 550 551 ath9k_hw_ani_init(ah); 552 553 return 0; 554} 555 556static int ath9k_hw_attach_ops(struct ath_hw *ah) 557{ 558 if (!AR_SREV_9300_20_OR_LATER(ah)) 559 return ar9002_hw_attach_ops(ah); 560 561 ar9003_hw_attach_ops(ah); 562 return 0; 563} 564 565/* Called for all hardware families */ 566static int __ath9k_hw_init(struct ath_hw *ah) 567{ 568 struct ath_common *common = ath9k_hw_common(ah); 569 int r = 0; 570 571 ath9k_hw_read_revisions(ah); 572 573 /* 574 * Read back AR_WA into a permanent copy and set bits 14 and 17. 575 * We need to do this to avoid RMW of this register. We cannot 576 * read the reg when chip is asleep. 
577 */ 578 ah->WARegVal = REG_READ(ah, AR_WA); 579 ah->WARegVal |= (AR_WA_D3_L1_DISABLE | 580 AR_WA_ASPM_TIMER_BASED_DISABLE); 581 582 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 583 ath_err(common, "Couldn't reset chip\n"); 584 return -EIO; 585 } 586 587 if (AR_SREV_9462(ah)) 588 ah->WARegVal &= ~AR_WA_D3_L1_DISABLE; 589 590 if (AR_SREV_9565(ah)) { 591 ah->WARegVal |= AR_WA_BIT22; 592 REG_WRITE(ah, AR_WA, ah->WARegVal); 593 } 594 595 ath9k_hw_init_defaults(ah); 596 ath9k_hw_init_config(ah); 597 598 r = ath9k_hw_attach_ops(ah); 599 if (r) 600 return r; 601 602 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { 603 ath_err(common, "Couldn't wakeup chip\n"); 604 return -EIO; 605 } 606 607 if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) { 608 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI || 609 ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) && 610 !ah->is_pciexpress)) { 611 ah->config.serialize_regmode = 612 SER_REG_MODE_ON; 613 } else { 614 ah->config.serialize_regmode = 615 SER_REG_MODE_OFF; 616 } 617 } 618 619 ath_dbg(common, RESET, "serialize_regmode is %d\n", 620 ah->config.serialize_regmode); 621 622 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) 623 ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1; 624 else 625 ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD; 626 627 switch (ah->hw_version.macVersion) { 628 case AR_SREV_VERSION_5416_PCI: 629 case AR_SREV_VERSION_5416_PCIE: 630 case AR_SREV_VERSION_9160: 631 case AR_SREV_VERSION_9100: 632 case AR_SREV_VERSION_9280: 633 case AR_SREV_VERSION_9285: 634 case AR_SREV_VERSION_9287: 635 case AR_SREV_VERSION_9271: 636 case AR_SREV_VERSION_9300: 637 case AR_SREV_VERSION_9330: 638 case AR_SREV_VERSION_9485: 639 case AR_SREV_VERSION_9340: 640 case AR_SREV_VERSION_9462: 641 case AR_SREV_VERSION_9550: 642 case AR_SREV_VERSION_9565: 643 break; 644 default: 645 ath_err(common, 646 "Mac Chip Rev 0x%02x.%x is not supported by this driver\n", 647 ah->hw_version.macVersion, ah->hw_version.macRev); 648 return -EOPNOTSUPP; 649 } 650 651 if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) || 652 AR_SREV_9330(ah) || AR_SREV_9550(ah)) 653 ah->is_pciexpress = false; 654 655 ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID); 656 ath9k_hw_init_cal_settings(ah); 657 658 ah->ani_function = ATH9K_ANI_ALL; 659 if (AR_SREV_9280_20_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah)) 660 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; 661 if (!AR_SREV_9300_20_OR_LATER(ah)) 662 ah->ani_function &= ~ATH9K_ANI_MRC_CCK; 663 664 if (!ah->is_pciexpress) 665 ath9k_hw_disablepcie(ah); 666 667 r = ath9k_hw_post_init(ah); 668 if (r) 669 return r; 670 671 ath9k_hw_init_mode_gain_regs(ah); 672 r = ath9k_hw_fill_cap_info(ah); 673 if (r) 674 return r; 675 676 r = ath9k_hw_init_macaddr(ah); 677 if (r) { 678 ath_err(common, "Failed to initialize MAC address\n"); 679 return r; 680 } 681 682 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) 683 ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S); 684 else 685 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); 686 687 if (AR_SREV_9330(ah)) 688 ah->bb_watchdog_timeout_ms = 85; 689 else 690 ah->bb_watchdog_timeout_ms = 25; 691 692 common->state = ATH_HW_INITIALIZED; 693 694 return 0; 695} 696 697int ath9k_hw_init(struct ath_hw *ah) 698{ 699 int ret; 700 struct ath_common *common = ath9k_hw_common(ah); 701 702 /* These are all the AR5008/AR9001/AR9002/AR9003 hardware family of chipsets */ 703 switch (ah->hw_version.devid) { 704 case AR5416_DEVID_PCI: 705 case AR5416_DEVID_PCIE: 
706 case AR5416_AR9100_DEVID: 707 case AR9160_DEVID_PCI: 708 case AR9280_DEVID_PCI: 709 case AR9280_DEVID_PCIE: 710 case AR9285_DEVID_PCIE: 711 case AR9287_DEVID_PCI: 712 case AR9287_DEVID_PCIE: 713 case AR2427_DEVID_PCIE: 714 case AR9300_DEVID_PCIE: 715 case AR9300_DEVID_AR9485_PCIE: 716 case AR9300_DEVID_AR9330: 717 case AR9300_DEVID_AR9340: 718 case AR9300_DEVID_QCA955X: 719 case AR9300_DEVID_AR9580: 720 case AR9300_DEVID_AR9462: 721 case AR9485_DEVID_AR1111: 722 case AR9300_DEVID_AR9565: 723 break; 724 default: 725 if (common->bus_ops->ath_bus_type == ATH_USB) 726 break; 727 ath_err(common, "Hardware device ID 0x%04x not supported\n", 728 ah->hw_version.devid); 729 return -EOPNOTSUPP; 730 } 731 732 ret = __ath9k_hw_init(ah); 733 if (ret) { 734 ath_err(common, 735 "Unable to initialize hardware; initialization status: %d\n", 736 ret); 737 return ret; 738 } 739 740 return 0; 741} 742EXPORT_SYMBOL(ath9k_hw_init); 743 744static void ath9k_hw_init_qos(struct ath_hw *ah) 745{ 746 ENABLE_REGWRITE_BUFFER(ah); 747 748 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa); 749 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210); 750 751 REG_WRITE(ah, AR_QOS_NO_ACK, 752 SM(2, AR_QOS_NO_ACK_TWO_BIT) | 753 SM(5, AR_QOS_NO_ACK_BIT_OFF) | 754 SM(0, AR_QOS_NO_ACK_BYTE_OFF)); 755 756 REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL); 757 REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF); 758 REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF); 759 REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF); 760 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); 761 762 REGWRITE_BUFFER_FLUSH(ah); 763} 764 765u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah) 766{ 767 struct ath_common *common = ath9k_hw_common(ah); 768 int i = 0; 769 770 REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK); 771 udelay(100); 772 REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK); 773 774 while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) { 775 776 udelay(100); 777 778 if (WARN_ON_ONCE(i >= 100)) { 779 ath_err(common, "PLL4 meaurement not done\n"); 780 break; 781 } 782 783 i++; 784 } 785 786 return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3; 787} 788EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc); 789 790static void ath9k_hw_init_pll(struct ath_hw *ah, 791 struct ath9k_channel *chan) 792{ 793 u32 pll; 794 795 if (AR_SREV_9485(ah) || AR_SREV_9565(ah)) { 796 /* program BB PLL ki and kd value, ki=0x4, kd=0x40 */ 797 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, 798 AR_CH0_BB_DPLL2_PLL_PWD, 0x1); 799 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, 800 AR_CH0_DPLL2_KD, 0x40); 801 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, 802 AR_CH0_DPLL2_KI, 0x4); 803 804 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1, 805 AR_CH0_BB_DPLL1_REFDIV, 0x5); 806 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1, 807 AR_CH0_BB_DPLL1_NINI, 0x58); 808 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1, 809 AR_CH0_BB_DPLL1_NFRAC, 0x0); 810 811 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, 812 AR_CH0_BB_DPLL2_OUTDIV, 0x1); 813 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, 814 AR_CH0_BB_DPLL2_LOCAL_PLL, 0x1); 815 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, 816 AR_CH0_BB_DPLL2_EN_NEGTRIG, 0x1); 817 818 /* program BB PLL phase_shift to 0x6 */ 819 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3, 820 AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x6); 821 822 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, 823 AR_CH0_BB_DPLL2_PLL_PWD, 0x0); 824 udelay(1000); 825 } else if (AR_SREV_9330(ah)) { 826 u32 ddr_dpll2, pll_control2, kd; 827 828 if (ah->is_clk_25mhz) { 829 ddr_dpll2 = 0x18e82f01; 830 pll_control2 = 0xe04a3d; 831 kd = 0x1d; 832 } else { 833 ddr_dpll2 = 0x19e82f01; 834 pll_control2 = 0x886666; 835 kd = 0x3d; 836 } 837 838 /* program DDR PLL ki and kd value */ 839 REG_WRITE(ah, AR_CH0_DDR_DPLL2, ddr_dpll2); 840 841 /* 
program DDR PLL phase_shift */ 842 REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3, 843 AR_CH0_DPLL3_PHASE_SHIFT, 0x1); 844 845 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c); 846 udelay(1000); 847 848 /* program refdiv, nint, frac to RTC register */ 849 REG_WRITE(ah, AR_RTC_PLL_CONTROL2, pll_control2); 850 851 /* program BB PLL kd and ki value */ 852 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KD, kd); 853 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KI, 0x06); 854 855 /* program BB PLL phase_shift */ 856 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3, 857 AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1); 858 } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) { 859 u32 regval, pll2_divint, pll2_divfrac, refdiv; 860 861 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c); 862 udelay(1000); 863 864 REG_SET_BIT(ah, AR_PHY_PLL_MODE, 0x1 << 16); 865 udelay(100); 866 867 if (ah->is_clk_25mhz) { 868 pll2_divint = 0x54; 869 pll2_divfrac = 0x1eb85; 870 refdiv = 3; 871 } else { 872 if (AR_SREV_9340(ah)) { 873 pll2_divint = 88; 874 pll2_divfrac = 0; 875 refdiv = 5; 876 } else { 877 pll2_divint = 0x11; 878 pll2_divfrac = 0x26666; 879 refdiv = 1; 880 } 881 } 882 883 regval = REG_READ(ah, AR_PHY_PLL_MODE); 884 regval |= (0x1 << 16); 885 REG_WRITE(ah, AR_PHY_PLL_MODE, regval); 886 udelay(100); 887 888 REG_WRITE(ah, AR_PHY_PLL_CONTROL, (refdiv << 27) | 889 (pll2_divint << 18) | pll2_divfrac); 890 udelay(100); 891 892 regval = REG_READ(ah, AR_PHY_PLL_MODE); 893 if (AR_SREV_9340(ah)) 894 regval = (regval & 0x80071fff) | (0x1 << 30) | 895 (0x1 << 13) | (0x4 << 26) | (0x18 << 19); 896 else 897 regval = (regval & 0x80071fff) | (0x3 << 30) | 898 (0x1 << 13) | (0x4 << 26) | (0x60 << 19); 899 REG_WRITE(ah, AR_PHY_PLL_MODE, regval); 900 REG_WRITE(ah, AR_PHY_PLL_MODE, 901 REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff); 902 udelay(1000); 903 } 904 905 pll = ath9k_hw_compute_pll_control(ah, chan); 906 if (AR_SREV_9565(ah)) 907 pll |= 0x40000; 908 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 909 910 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) || 911 AR_SREV_9550(ah)) 912 udelay(1000); 913 914 /* Switch the core clock for ar9271 to 117Mhz */ 915 if (AR_SREV_9271(ah)) { 916 udelay(500); 917 REG_WRITE(ah, 0x50040, 0x304); 918 } 919 920 udelay(RTC_PLL_SETTLE_DELAY); 921 922 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); 923 924 if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) { 925 if (ah->is_clk_25mhz) { 926 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); 927 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); 928 REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); 929 } else { 930 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); 931 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); 932 REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); 933 } 934 udelay(100); 935 } 936} 937 938static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, 939 enum nl80211_iftype opmode) 940{ 941 u32 sync_default = AR_INTR_SYNC_DEFAULT; 942 u32 imr_reg = AR_IMR_TXERR | 943 AR_IMR_TXURN | 944 AR_IMR_RXERR | 945 AR_IMR_RXORN | 946 AR_IMR_BCNMISC; 947 948 if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) 949 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; 950 951 if (AR_SREV_9300_20_OR_LATER(ah)) { 952 imr_reg |= AR_IMR_RXOK_HP; 953 if (ah->config.rx_intr_mitigation) 954 imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; 955 else 956 imr_reg |= AR_IMR_RXOK_LP; 957 958 } else { 959 if (ah->config.rx_intr_mitigation) 960 imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; 961 else 962 imr_reg |= AR_IMR_RXOK; 963 } 964 965 if (ah->config.tx_intr_mitigation) 966 imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR; 967 else 968 imr_reg |= AR_IMR_TXOK; 969 970 
ENABLE_REGWRITE_BUFFER(ah); 971 972 REG_WRITE(ah, AR_IMR, imr_reg); 973 ah->imrs2_reg |= AR_IMR_S2_GTT; 974 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); 975 976 if (!AR_SREV_9100(ah)) { 977 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF); 978 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default); 979 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0); 980 } 981 982 REGWRITE_BUFFER_FLUSH(ah); 983 984 if (AR_SREV_9300_20_OR_LATER(ah)) { 985 REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0); 986 REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0); 987 REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0); 988 REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0); 989 } 990} 991 992static void ath9k_hw_set_sifs_time(struct ath_hw *ah, u32 us) 993{ 994 u32 val = ath9k_hw_mac_to_clks(ah, us - 2); 995 val = min(val, (u32) 0xFFFF); 996 REG_WRITE(ah, AR_D_GBL_IFS_SIFS, val); 997} 998 999static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us) 1000{ 1001 u32 val = ath9k_hw_mac_to_clks(ah, us); 1002 val = min(val, (u32) 0xFFFF); 1003 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val); 1004} 1005 1006static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) 1007{ 1008 u32 val = ath9k_hw_mac_to_clks(ah, us); 1009 val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK)); 1010 REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val); 1011} 1012 1013static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) 1014{ 1015 u32 val = ath9k_hw_mac_to_clks(ah, us); 1016 val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS)); 1017 REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val); 1018} 1019 1020static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) 1021{ 1022 if (tu > 0xFFFF) { 1023 ath_dbg(ath9k_hw_common(ah), XMIT, "bad global tx timeout %u\n", 1024 tu); 1025 ah->globaltxtimeout = (u32) -1; 1026 return false; 1027 } else { 1028 REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu); 1029 ah->globaltxtimeout = tu; 1030 return true; 1031 } 1032} 1033 1034void ath9k_hw_init_global_settings(struct ath_hw *ah) 1035{ 1036 struct ath_common *common = ath9k_hw_common(ah); 1037 struct ieee80211_conf *conf = &common->hw->conf; 1038 const struct ath9k_channel *chan = ah->curchan; 1039 int acktimeout, ctstimeout, ack_offset = 0; 1040 int slottime; 1041 int sifstime; 1042 int rx_lat = 0, tx_lat = 0, eifs = 0; 1043 u32 reg; 1044 1045 ath_dbg(ath9k_hw_common(ah), RESET, "ah->misc_mode 0x%x\n", 1046 ah->misc_mode); 1047 1048 if (!chan) 1049 return; 1050 1051 if (ah->misc_mode != 0) 1052 REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode); 1053 1054 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 1055 rx_lat = 41; 1056 else 1057 rx_lat = 37; 1058 tx_lat = 54; 1059 1060 if (IS_CHAN_5GHZ(chan)) 1061 sifstime = 16; 1062 else 1063 sifstime = 10; 1064 1065 if (IS_CHAN_HALF_RATE(chan)) { 1066 eifs = 175; 1067 rx_lat *= 2; 1068 tx_lat *= 2; 1069 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 1070 tx_lat += 11; 1071 1072 sifstime *= 2; 1073 ack_offset = 16; 1074 slottime = 13; 1075 } else if (IS_CHAN_QUARTER_RATE(chan)) { 1076 eifs = 340; 1077 rx_lat = (rx_lat * 4) - 1; 1078 tx_lat *= 4; 1079 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 1080 tx_lat += 22; 1081 1082 sifstime *= 4; 1083 ack_offset = 32; 1084 slottime = 21; 1085 } else { 1086 if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) { 1087 eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO; 1088 reg = AR_USEC_ASYNC_FIFO; 1089 } else { 1090 eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS)/ 1091 common->clockrate; 1092 reg = REG_READ(ah, AR_USEC); 1093 } 1094 rx_lat = MS(reg, AR_USEC_RX_LAT); 1095 tx_lat = MS(reg, AR_USEC_TX_LAT); 1096 1097 slottime = ah->slottime; 1098 } 1099 1100 /* As defined 
by IEEE 802.11-2007 17.3.8.6 */ 1101 slottime += 3 * ah->coverage_class; 1102 acktimeout = slottime + sifstime + ack_offset; 1103 ctstimeout = acktimeout; 1104 1105 /* 1106 * Workaround for early ACK timeouts, add an offset to match the 1107 * initval's 64us ack timeout value. Use 48us for the CTS timeout. 1108 * This was initially only meant to work around an issue with delayed 1109 * BA frames in some implementations, but it has been found to fix ACK 1110 * timeout issues in other cases as well. 1111 */ 1112 if (conf->chandef.chan && 1113 conf->chandef.chan->band == IEEE80211_BAND_2GHZ && 1114 !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) { 1115 acktimeout += 64 - sifstime - ah->slottime; 1116 ctstimeout += 48 - sifstime - ah->slottime; 1117 } 1118 1119 1120 ath9k_hw_set_sifs_time(ah, sifstime); 1121 ath9k_hw_setslottime(ah, slottime); 1122 ath9k_hw_set_ack_timeout(ah, acktimeout); 1123 ath9k_hw_set_cts_timeout(ah, ctstimeout); 1124 if (ah->globaltxtimeout != (u32) -1) 1125 ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout); 1126 1127 REG_WRITE(ah, AR_D_GBL_IFS_EIFS, ath9k_hw_mac_to_clks(ah, eifs)); 1128 REG_RMW(ah, AR_USEC, 1129 (common->clockrate - 1) | 1130 SM(rx_lat, AR_USEC_RX_LAT) | 1131 SM(tx_lat, AR_USEC_TX_LAT), 1132 AR_USEC_TX_LAT | AR_USEC_RX_LAT | AR_USEC_USEC); 1133 1134} 1135EXPORT_SYMBOL(ath9k_hw_init_global_settings); 1136 1137void ath9k_hw_deinit(struct ath_hw *ah) 1138{ 1139 struct ath_common *common = ath9k_hw_common(ah); 1140 1141 if (common->state < ATH_HW_INITIALIZED) 1142 return; 1143 1144 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); 1145} 1146EXPORT_SYMBOL(ath9k_hw_deinit); 1147 1148/*******/ 1149/* INI */ 1150/*******/ 1151 1152u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan) 1153{ 1154 u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band); 1155 1156 if (IS_CHAN_B(chan)) 1157 ctl |= CTL_11B; 1158 else if (IS_CHAN_G(chan)) 1159 ctl |= CTL_11G; 1160 else 1161 ctl |= CTL_11A; 1162 1163 return ctl; 1164} 1165 1166/****************************************/ 1167/* Reset and Channel Switching Routines */ 1168/****************************************/ 1169 1170static inline void ath9k_hw_set_dma(struct ath_hw *ah) 1171{ 1172 struct ath_common *common = ath9k_hw_common(ah); 1173 int txbuf_size; 1174 1175 ENABLE_REGWRITE_BUFFER(ah); 1176 1177 /* 1178 * set AHB_MODE not to do cacheline prefetches 1179 */ 1180 if (!AR_SREV_9300_20_OR_LATER(ah)) 1181 REG_SET_BIT(ah, AR_AHB_MODE, AR_AHB_PREFETCH_RD_EN); 1182 1183 /* 1184 * let mac dma reads be in 128 byte chunks 1185 */ 1186 REG_RMW(ah, AR_TXCFG, AR_TXCFG_DMASZ_128B, AR_TXCFG_DMASZ_MASK); 1187 1188 REGWRITE_BUFFER_FLUSH(ah); 1189 1190 /* 1191 * Restore TX Trigger Level to its pre-reset value. 1192 * The initial value depends on whether aggregation is enabled, and is 1193 * adjusted whenever underruns are detected. 
1194 */ 1195 if (!AR_SREV_9300_20_OR_LATER(ah)) 1196 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level); 1197 1198 ENABLE_REGWRITE_BUFFER(ah); 1199 1200 /* 1201 * let mac dma writes be in 128 byte chunks 1202 */ 1203 REG_RMW(ah, AR_RXCFG, AR_RXCFG_DMASZ_128B, AR_RXCFG_DMASZ_MASK); 1204 1205 /* 1206 * Setup receive FIFO threshold to hold off TX activities 1207 */ 1208 REG_WRITE(ah, AR_RXFIFO_CFG, 0x200); 1209 1210 if (AR_SREV_9300_20_OR_LATER(ah)) { 1211 REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1); 1212 REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1); 1213 1214 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - 1215 ah->caps.rx_status_len); 1216 } 1217 1218 /* 1219 * reduce the number of usable entries in PCU TXBUF to avoid 1220 * wrap around issues. 1221 */ 1222 if (AR_SREV_9285(ah)) { 1223 /* For AR9285 the number of Fifos are reduced to half. 1224 * So set the usable tx buf size also to half to 1225 * avoid data/delimiter underruns 1226 */ 1227 txbuf_size = AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE; 1228 } else if (AR_SREV_9340_13_OR_LATER(ah)) { 1229 /* Uses fewer entries for AR934x v1.3+ to prevent rx overruns */ 1230 txbuf_size = AR_9340_PCU_TXBUF_CTRL_USABLE_SIZE; 1231 } else { 1232 txbuf_size = AR_PCU_TXBUF_CTRL_USABLE_SIZE; 1233 } 1234 1235 if (!AR_SREV_9271(ah)) 1236 REG_WRITE(ah, AR_PCU_TXBUF_CTRL, txbuf_size); 1237 1238 REGWRITE_BUFFER_FLUSH(ah); 1239 1240 if (AR_SREV_9300_20_OR_LATER(ah)) 1241 ath9k_hw_reset_txstatus_ring(ah); 1242} 1243 1244static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) 1245{ 1246 u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC; 1247 u32 set = AR_STA_ID1_KSRCH_MODE; 1248 1249 switch (opmode) { 1250 case NL80211_IFTYPE_ADHOC: 1251 set |= AR_STA_ID1_ADHOC; 1252 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); 1253 break; 1254 case NL80211_IFTYPE_MESH_POINT: 1255 case NL80211_IFTYPE_AP: 1256 set |= AR_STA_ID1_STA_AP; 1257 /* fall through */ 1258 case NL80211_IFTYPE_STATION: 1259 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); 1260 break; 1261 default: 1262 if (!ah->is_monitoring) 1263 set = 0; 1264 break; 1265 } 1266 REG_RMW(ah, AR_STA_ID1, set, mask); 1267} 1268 1269void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled, 1270 u32 *coef_mantissa, u32 *coef_exponent) 1271{ 1272 u32 coef_exp, coef_man; 1273 1274 for (coef_exp = 31; coef_exp > 0; coef_exp--) 1275 if ((coef_scaled >> coef_exp) & 0x1) 1276 break; 1277 1278 coef_exp = 14 - (coef_exp - COEF_SCALE_S); 1279 1280 coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1)); 1281 1282 *coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp); 1283 *coef_exponent = coef_exp - 16; 1284} 1285 1286static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) 1287{ 1288 u32 rst_flags; 1289 u32 tmpReg; 1290 1291 if (AR_SREV_9100(ah)) { 1292 REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK, 1293 AR_RTC_DERIVED_CLK_PERIOD, 1); 1294 (void)REG_READ(ah, AR_RTC_DERIVED_CLK); 1295 } 1296 1297 ENABLE_REGWRITE_BUFFER(ah); 1298 1299 if (AR_SREV_9300_20_OR_LATER(ah)) { 1300 REG_WRITE(ah, AR_WA, ah->WARegVal); 1301 udelay(10); 1302 } 1303 1304 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | 1305 AR_RTC_FORCE_WAKE_ON_INT); 1306 1307 if (AR_SREV_9100(ah)) { 1308 rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD | 1309 AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET; 1310 } else { 1311 tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE); 1312 if (AR_SREV_9340(ah)) 1313 tmpReg &= AR9340_INTR_SYNC_LOCAL_TIMEOUT; 1314 else 1315 tmpReg &= AR_INTR_SYNC_LOCAL_TIMEOUT | 1316 
AR_INTR_SYNC_RADM_CPL_TIMEOUT; 1317 1318 if (tmpReg) { 1319 u32 val; 1320 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); 1321 1322 val = AR_RC_HOSTIF; 1323 if (!AR_SREV_9300_20_OR_LATER(ah)) 1324 val |= AR_RC_AHB; 1325 REG_WRITE(ah, AR_RC, val); 1326 1327 } else if (!AR_SREV_9300_20_OR_LATER(ah)) 1328 REG_WRITE(ah, AR_RC, AR_RC_AHB); 1329 1330 rst_flags = AR_RTC_RC_MAC_WARM; 1331 if (type == ATH9K_RESET_COLD) 1332 rst_flags |= AR_RTC_RC_MAC_COLD; 1333 } 1334 1335 if (AR_SREV_9330(ah)) { 1336 int npend = 0; 1337 int i; 1338 1339 /* AR9330 WAR: 1340 * call external reset function to reset WMAC if: 1341 * - doing a cold reset 1342 * - we have pending frames in the TX queues 1343 */ 1344 1345 for (i = 0; i < AR_NUM_QCU; i++) { 1346 npend = ath9k_hw_numtxpending(ah, i); 1347 if (npend) 1348 break; 1349 } 1350 1351 if (ah->external_reset && 1352 (npend || type == ATH9K_RESET_COLD)) { 1353 int reset_err = 0; 1354 1355 ath_dbg(ath9k_hw_common(ah), RESET, 1356 "reset MAC via external reset\n"); 1357 1358 reset_err = ah->external_reset(); 1359 if (reset_err) { 1360 ath_err(ath9k_hw_common(ah), 1361 "External reset failed, err=%d\n", 1362 reset_err); 1363 return false; 1364 } 1365 1366 REG_WRITE(ah, AR_RTC_RESET, 1); 1367 } 1368 } 1369 1370 if (ath9k_hw_mci_is_enabled(ah)) 1371 ar9003_mci_check_gpm_offset(ah); 1372 1373 REG_WRITE(ah, AR_RTC_RC, rst_flags); 1374 1375 REGWRITE_BUFFER_FLUSH(ah); 1376 1377 udelay(50); 1378 1379 REG_WRITE(ah, AR_RTC_RC, 0); 1380 if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) { 1381 ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n"); 1382 return false; 1383 } 1384 1385 if (!AR_SREV_9100(ah)) 1386 REG_WRITE(ah, AR_RC, 0); 1387 1388 if (AR_SREV_9100(ah)) 1389 udelay(50); 1390 1391 return true; 1392} 1393 1394static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) 1395{ 1396 ENABLE_REGWRITE_BUFFER(ah); 1397 1398 if (AR_SREV_9300_20_OR_LATER(ah)) { 1399 REG_WRITE(ah, AR_WA, ah->WARegVal); 1400 udelay(10); 1401 } 1402 1403 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | 1404 AR_RTC_FORCE_WAKE_ON_INT); 1405 1406 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah)) 1407 REG_WRITE(ah, AR_RC, AR_RC_AHB); 1408 1409 REG_WRITE(ah, AR_RTC_RESET, 0); 1410 1411 REGWRITE_BUFFER_FLUSH(ah); 1412 1413 if (!AR_SREV_9300_20_OR_LATER(ah)) 1414 udelay(2); 1415 1416 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah)) 1417 REG_WRITE(ah, AR_RC, 0); 1418 1419 REG_WRITE(ah, AR_RTC_RESET, 1); 1420 1421 if (!ath9k_hw_wait(ah, 1422 AR_RTC_STATUS, 1423 AR_RTC_STATUS_M, 1424 AR_RTC_STATUS_ON, 1425 AH_WAIT_TIMEOUT)) { 1426 ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n"); 1427 return false; 1428 } 1429 1430 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); 1431} 1432 1433static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) 1434{ 1435 bool ret = false; 1436 1437 if (AR_SREV_9300_20_OR_LATER(ah)) { 1438 REG_WRITE(ah, AR_WA, ah->WARegVal); 1439 udelay(10); 1440 } 1441 1442 REG_WRITE(ah, AR_RTC_FORCE_WAKE, 1443 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); 1444 1445 if (!ah->reset_power_on) 1446 type = ATH9K_RESET_POWER_ON; 1447 1448 switch (type) { 1449 case ATH9K_RESET_POWER_ON: 1450 ret = ath9k_hw_set_reset_power_on(ah); 1451 if (ret) 1452 ah->reset_power_on = true; 1453 break; 1454 case ATH9K_RESET_WARM: 1455 case ATH9K_RESET_COLD: 1456 ret = ath9k_hw_set_reset(ah, type); 1457 break; 1458 default: 1459 break; 1460 } 1461 1462 return ret; 1463} 1464 1465static bool ath9k_hw_chip_reset(struct ath_hw *ah, 1466 struct ath9k_channel *chan) 
1467{ 1468 int reset_type = ATH9K_RESET_WARM; 1469 1470 if (AR_SREV_9280(ah)) { 1471 if (ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) 1472 reset_type = ATH9K_RESET_POWER_ON; 1473 else 1474 reset_type = ATH9K_RESET_COLD; 1475 } else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) || 1476 (REG_READ(ah, AR_CR) & AR_CR_RXE)) 1477 reset_type = ATH9K_RESET_COLD; 1478 1479 if (!ath9k_hw_set_reset_reg(ah, reset_type)) 1480 return false; 1481 1482 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1483 return false; 1484 1485 ah->chip_fullsleep = false; 1486 1487 if (AR_SREV_9330(ah)) 1488 ar9003_hw_internal_regulator_apply(ah); 1489 ath9k_hw_init_pll(ah, chan); 1490 ath9k_hw_set_rfmode(ah, chan); 1491 1492 return true; 1493} 1494 1495static bool ath9k_hw_channel_change(struct ath_hw *ah, 1496 struct ath9k_channel *chan) 1497{ 1498 struct ath_common *common = ath9k_hw_common(ah); 1499 u32 qnum; 1500 int r; 1501 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1502 bool band_switch, mode_diff; 1503 u8 ini_reloaded; 1504 1505 band_switch = (chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ)) != 1506 (ah->curchan->channelFlags & (CHANNEL_2GHZ | 1507 CHANNEL_5GHZ)); 1508 mode_diff = (chan->chanmode != ah->curchan->chanmode); 1509 1510 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { 1511 if (ath9k_hw_numtxpending(ah, qnum)) { 1512 ath_dbg(common, QUEUE, 1513 "Transmit frames pending on queue %d\n", qnum); 1514 return false; 1515 } 1516 } 1517 1518 if (!ath9k_hw_rfbus_req(ah)) { 1519 ath_err(common, "Could not kill baseband RX\n"); 1520 return false; 1521 } 1522 1523 if (edma && (band_switch || mode_diff)) { 1524 ath9k_hw_mark_phy_inactive(ah); 1525 udelay(5); 1526 1527 ath9k_hw_init_pll(ah, NULL); 1528 1529 if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) { 1530 ath_err(common, "Failed to do fast channel change\n"); 1531 return false; 1532 } 1533 } 1534 1535 ath9k_hw_set_channel_regs(ah, chan); 1536 1537 r = ath9k_hw_rf_set_freq(ah, chan); 1538 if (r) { 1539 ath_err(common, "Failed to set channel\n"); 1540 return false; 1541 } 1542 ath9k_hw_set_clockrate(ah); 1543 ath9k_hw_apply_txpower(ah, chan, false); 1544 ath9k_hw_rfbus_done(ah); 1545 1546 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1547 ath9k_hw_set_delta_slope(ah, chan); 1548 1549 ath9k_hw_spur_mitigate_freq(ah, chan); 1550 1551 if (edma && (band_switch || mode_diff)) { 1552 ah->ah_flags |= AH_FASTCC; 1553 if (band_switch || ini_reloaded) 1554 ah->eep_ops->set_board_values(ah, chan); 1555 1556 ath9k_hw_init_bb(ah, chan); 1557 1558 if (band_switch || ini_reloaded) 1559 ath9k_hw_init_cal(ah, chan); 1560 ah->ah_flags &= ~AH_FASTCC; 1561 } 1562 1563 return true; 1564} 1565 1566static void ath9k_hw_apply_gpio_override(struct ath_hw *ah) 1567{ 1568 u32 gpio_mask = ah->gpio_mask; 1569 int i; 1570 1571 for (i = 0; gpio_mask; i++, gpio_mask >>= 1) { 1572 if (!(gpio_mask & 1)) 1573 continue; 1574 1575 ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 1576 ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i))); 1577 } 1578} 1579 1580static bool ath9k_hw_check_dcs(u32 dma_dbg, u32 num_dcu_states, 1581 int *hang_state, int *hang_pos) 1582{ 1583 static u32 dcu_chain_state[] = {5, 6, 9}; /* DCU chain stuck states */ 1584 u32 chain_state, dcs_pos, i; 1585 1586 for (dcs_pos = 0; dcs_pos < num_dcu_states; dcs_pos++) { 1587 chain_state = (dma_dbg >> (5 * dcs_pos)) & 0x1f; 1588 for (i = 0; i < 3; i++) { 1589 if (chain_state == dcu_chain_state[i]) { 1590 *hang_state = chain_state; 1591 *hang_pos = dcs_pos; 1592 return true; 1593 } 1594 } 1595 } 1596 return 
false; 1597} 1598 1599#define DCU_COMPLETE_STATE 1 1600#define DCU_COMPLETE_STATE_MASK 0x3 1601#define NUM_STATUS_READS 50 1602static bool ath9k_hw_detect_mac_hang(struct ath_hw *ah) 1603{ 1604 u32 chain_state, comp_state, dcs_reg = AR_DMADBG_4; 1605 u32 i, hang_pos, hang_state, num_state = 6; 1606 1607 comp_state = REG_READ(ah, AR_DMADBG_6); 1608 1609 if ((comp_state & DCU_COMPLETE_STATE_MASK) != DCU_COMPLETE_STATE) { 1610 ath_dbg(ath9k_hw_common(ah), RESET, 1611 "MAC Hang signature not found at DCU complete\n"); 1612 return false; 1613 } 1614 1615 chain_state = REG_READ(ah, dcs_reg); 1616 if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos)) 1617 goto hang_check_iter; 1618 1619 dcs_reg = AR_DMADBG_5; 1620 num_state = 4; 1621 chain_state = REG_READ(ah, dcs_reg); 1622 if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos)) 1623 goto hang_check_iter; 1624 1625 ath_dbg(ath9k_hw_common(ah), RESET, 1626 "MAC Hang signature 1 not found\n"); 1627 return false; 1628 1629hang_check_iter: 1630 ath_dbg(ath9k_hw_common(ah), RESET, 1631 "DCU registers: chain %08x complete %08x Hang: state %d pos %d\n", 1632 chain_state, comp_state, hang_state, hang_pos); 1633 1634 for (i = 0; i < NUM_STATUS_READS; i++) { 1635 chain_state = REG_READ(ah, dcs_reg); 1636 chain_state = (chain_state >> (5 * hang_pos)) & 0x1f; 1637 comp_state = REG_READ(ah, AR_DMADBG_6); 1638 1639 if (((comp_state & DCU_COMPLETE_STATE_MASK) != 1640 DCU_COMPLETE_STATE) || 1641 (chain_state != hang_state)) 1642 return false; 1643 } 1644 1645 ath_dbg(ath9k_hw_common(ah), RESET, "MAC Hang signature 1 found\n"); 1646 1647 return true; 1648} 1649 1650bool ath9k_hw_check_alive(struct ath_hw *ah) 1651{ 1652 int count = 50; 1653 u32 reg; 1654 1655 if (AR_SREV_9300(ah)) 1656 return !ath9k_hw_detect_mac_hang(ah); 1657 1658 if (AR_SREV_9285_12_OR_LATER(ah)) 1659 return true; 1660 1661 do { 1662 reg = REG_READ(ah, AR_OBS_BUS_1); 1663 1664 if ((reg & 0x7E7FFFEF) == 0x00702400) 1665 continue; 1666 1667 switch (reg & 0x7E000B00) { 1668 case 0x1E000000: 1669 case 0x52000B00: 1670 case 0x18000B00: 1671 continue; 1672 default: 1673 return true; 1674 } 1675 } while (count-- > 0); 1676 1677 return false; 1678} 1679EXPORT_SYMBOL(ath9k_hw_check_alive); 1680 1681static void ath9k_hw_init_mfp(struct ath_hw *ah) 1682{ 1683 /* Setup MFP options for CCMP */ 1684 if (AR_SREV_9280_20_OR_LATER(ah)) { 1685 /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt 1686 * frames when constructing CCMP AAD. */ 1687 REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT, 1688 0xc7ff); 1689 ah->sw_mgmt_crypto = false; 1690 } else if (AR_SREV_9160_10_OR_LATER(ah)) { 1691 /* Disable hardware crypto for management frames */ 1692 REG_CLR_BIT(ah, AR_PCU_MISC_MODE2, 1693 AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE); 1694 REG_SET_BIT(ah, AR_PCU_MISC_MODE2, 1695 AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT); 1696 ah->sw_mgmt_crypto = true; 1697 } else { 1698 ah->sw_mgmt_crypto = true; 1699 } 1700} 1701 1702static void ath9k_hw_reset_opmode(struct ath_hw *ah, 1703 u32 macStaId1, u32 saveDefAntenna) 1704{ 1705 struct ath_common *common = ath9k_hw_common(ah); 1706 1707 ENABLE_REGWRITE_BUFFER(ah); 1708 1709 REG_RMW(ah, AR_STA_ID1, macStaId1 1710 | AR_STA_ID1_RTS_USE_DEF 1711 | (ah->config.ack_6mb ? 
AR_STA_ID1_ACKCTS_6MB : 0) 1712 | ah->sta_id1_defaults, 1713 ~AR_STA_ID1_SADH_MASK); 1714 ath_hw_setbssidmask(common); 1715 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna); 1716 ath9k_hw_write_associd(ah); 1717 REG_WRITE(ah, AR_ISR, ~0); 1718 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); 1719 1720 REGWRITE_BUFFER_FLUSH(ah); 1721 1722 ath9k_hw_set_operating_mode(ah, ah->opmode); 1723} 1724 1725static void ath9k_hw_init_queues(struct ath_hw *ah) 1726{ 1727 int i; 1728 1729 ENABLE_REGWRITE_BUFFER(ah); 1730 1731 for (i = 0; i < AR_NUM_DCU; i++) 1732 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i); 1733 1734 REGWRITE_BUFFER_FLUSH(ah); 1735 1736 ah->intr_txqs = 0; 1737 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 1738 ath9k_hw_resettxqueue(ah, i); 1739} 1740 1741/* 1742 * For big endian systems turn on swapping for descriptors 1743 */ 1744static void ath9k_hw_init_desc(struct ath_hw *ah) 1745{ 1746 struct ath_common *common = ath9k_hw_common(ah); 1747 1748 if (AR_SREV_9100(ah)) { 1749 u32 mask; 1750 mask = REG_READ(ah, AR_CFG); 1751 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) { 1752 ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n", 1753 mask); 1754 } else { 1755 mask = INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB; 1756 REG_WRITE(ah, AR_CFG, mask); 1757 ath_dbg(common, RESET, "Setting CFG 0x%x\n", 1758 REG_READ(ah, AR_CFG)); 1759 } 1760 } else { 1761 if (common->bus_ops->ath_bus_type == ATH_USB) { 1762 /* Configure AR9271 target WLAN */ 1763 if (AR_SREV_9271(ah)) 1764 REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB); 1765 else 1766 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD); 1767 } 1768#ifdef __BIG_ENDIAN 1769 else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) || 1770 AR_SREV_9550(ah)) 1771 REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0); 1772 else 1773 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD); 1774#endif 1775 } 1776} 1777 1778/* 1779 * Fast channel change: 1780 * (Change synthesizer based on channel freq without resetting chip) 1781 * 1782 * Don't do FCC when 1783 * - Flag is not set 1784 * - Chip is just coming out of full sleep 1785 * - Channel to be set is same as current channel 1786 * - Channel flags are different, (eg.,moving from 2GHz to 5GHz channel) 1787 */ 1788static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan) 1789{ 1790 struct ath_common *common = ath9k_hw_common(ah); 1791 int ret; 1792 1793 if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI) 1794 goto fail; 1795 1796 if (ah->chip_fullsleep) 1797 goto fail; 1798 1799 if (!ah->curchan) 1800 goto fail; 1801 1802 if (chan->channel == ah->curchan->channel) 1803 goto fail; 1804 1805 if ((ah->curchan->channelFlags | chan->channelFlags) & 1806 (CHANNEL_HALF | CHANNEL_QUARTER)) 1807 goto fail; 1808 1809 if ((chan->channelFlags & CHANNEL_ALL) != 1810 (ah->curchan->channelFlags & CHANNEL_ALL)) 1811 goto fail; 1812 1813 if (!ath9k_hw_check_alive(ah)) 1814 goto fail; 1815 1816 /* 1817 * For AR9462, make sure that calibration data for 1818 * re-using are present. 
1819 */ 1820 if (AR_SREV_9462(ah) && (ah->caldata && 1821 (!ah->caldata->done_txiqcal_once || 1822 !ah->caldata->done_txclcal_once || 1823 !ah->caldata->rtt_done))) 1824 goto fail; 1825 1826 ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n", 1827 ah->curchan->channel, chan->channel); 1828 1829 ret = ath9k_hw_channel_change(ah, chan); 1830 if (!ret) 1831 goto fail; 1832 1833 if (ath9k_hw_mci_is_enabled(ah)) 1834 ar9003_mci_2g5g_switch(ah, false); 1835 1836 ath9k_hw_loadnf(ah, ah->curchan); 1837 ath9k_hw_start_nfcal(ah, true); 1838 1839 if (AR_SREV_9271(ah)) 1840 ar9002_hw_load_ani_reg(ah, chan); 1841 1842 return 0; 1843fail: 1844 return -EINVAL; 1845} 1846 1847int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 1848 struct ath9k_hw_cal_data *caldata, bool fastcc) 1849{ 1850 struct ath_common *common = ath9k_hw_common(ah); 1851 u32 saveLedState; 1852 u32 saveDefAntenna; 1853 u32 macStaId1; 1854 u64 tsf = 0; 1855 int r; 1856 bool start_mci_reset = false; 1857 bool save_fullsleep = ah->chip_fullsleep; 1858 1859 if (ath9k_hw_mci_is_enabled(ah)) { 1860 start_mci_reset = ar9003_mci_start_reset(ah, chan); 1861 if (start_mci_reset) 1862 return 0; 1863 } 1864 1865 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1866 return -EIO; 1867 1868 if (ah->curchan && !ah->chip_fullsleep) 1869 ath9k_hw_getnf(ah, ah->curchan); 1870 1871 ah->caldata = caldata; 1872 if (caldata && (chan->channel != caldata->channel || 1873 chan->channelFlags != caldata->channelFlags || 1874 chan->chanmode != caldata->chanmode)) { 1875 /* Operating channel changed, reset channel calibration data */ 1876 memset(caldata, 0, sizeof(*caldata)); 1877 ath9k_init_nfcal_hist_buffer(ah, chan); 1878 } else if (caldata) { 1879 caldata->paprd_packet_sent = false; 1880 } 1881 ah->noise = ath9k_hw_getchan_noise(ah, chan); 1882 1883 if (fastcc) { 1884 r = ath9k_hw_do_fastcc(ah, chan); 1885 if (!r) 1886 return r; 1887 } 1888 1889 if (ath9k_hw_mci_is_enabled(ah)) 1890 ar9003_mci_stop_bt(ah, save_fullsleep); 1891 1892 saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA); 1893 if (saveDefAntenna == 0) 1894 saveDefAntenna = 1; 1895 1896 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; 1897 1898 /* For chips on which RTC reset is done, save TSF before it gets cleared */ 1899 if (AR_SREV_9100(ah) || 1900 (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))) 1901 tsf = ath9k_hw_gettsf64(ah); 1902 1903 saveLedState = REG_READ(ah, AR_CFG_LED) & 1904 (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL | 1905 AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW); 1906 1907 ath9k_hw_mark_phy_inactive(ah); 1908 1909 ah->paprd_table_write_done = false; 1910 1911 /* Only required on the first reset */ 1912 if (AR_SREV_9271(ah) && ah->htc_reset_init) { 1913 REG_WRITE(ah, 1914 AR9271_RESET_POWER_DOWN_CONTROL, 1915 AR9271_RADIO_RF_RST); 1916 udelay(50); 1917 } 1918 1919 if (!ath9k_hw_chip_reset(ah, chan)) { 1920 ath_err(common, "Chip reset failed\n"); 1921 return -EINVAL; 1922 } 1923 1924 /* Only required on the first reset */ 1925 if (AR_SREV_9271(ah) && ah->htc_reset_init) { 1926 ah->htc_reset_init = false; 1927 REG_WRITE(ah, 1928 AR9271_RESET_POWER_DOWN_CONTROL, 1929 AR9271_GATE_MAC_CTL); 1930 udelay(50); 1931 } 1932 1933 /* Restore TSF */ 1934 if (tsf) 1935 ath9k_hw_settsf64(ah, tsf); 1936 1937 if (AR_SREV_9280_20_OR_LATER(ah)) 1938 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); 1939 1940 if (!AR_SREV_9300_20_OR_LATER(ah)) 1941 ar9002_hw_enable_async_fifo(ah); 1942 1943 r = ath9k_hw_process_ini(ah, chan); 1944 if 
(r) 1945 return r; 1946 1947 if (ath9k_hw_mci_is_enabled(ah)) 1948 ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep); 1949 1950 /* 1951 * Some AR91xx SoC devices frequently fail to accept TSF writes 1952 * right after the chip reset. When that happens, write a new 1953 * value after the initvals have been applied, with an offset 1954 * based on measured time difference 1955 */ 1956 if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) { 1957 tsf += 1500; 1958 ath9k_hw_settsf64(ah, tsf); 1959 } 1960 1961 ath9k_hw_init_mfp(ah); 1962 1963 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1964 ath9k_hw_set_delta_slope(ah, chan); 1965 1966 ath9k_hw_spur_mitigate_freq(ah, chan); 1967 ah->eep_ops->set_board_values(ah, chan); 1968 1969 ath9k_hw_reset_opmode(ah, macStaId1, saveDefAntenna); 1970 1971 r = ath9k_hw_rf_set_freq(ah, chan); 1972 if (r) 1973 return r; 1974 1975 ath9k_hw_set_clockrate(ah); 1976 1977 ath9k_hw_init_queues(ah); 1978 ath9k_hw_init_interrupt_masks(ah, ah->opmode); 1979 ath9k_hw_ani_cache_ini_regs(ah); 1980 ath9k_hw_init_qos(ah); 1981 1982 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 1983 ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio); 1984 1985 ath9k_hw_init_global_settings(ah); 1986 1987 if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) { 1988 REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER, 1989 AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768); 1990 REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN, 1991 AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL); 1992 REG_SET_BIT(ah, AR_PCU_MISC_MODE2, 1993 AR_PCU_MISC_MODE2_ENABLE_AGGWEP); 1994 } 1995 1996 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM); 1997 1998 ath9k_hw_set_dma(ah); 1999 2000 if (!ath9k_hw_mci_is_enabled(ah)) 2001 REG_WRITE(ah, AR_OBS, 8); 2002 2003 if (ah->config.rx_intr_mitigation) { 2004 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); 2005 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000); 2006 } 2007 2008 if (ah->config.tx_intr_mitigation) { 2009 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300); 2010 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750); 2011 } 2012 2013 ath9k_hw_init_bb(ah, chan); 2014 2015 if (caldata) { 2016 caldata->done_txiqcal_once = false; 2017 caldata->done_txclcal_once = false; 2018 } 2019 if (!ath9k_hw_init_cal(ah, chan)) 2020 return -EIO; 2021 2022 if (ath9k_hw_mci_is_enabled(ah) && ar9003_mci_end_reset(ah, chan, caldata)) 2023 return -EIO; 2024 2025 ENABLE_REGWRITE_BUFFER(ah); 2026 2027 ath9k_hw_restore_chainmask(ah); 2028 REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ); 2029 2030 REGWRITE_BUFFER_FLUSH(ah); 2031 2032 ath9k_hw_init_desc(ah); 2033 2034 if (ath9k_hw_btcoex_is_enabled(ah)) 2035 ath9k_hw_btcoex_enable(ah); 2036 2037 if (ath9k_hw_mci_is_enabled(ah)) 2038 ar9003_mci_check_bt(ah); 2039 2040 ath9k_hw_loadnf(ah, chan); 2041 ath9k_hw_start_nfcal(ah, true); 2042 2043 if (AR_SREV_9300_20_OR_LATER(ah)) { 2044 ar9003_hw_bb_watchdog_config(ah); 2045 ar9003_hw_disable_phy_restart(ah); 2046 } 2047 2048 ath9k_hw_apply_gpio_override(ah); 2049 2050 if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv) 2051 REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON); 2052 2053 return 0; 2054} 2055EXPORT_SYMBOL(ath9k_hw_reset); 2056 2057/******************************/ 2058/* Power Management (Chipset) */ 2059/******************************/ 2060 2061/* 2062 * Notify Power Mgt is disabled in self-generated frames. 2063 * If requested, force chip to sleep. 
 */
static void ath9k_set_power_sleep(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);

	if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
		REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff);
		REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff);
		REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff);
		/* xxx Required for WLAN only case ? */
		REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
		udelay(100);
	}

	/*
	 * Clear the RTC force wake bit to allow the
	 * mac to go to sleep.
	 */
	REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);

	if (ath9k_hw_mci_is_enabled(ah))
		udelay(100);

	if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);

	/* Shutdown chip. Active low */
	if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
		REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
		udelay(2);
	}

	/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}

/*
 * Notify that Power Management is enabled in self-generated
 * frames. If requested, set the power mode of the chip to
 * auto/normal. Duration is in units of 128us (1/8 TU).
 */
static void ath9k_set_power_network_sleep(struct ath_hw *ah)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;

	REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		/* Set WakeOnInterrupt bit; clear ForceWake bit */
		REG_WRITE(ah, AR_RTC_FORCE_WAKE,
			  AR_RTC_FORCE_WAKE_ON_INT);
	} else {
		/*
		 * When the chip goes into network sleep, it can be woken
		 * up by the MCI_INT interrupt caused by BT's HW messages
		 * (LNA_xxx, CONT_xxx), which can arrive at a very fast
		 * rate (~100us). This causes the chip to leave and
		 * re-enter network sleep mode frequently, which in turn
		 * makes the WLAN MCI HW generate lots of SYS_WAKING and
		 * SYS_SLEEPING messages and leaves the BT CPU too busy
		 * to process them.
		 */
		if (ath9k_hw_mci_is_enabled(ah))
			REG_CLR_BIT(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
				    AR_MCI_INTERRUPT_RX_HW_MSG_MASK);
		/*
		 * Clear the RTC force wake bit to allow the
		 * mac to go to sleep.
		 */
		REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);

		if (ath9k_hw_mci_is_enabled(ah))
			udelay(30);
	}

	/* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}

static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
{
	u32 val;
	int i;

	/* Set Bits 14 and 17 of AR_WA before powering on the chip. */
	if (AR_SREV_9300_20_OR_LATER(ah)) {
		REG_WRITE(ah, AR_WA, ah->WARegVal);
		udelay(10);
	}

	if ((REG_READ(ah, AR_RTC_STATUS) &
	     AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
		if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON))
			return false;
		if (!AR_SREV_9300_20_OR_LATER(ah))
			ath9k_hw_init_pll(ah, NULL);
	}
	if (AR_SREV_9100(ah))
		REG_SET_BIT(ah, AR_RTC_RESET,
			    AR_RTC_RESET_EN);

	REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
		    AR_RTC_FORCE_WAKE_EN);
	udelay(50);

	for (i = POWER_UP_TIME / 50; i > 0; i--) {
		val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
		if (val == AR_RTC_STATUS_ON)
			break;
		udelay(50);
		REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
			    AR_RTC_FORCE_WAKE_EN);
	}
	if (i == 0) {
		ath_err(ath9k_hw_common(ah),
			"Failed to wake up in %uus\n",
			POWER_UP_TIME / 20);
		return false;
	}

	if (ath9k_hw_mci_is_enabled(ah))
		ar9003_mci_set_power_awake(ah);

	REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);

	return true;
}

bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
	struct ath_common *common = ath9k_hw_common(ah);
	int status = true;
	static const char *modes[] = {
		"AWAKE",
		"FULL-SLEEP",
		"NETWORK SLEEP",
		"UNDEFINED"
	};

	if (ah->power_mode == mode)
		return status;

	ath_dbg(common, RESET, "%s -> %s\n",
		modes[ah->power_mode], modes[mode]);

	switch (mode) {
	case ATH9K_PM_AWAKE:
		status = ath9k_hw_set_power_awake(ah);
		break;
	case ATH9K_PM_FULL_SLEEP:
		if (ath9k_hw_mci_is_enabled(ah))
			ar9003_mci_set_full_sleep(ah);

		ath9k_set_power_sleep(ah);
		ah->chip_fullsleep = true;
		break;
	case ATH9K_PM_NETWORK_SLEEP:
		ath9k_set_power_network_sleep(ah);
		break;
	default:
		ath_err(common, "Unknown power mode %u\n", mode);
		return false;
	}
	ah->power_mode = mode;

	/*
	 * XXX: If this warning never comes up after a while then
	 * simply keep the ATH_DBG_WARN_ON_ONCE() but make
	 * ath9k_hw_setpower() return type void.
	 */

	if (!(ah->ah_flags & AH_UNPLUGGED))
		ATH_DBG_WARN_ON_ONCE(!status);

	return status;
}
EXPORT_SYMBOL(ath9k_hw_setpower);
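
/*
 * Illustrative sketch, not part of the driver: callers are expected to move
 * the chip to ATH9K_PM_AWAKE before touching registers and to drop back to
 * the previous power state afterwards.  In ath9k this bracketing is normally
 * done by higher-level helpers (with locking and a wake-use count), which are
 * omitted here; the hypothetical function below only shows the intended call
 * order around ath9k_hw_setpower().
 */
static inline u32 example_read_reg_awake(struct ath_hw *ah, u32 reg)
{
	enum ath9k_power_mode old_mode = ah->power_mode;
	u32 val;

	/* Register access is only safe while the chip is awake. */
	if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
		return 0;

	val = REG_READ(ah, reg);

	/* Restore the previous power state (e.g. network sleep). */
	if (old_mode != ATH9K_PM_AWAKE)
		ath9k_hw_setpower(ah, old_mode);

	return val;
}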

/*******************/
/* Beacon Handling */
/*******************/

void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
{
	int flags = 0;

	ENABLE_REGWRITE_BUFFER(ah);

	switch (ah->opmode) {
	case NL80211_IFTYPE_ADHOC:
		REG_SET_BIT(ah, AR_TXCFG,
			    AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
		REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
			  TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
		flags |= AR_NDP_TIMER_EN;
		/* fall through */
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_AP:
		REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
		REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon -
			  TU_TO_USEC(ah->config.dma_beacon_response_time));
		REG_WRITE(ah, AR_NEXT_SWBA, next_beacon -
			  TU_TO_USEC(ah->config.sw_beacon_response_time));
		flags |= AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
		break;
	default:
		ath_dbg(ath9k_hw_common(ah), BEACON,
			"%s: unsupported opmode: %d\n", __func__, ah->opmode);
		return;
	}

	REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
	REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
	REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
	REG_WRITE(ah, AR_NDP_PERIOD, beacon_period);

	REGWRITE_BUFFER_FLUSH(ah);

	REG_SET_BIT(ah, AR_TIMER_MODE, flags);
}
EXPORT_SYMBOL(ath9k_hw_beaconinit);

void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
				    const struct ath9k_beacon_state *bs)
{
	u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));

	REG_WRITE(ah, AR_BEACON_PERIOD,
		  TU_TO_USEC(bs->bs_intval));
	REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
		  TU_TO_USEC(bs->bs_intval));

	REGWRITE_BUFFER_FLUSH(ah);

	REG_RMW_FIELD(ah, AR_RSSI_THR,
		      AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);

	beaconintval = bs->bs_intval;

	if (bs->bs_sleepduration > beaconintval)
		beaconintval = bs->bs_sleepduration;

	dtimperiod = bs->bs_dtimperiod;
	if (bs->bs_sleepduration > dtimperiod)
		dtimperiod = bs->bs_sleepduration;

	if (beaconintval == dtimperiod)
		nextTbtt = bs->bs_nextdtim;
	else
		nextTbtt = bs->bs_nexttbtt;

	ath_dbg(common, BEACON, "next DTIM %d\n", bs->bs_nextdtim);
	ath_dbg(common, BEACON, "next beacon %d\n", nextTbtt);
	ath_dbg(common, BEACON, "beacon period %d\n", beaconintval);
	ath_dbg(common, BEACON, "DTIM period %d\n", dtimperiod);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_NEXT_DTIM,
		  TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
	REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));

	REG_WRITE(ah, AR_SLEEP1,
		  SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
		  | AR_SLEEP1_ASSUME_DTIM);

	if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)
		beacontimeout = (BEACON_TIMEOUT_VAL << 3);
	else
		beacontimeout = MIN_BEACON_TIMEOUT_VAL;

	REG_WRITE(ah, AR_SLEEP2,
		  SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));

	REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
	REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));

	REGWRITE_BUFFER_FLUSH(ah);

	REG_SET_BIT(ah, AR_TIMER_MODE,
		    AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
		    AR_DTIM_TIMER_EN);

	/* TSF Out of Range Threshold */
	REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold);
}
EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);

/*******************/
/* HW Capabilities */
/*******************/

static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
{
	eeprom_chainmask &= chip_chainmask;
	if (eeprom_chainmask)
		return eeprom_chainmask;
	else
		return chip_chainmask;
}
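
/*
 * Illustrative check, not part of the driver: fixup_chainmask() above clamps
 * the EEPROM chainmask to the chains the chip actually has, and falls back to
 * the full chip mask when the EEPROM value has no overlap with it.  The
 * hypothetical helper below just spells out a few sample inputs for a
 * two-chain chip (chip_chainmask = 0x3).
 */
static inline bool example_fixup_chainmask(void)
{
	return fixup_chainmask(0x3, 0x7) == 0x3 &&	/* clamped to 2 chains */
	       fixup_chainmask(0x3, 0x1) == 0x1 &&	/* valid subset kept */
	       fixup_chainmask(0x3, 0x4) == 0x3;	/* no overlap: chip mask */
}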

/**
 * ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset
 * @ah: the atheros hardware data structure
 *
 * We enable DFS support upstream only on chipsets which have passed a
 * series of tests. The desired test requirements are documented at:
 *
 * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
 *
 * Once a new chipset gets properly tested, an individual commit can be
 * used to document the DFS testing for that chipset.
 */
static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
{
	switch (ah->hw_version.macVersion) {
	/* for temporary testing DFS with 9280 */
	case AR_SREV_VERSION_9280:
	/* AR9580 will likely be our first target to get testing on */
	case AR_SREV_VERSION_9580:
		return true;
	default:
		return false;
	}
}

int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
	struct ath_common *common = ath9k_hw_common(ah);
	unsigned int chip_chainmask;
	u16 eeval;
	u8 ant_div_ctl1, tx_chainmask, rx_chainmask;

	eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
	regulatory->current_rd = eeval;

	if (ah->opmode != NL80211_IFTYPE_AP &&
	    ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) {
		if (regulatory->current_rd == 0x64 ||
		    regulatory->current_rd == 0x65)
			regulatory->current_rd += 5;
		else if (regulatory->current_rd == 0x41)
			regulatory->current_rd = 0x43;
		ath_dbg(common, REGULATORY, "regdomain mapped to 0x%x\n",
			regulatory->current_rd);
	}

	eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
	if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) {
		ath_err(common,
			"no band has been marked as supported in EEPROM\n");
		return -EINVAL;
	}

	if (eeval & AR5416_OPFLAGS_11A)
		pCap->hw_caps |= ATH9K_HW_CAP_5GHZ;

	if (eeval & AR5416_OPFLAGS_11G)
		pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;

	if (AR_SREV_9485(ah) ||
	    AR_SREV_9285(ah) ||
	    AR_SREV_9330(ah) ||
	    AR_SREV_9565(ah))
		chip_chainmask = 1;
	else if (AR_SREV_9462(ah))
		chip_chainmask = 3;
	else if (!AR_SREV_9280_20_OR_LATER(ah))
		chip_chainmask = 7;
	else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
		chip_chainmask = 3;
	else
		chip_chainmask = 7;

	pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
	/*
	 * For AR9271 we will temporarily use the rx chainmask as read from
	 * the EEPROM.
	 */
	if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
	    !(eeval & AR5416_OPFLAGS_11A) &&
	    !(AR_SREV_9271(ah)))
		/* CB71: GPIO 0 is pulled down to indicate 3 rx chains */
		pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7;
	else if (AR_SREV_9100(ah))
		pCap->rx_chainmask = 0x7;
	else
		/* Use rx_chainmask from EEPROM.
*/ 2467 pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK); 2468 2469 pCap->tx_chainmask = fixup_chainmask(chip_chainmask, pCap->tx_chainmask); 2470 pCap->rx_chainmask = fixup_chainmask(chip_chainmask, pCap->rx_chainmask); 2471 ah->txchainmask = pCap->tx_chainmask; 2472 ah->rxchainmask = pCap->rx_chainmask; 2473 2474 ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA; 2475 2476 /* enable key search for every frame in an aggregate */ 2477 if (AR_SREV_9300_20_OR_LATER(ah)) 2478 ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH; 2479 2480 common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM; 2481 2482 if (ah->hw_version.devid != AR2427_DEVID_PCIE) 2483 pCap->hw_caps |= ATH9K_HW_CAP_HT; 2484 else 2485 pCap->hw_caps &= ~ATH9K_HW_CAP_HT; 2486 2487 if (AR_SREV_9271(ah)) 2488 pCap->num_gpio_pins = AR9271_NUM_GPIO; 2489 else if (AR_DEVID_7010(ah)) 2490 pCap->num_gpio_pins = AR7010_NUM_GPIO; 2491 else if (AR_SREV_9300_20_OR_LATER(ah)) 2492 pCap->num_gpio_pins = AR9300_NUM_GPIO; 2493 else if (AR_SREV_9287_11_OR_LATER(ah)) 2494 pCap->num_gpio_pins = AR9287_NUM_GPIO; 2495 else if (AR_SREV_9285_12_OR_LATER(ah)) 2496 pCap->num_gpio_pins = AR9285_NUM_GPIO; 2497 else if (AR_SREV_9280_20_OR_LATER(ah)) 2498 pCap->num_gpio_pins = AR928X_NUM_GPIO; 2499 else 2500 pCap->num_gpio_pins = AR_NUM_GPIO; 2501 2502 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) 2503 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; 2504 else 2505 pCap->rts_aggr_limit = (8 * 1024); 2506 2507#ifdef CONFIG_ATH9K_RFKILL 2508 ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT); 2509 if (ah->rfsilent & EEP_RFSILENT_ENABLED) { 2510 ah->rfkill_gpio = 2511 MS(ah->rfsilent, EEP_RFSILENT_GPIO_SEL); 2512 ah->rfkill_polarity = 2513 MS(ah->rfsilent, EEP_RFSILENT_POLARITY); 2514 2515 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT; 2516 } 2517#endif 2518 if (AR_SREV_9271(ah) || AR_SREV_9300_20_OR_LATER(ah)) 2519 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP; 2520 else 2521 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP; 2522 2523 if (AR_SREV_9280(ah) || AR_SREV_9285(ah)) 2524 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS; 2525 else 2526 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS; 2527 2528 if (AR_SREV_9300_20_OR_LATER(ah)) { 2529 pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK; 2530 if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah) && !AR_SREV_9565(ah)) 2531 pCap->hw_caps |= ATH9K_HW_CAP_LDPC; 2532 2533 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH; 2534 pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH; 2535 pCap->rx_status_len = sizeof(struct ar9003_rxs); 2536 pCap->tx_desc_len = sizeof(struct ar9003_txc); 2537 pCap->txs_len = sizeof(struct ar9003_txs); 2538 } else { 2539 pCap->tx_desc_len = sizeof(struct ath_desc); 2540 if (AR_SREV_9280_20(ah)) 2541 pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK; 2542 } 2543 2544 if (AR_SREV_9300_20_OR_LATER(ah)) 2545 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED; 2546 2547 if (AR_SREV_9300_20_OR_LATER(ah)) 2548 ah->ent_mode = REG_READ(ah, AR_ENT_OTP); 2549 2550 if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah)) 2551 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20; 2552 2553 if (AR_SREV_9285(ah)) 2554 if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) { 2555 ant_div_ctl1 = 2556 ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); 2557 if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1)) 2558 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB; 2559 } 2560 if (AR_SREV_9300_20_OR_LATER(ah)) { 2561 if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE)) 2562 pCap->hw_caps |= ATH9K_HW_CAP_APM; 2563 } 2564 2565 2566 if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || 
AR_SREV_9565(ah)) { 2567 ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1); 2568 /* 2569 * enable the diversity-combining algorithm only when 2570 * both enable_lna_div and enable_fast_div are set 2571 * Table for Diversity 2572 * ant_div_alt_lnaconf bit 0-1 2573 * ant_div_main_lnaconf bit 2-3 2574 * ant_div_alt_gaintb bit 4 2575 * ant_div_main_gaintb bit 5 2576 * enable_ant_div_lnadiv bit 6 2577 * enable_ant_fast_div bit 7 2578 */ 2579 if ((ant_div_ctl1 >> 0x6) == 0x3) 2580 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB; 2581 } 2582 2583 if (ath9k_hw_dfs_tested(ah)) 2584 pCap->hw_caps |= ATH9K_HW_CAP_DFS; 2585 2586 tx_chainmask = pCap->tx_chainmask; 2587 rx_chainmask = pCap->rx_chainmask; 2588 while (tx_chainmask || rx_chainmask) { 2589 if (tx_chainmask & BIT(0)) 2590 pCap->max_txchains++; 2591 if (rx_chainmask & BIT(0)) 2592 pCap->max_rxchains++; 2593 2594 tx_chainmask >>= 1; 2595 rx_chainmask >>= 1; 2596 } 2597 2598 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { 2599 if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE)) 2600 pCap->hw_caps |= ATH9K_HW_CAP_MCI; 2601 2602 if (AR_SREV_9462_20_OR_LATER(ah)) 2603 pCap->hw_caps |= ATH9K_HW_CAP_RTT; 2604 } 2605 2606 if (AR_SREV_9462(ah)) 2607 pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE; 2608 2609 if (AR_SREV_9300_20_OR_LATER(ah) && 2610 ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) 2611 pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; 2612 2613 return 0; 2614} 2615 2616/****************************/ 2617/* GPIO / RFKILL / Antennae */ 2618/****************************/ 2619 2620static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, 2621 u32 gpio, u32 type) 2622{ 2623 int addr; 2624 u32 gpio_shift, tmp; 2625 2626 if (gpio > 11) 2627 addr = AR_GPIO_OUTPUT_MUX3; 2628 else if (gpio > 5) 2629 addr = AR_GPIO_OUTPUT_MUX2; 2630 else 2631 addr = AR_GPIO_OUTPUT_MUX1; 2632 2633 gpio_shift = (gpio % 6) * 5; 2634 2635 if (AR_SREV_9280_20_OR_LATER(ah) 2636 || (addr != AR_GPIO_OUTPUT_MUX1)) { 2637 REG_RMW(ah, addr, (type << gpio_shift), 2638 (0x1f << gpio_shift)); 2639 } else { 2640 tmp = REG_READ(ah, addr); 2641 tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0); 2642 tmp &= ~(0x1f << gpio_shift); 2643 tmp |= (type << gpio_shift); 2644 REG_WRITE(ah, addr, tmp); 2645 } 2646} 2647 2648void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio) 2649{ 2650 u32 gpio_shift; 2651 2652 BUG_ON(gpio >= ah->caps.num_gpio_pins); 2653 2654 if (AR_DEVID_7010(ah)) { 2655 gpio_shift = gpio; 2656 REG_RMW(ah, AR7010_GPIO_OE, 2657 (AR7010_GPIO_OE_AS_INPUT << gpio_shift), 2658 (AR7010_GPIO_OE_MASK << gpio_shift)); 2659 return; 2660 } 2661 2662 gpio_shift = gpio << 1; 2663 REG_RMW(ah, 2664 AR_GPIO_OE_OUT, 2665 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift), 2666 (AR_GPIO_OE_OUT_DRV << gpio_shift)); 2667} 2668EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input); 2669 2670u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) 2671{ 2672#define MS_REG_READ(x, y) \ 2673 (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y))) 2674 2675 if (gpio >= ah->caps.num_gpio_pins) 2676 return 0xffffffff; 2677 2678 if (AR_DEVID_7010(ah)) { 2679 u32 val; 2680 val = REG_READ(ah, AR7010_GPIO_IN); 2681 return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0; 2682 } else if (AR_SREV_9300_20_OR_LATER(ah)) 2683 return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) & 2684 AR_GPIO_BIT(gpio)) != 0; 2685 else if (AR_SREV_9271(ah)) 2686 return MS_REG_READ(AR9271, gpio) != 0; 2687 else if (AR_SREV_9287_11_OR_LATER(ah)) 2688 return MS_REG_READ(AR9287, gpio) != 0; 2689 else if (AR_SREV_9285_12_OR_LATER(ah)) 2690 return 
MS_REG_READ(AR9285, gpio) != 0; 2691 else if (AR_SREV_9280_20_OR_LATER(ah)) 2692 return MS_REG_READ(AR928X, gpio) != 0; 2693 else 2694 return MS_REG_READ(AR, gpio) != 0; 2695} 2696EXPORT_SYMBOL(ath9k_hw_gpio_get); 2697 2698void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, 2699 u32 ah_signal_type) 2700{ 2701 u32 gpio_shift; 2702 2703 if (AR_DEVID_7010(ah)) { 2704 gpio_shift = gpio; 2705 REG_RMW(ah, AR7010_GPIO_OE, 2706 (AR7010_GPIO_OE_AS_OUTPUT << gpio_shift), 2707 (AR7010_GPIO_OE_MASK << gpio_shift)); 2708 return; 2709 } 2710 2711 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); 2712 gpio_shift = 2 * gpio; 2713 REG_RMW(ah, 2714 AR_GPIO_OE_OUT, 2715 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), 2716 (AR_GPIO_OE_OUT_DRV << gpio_shift)); 2717} 2718EXPORT_SYMBOL(ath9k_hw_cfg_output); 2719 2720void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) 2721{ 2722 if (AR_DEVID_7010(ah)) { 2723 val = val ? 0 : 1; 2724 REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio), 2725 AR_GPIO_BIT(gpio)); 2726 return; 2727 } 2728 2729 if (AR_SREV_9271(ah)) 2730 val = ~val; 2731 2732 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), 2733 AR_GPIO_BIT(gpio)); 2734} 2735EXPORT_SYMBOL(ath9k_hw_set_gpio); 2736 2737void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna) 2738{ 2739 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7)); 2740} 2741EXPORT_SYMBOL(ath9k_hw_setantenna); 2742 2743/*********************/ 2744/* General Operation */ 2745/*********************/ 2746 2747u32 ath9k_hw_getrxfilter(struct ath_hw *ah) 2748{ 2749 u32 bits = REG_READ(ah, AR_RX_FILTER); 2750 u32 phybits = REG_READ(ah, AR_PHY_ERR); 2751 2752 if (phybits & AR_PHY_ERR_RADAR) 2753 bits |= ATH9K_RX_FILTER_PHYRADAR; 2754 if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING)) 2755 bits |= ATH9K_RX_FILTER_PHYERR; 2756 2757 return bits; 2758} 2759EXPORT_SYMBOL(ath9k_hw_getrxfilter); 2760 2761void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits) 2762{ 2763 u32 phybits; 2764 2765 ENABLE_REGWRITE_BUFFER(ah); 2766 2767 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) 2768 bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER; 2769 2770 REG_WRITE(ah, AR_RX_FILTER, bits); 2771 2772 phybits = 0; 2773 if (bits & ATH9K_RX_FILTER_PHYRADAR) 2774 phybits |= AR_PHY_ERR_RADAR; 2775 if (bits & ATH9K_RX_FILTER_PHYERR) 2776 phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING; 2777 REG_WRITE(ah, AR_PHY_ERR, phybits); 2778 2779 if (phybits) 2780 REG_SET_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA); 2781 else 2782 REG_CLR_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA); 2783 2784 REGWRITE_BUFFER_FLUSH(ah); 2785} 2786EXPORT_SYMBOL(ath9k_hw_setrxfilter); 2787 2788bool ath9k_hw_phy_disable(struct ath_hw *ah) 2789{ 2790 if (ath9k_hw_mci_is_enabled(ah)) 2791 ar9003_mci_bt_gain_ctrl(ah); 2792 2793 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) 2794 return false; 2795 2796 ath9k_hw_init_pll(ah, NULL); 2797 ah->htc_reset_init = true; 2798 return true; 2799} 2800EXPORT_SYMBOL(ath9k_hw_phy_disable); 2801 2802bool ath9k_hw_disable(struct ath_hw *ah) 2803{ 2804 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 2805 return false; 2806 2807 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD)) 2808 return false; 2809 2810 ath9k_hw_init_pll(ah, NULL); 2811 return true; 2812} 2813EXPORT_SYMBOL(ath9k_hw_disable); 2814 2815static int get_antenna_gain(struct ath_hw *ah, struct ath9k_channel *chan) 2816{ 2817 enum eeprom_param gain_param; 2818 2819 if (IS_CHAN_2GHZ(chan)) 2820 gain_param = EEP_ANTENNA_GAIN_2G; 2821 else 2822 gain_param = EEP_ANTENNA_GAIN_5G; 2823 2824 return ah->eep_ops->get_eeprom(ah, gain_param); 
2825} 2826 2827void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan, 2828 bool test) 2829{ 2830 struct ath_regulatory *reg = ath9k_hw_regulatory(ah); 2831 struct ieee80211_channel *channel; 2832 int chan_pwr, new_pwr, max_gain; 2833 int ant_gain, ant_reduction = 0; 2834 2835 if (!chan) 2836 return; 2837 2838 channel = chan->chan; 2839 chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER); 2840 new_pwr = min_t(int, chan_pwr, reg->power_limit); 2841 max_gain = chan_pwr - new_pwr + channel->max_antenna_gain * 2; 2842 2843 ant_gain = get_antenna_gain(ah, chan); 2844 if (ant_gain > max_gain) 2845 ant_reduction = ant_gain - max_gain; 2846 2847 ah->eep_ops->set_txpower(ah, chan, 2848 ath9k_regd_get_ctl(reg, chan), 2849 ant_reduction, new_pwr, test); 2850} 2851 2852void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test) 2853{ 2854 struct ath_regulatory *reg = ath9k_hw_regulatory(ah); 2855 struct ath9k_channel *chan = ah->curchan; 2856 struct ieee80211_channel *channel = chan->chan; 2857 2858 reg->power_limit = min_t(u32, limit, MAX_RATE_POWER); 2859 if (test) 2860 channel->max_power = MAX_RATE_POWER / 2; 2861 2862 ath9k_hw_apply_txpower(ah, chan, test); 2863 2864 if (test) 2865 channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2); 2866} 2867EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit); 2868 2869void ath9k_hw_setopmode(struct ath_hw *ah) 2870{ 2871 ath9k_hw_set_operating_mode(ah, ah->opmode); 2872} 2873EXPORT_SYMBOL(ath9k_hw_setopmode); 2874 2875void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1) 2876{ 2877 REG_WRITE(ah, AR_MCAST_FIL0, filter0); 2878 REG_WRITE(ah, AR_MCAST_FIL1, filter1); 2879} 2880EXPORT_SYMBOL(ath9k_hw_setmcastfilter); 2881 2882void ath9k_hw_write_associd(struct ath_hw *ah) 2883{ 2884 struct ath_common *common = ath9k_hw_common(ah); 2885 2886 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid)); 2887 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) | 2888 ((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S)); 2889} 2890EXPORT_SYMBOL(ath9k_hw_write_associd); 2891 2892#define ATH9K_MAX_TSF_READ 10 2893 2894u64 ath9k_hw_gettsf64(struct ath_hw *ah) 2895{ 2896 u32 tsf_lower, tsf_upper1, tsf_upper2; 2897 int i; 2898 2899 tsf_upper1 = REG_READ(ah, AR_TSF_U32); 2900 for (i = 0; i < ATH9K_MAX_TSF_READ; i++) { 2901 tsf_lower = REG_READ(ah, AR_TSF_L32); 2902 tsf_upper2 = REG_READ(ah, AR_TSF_U32); 2903 if (tsf_upper2 == tsf_upper1) 2904 break; 2905 tsf_upper1 = tsf_upper2; 2906 } 2907 2908 WARN_ON( i == ATH9K_MAX_TSF_READ ); 2909 2910 return (((u64)tsf_upper1 << 32) | tsf_lower); 2911} 2912EXPORT_SYMBOL(ath9k_hw_gettsf64); 2913 2914void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64) 2915{ 2916 REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff); 2917 REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff); 2918} 2919EXPORT_SYMBOL(ath9k_hw_settsf64); 2920 2921void ath9k_hw_reset_tsf(struct ath_hw *ah) 2922{ 2923 if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0, 2924 AH_TSF_WRITE_TIMEOUT)) 2925 ath_dbg(ath9k_hw_common(ah), RESET, 2926 "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n"); 2927 2928 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); 2929} 2930EXPORT_SYMBOL(ath9k_hw_reset_tsf); 2931 2932void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set) 2933{ 2934 if (set) 2935 ah->misc_mode |= AR_PCU_TX_ADD_TSF; 2936 else 2937 ah->misc_mode &= ~AR_PCU_TX_ADD_TSF; 2938} 2939EXPORT_SYMBOL(ath9k_hw_set_tsfadjust); 2940 2941void ath9k_hw_set11nmac2040(struct ath_hw *ah) 2942{ 2943 struct 
ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 2944 u32 macmode; 2945 2946 if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca) 2947 macmode = AR_2040_JOINED_RX_CLEAR; 2948 else 2949 macmode = 0; 2950 2951 REG_WRITE(ah, AR_2040_MODE, macmode); 2952} 2953 2954/* HW Generic timers configuration */ 2955 2956static const struct ath_gen_timer_configuration gen_tmr_configuration[] = 2957{ 2958 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, 2959 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, 2960 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, 2961 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, 2962 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, 2963 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, 2964 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, 2965 {AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080}, 2966 {AR_NEXT_NDP2_TIMER, AR_NDP2_PERIOD, AR_NDP2_TIMER_MODE, 0x0001}, 2967 {AR_NEXT_NDP2_TIMER + 1*4, AR_NDP2_PERIOD + 1*4, 2968 AR_NDP2_TIMER_MODE, 0x0002}, 2969 {AR_NEXT_NDP2_TIMER + 2*4, AR_NDP2_PERIOD + 2*4, 2970 AR_NDP2_TIMER_MODE, 0x0004}, 2971 {AR_NEXT_NDP2_TIMER + 3*4, AR_NDP2_PERIOD + 3*4, 2972 AR_NDP2_TIMER_MODE, 0x0008}, 2973 {AR_NEXT_NDP2_TIMER + 4*4, AR_NDP2_PERIOD + 4*4, 2974 AR_NDP2_TIMER_MODE, 0x0010}, 2975 {AR_NEXT_NDP2_TIMER + 5*4, AR_NDP2_PERIOD + 5*4, 2976 AR_NDP2_TIMER_MODE, 0x0020}, 2977 {AR_NEXT_NDP2_TIMER + 6*4, AR_NDP2_PERIOD + 6*4, 2978 AR_NDP2_TIMER_MODE, 0x0040}, 2979 {AR_NEXT_NDP2_TIMER + 7*4, AR_NDP2_PERIOD + 7*4, 2980 AR_NDP2_TIMER_MODE, 0x0080} 2981}; 2982 2983/* HW generic timer primitives */ 2984 2985/* compute and clear index of rightmost 1 */ 2986static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask) 2987{ 2988 u32 b; 2989 2990 b = *mask; 2991 b &= (0-b); 2992 *mask &= ~b; 2993 b *= debruijn32; 2994 b >>= 27; 2995 2996 return timer_table->gen_timer_index[b]; 2997} 2998 2999u32 ath9k_hw_gettsf32(struct ath_hw *ah) 3000{ 3001 return REG_READ(ah, AR_TSF_L32); 3002} 3003EXPORT_SYMBOL(ath9k_hw_gettsf32); 3004 3005struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, 3006 void (*trigger)(void *), 3007 void (*overflow)(void *), 3008 void *arg, 3009 u8 timer_index) 3010{ 3011 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 3012 struct ath_gen_timer *timer; 3013 3014 timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL); 3015 if (timer == NULL) 3016 return NULL; 3017 3018 /* allocate a hardware generic timer slot */ 3019 timer_table->timers[timer_index] = timer; 3020 timer->index = timer_index; 3021 timer->trigger = trigger; 3022 timer->overflow = overflow; 3023 timer->arg = arg; 3024 3025 return timer; 3026} 3027EXPORT_SYMBOL(ath_gen_timer_alloc); 3028 3029void ath9k_hw_gen_timer_start(struct ath_hw *ah, 3030 struct ath_gen_timer *timer, 3031 u32 trig_timeout, 3032 u32 timer_period) 3033{ 3034 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 3035 u32 tsf, timer_next; 3036 3037 BUG_ON(!timer_period); 3038 3039 set_bit(timer->index, &timer_table->timer_mask.timer_bits); 3040 3041 tsf = ath9k_hw_gettsf32(ah); 3042 3043 timer_next = tsf + trig_timeout; 3044 3045 ath_dbg(ath9k_hw_common(ah), BTCOEX, 3046 "current tsf %x period %x timer_next %x\n", 3047 tsf, timer_period, timer_next); 3048 3049 /* 3050 * Program generic timer registers 3051 */ 3052 REG_WRITE(ah, gen_tmr_configuration[timer->index].next_addr, 3053 timer_next); 3054 REG_WRITE(ah, gen_tmr_configuration[timer->index].period_addr, 3055 timer_period); 
3056 REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, 3057 gen_tmr_configuration[timer->index].mode_mask); 3058 3059 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { 3060 /* 3061 * Starting from AR9462, each generic timer can select which tsf 3062 * to use. But we still follow the old rule, 0 - 7 use tsf and 3063 * 8 - 15 use tsf2. 3064 */ 3065 if ((timer->index < AR_GEN_TIMER_BANK_1_LEN)) 3066 REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL, 3067 (1 << timer->index)); 3068 else 3069 REG_SET_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL, 3070 (1 << timer->index)); 3071 } 3072 3073 /* Enable both trigger and thresh interrupt masks */ 3074 REG_SET_BIT(ah, AR_IMR_S5, 3075 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | 3076 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); 3077} 3078EXPORT_SYMBOL(ath9k_hw_gen_timer_start); 3079 3080void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer) 3081{ 3082 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 3083 3084 if ((timer->index < AR_FIRST_NDP_TIMER) || 3085 (timer->index >= ATH_MAX_GEN_TIMER)) { 3086 return; 3087 } 3088 3089 /* Clear generic timer enable bits. */ 3090 REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr, 3091 gen_tmr_configuration[timer->index].mode_mask); 3092 3093 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) { 3094 /* 3095 * Need to switch back to TSF if it was using TSF2. 3096 */ 3097 if ((timer->index >= AR_GEN_TIMER_BANK_1_LEN)) { 3098 REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL, 3099 (1 << timer->index)); 3100 } 3101 } 3102 3103 /* Disable both trigger and thresh interrupt masks */ 3104 REG_CLR_BIT(ah, AR_IMR_S5, 3105 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | 3106 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); 3107 3108 clear_bit(timer->index, &timer_table->timer_mask.timer_bits); 3109} 3110EXPORT_SYMBOL(ath9k_hw_gen_timer_stop); 3111 3112void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer) 3113{ 3114 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 3115 3116 /* free the hardware generic timer slot */ 3117 timer_table->timers[timer->index] = NULL; 3118 kfree(timer); 3119} 3120EXPORT_SYMBOL(ath_gen_timer_free); 3121 3122/* 3123 * Generic Timer Interrupts handling 3124 */ 3125void ath_gen_timer_isr(struct ath_hw *ah) 3126{ 3127 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 3128 struct ath_gen_timer *timer; 3129 struct ath_common *common = ath9k_hw_common(ah); 3130 u32 trigger_mask, thresh_mask, index; 3131 3132 /* get hardware generic timer interrupt status */ 3133 trigger_mask = ah->intr_gen_timer_trigger; 3134 thresh_mask = ah->intr_gen_timer_thresh; 3135 trigger_mask &= timer_table->timer_mask.val; 3136 thresh_mask &= timer_table->timer_mask.val; 3137 3138 trigger_mask &= ~thresh_mask; 3139 3140 while (thresh_mask) { 3141 index = rightmost_index(timer_table, &thresh_mask); 3142 timer = timer_table->timers[index]; 3143 BUG_ON(!timer); 3144 ath_dbg(common, BTCOEX, "TSF overflow for Gen timer %d\n", 3145 index); 3146 timer->overflow(timer->arg); 3147 } 3148 3149 while (trigger_mask) { 3150 index = rightmost_index(timer_table, &trigger_mask); 3151 timer = timer_table->timers[index]; 3152 BUG_ON(!timer); 3153 ath_dbg(common, BTCOEX, 3154 "Gen timer[%d] trigger\n", index); 3155 timer->trigger(timer->arg); 3156 } 3157} 3158EXPORT_SYMBOL(ath_gen_timer_isr); 3159 3160/********/ 3161/* HTC */ 3162/********/ 3163 3164static struct { 3165 u32 version; 3166 const char * name; 3167} 
ath_mac_bb_names[] = { 3168 /* Devices with external radios */ 3169 { AR_SREV_VERSION_5416_PCI, "5416" }, 3170 { AR_SREV_VERSION_5416_PCIE, "5418" }, 3171 { AR_SREV_VERSION_9100, "9100" }, 3172 { AR_SREV_VERSION_9160, "9160" }, 3173 /* Single-chip solutions */ 3174 { AR_SREV_VERSION_9280, "9280" }, 3175 { AR_SREV_VERSION_9285, "9285" }, 3176 { AR_SREV_VERSION_9287, "9287" }, 3177 { AR_SREV_VERSION_9271, "9271" }, 3178 { AR_SREV_VERSION_9300, "9300" }, 3179 { AR_SREV_VERSION_9330, "9330" }, 3180 { AR_SREV_VERSION_9340, "9340" }, 3181 { AR_SREV_VERSION_9485, "9485" }, 3182 { AR_SREV_VERSION_9462, "9462" }, 3183 { AR_SREV_VERSION_9550, "9550" }, 3184 { AR_SREV_VERSION_9565, "9565" }, 3185}; 3186 3187/* For devices with external radios */ 3188static struct { 3189 u16 version; 3190 const char * name; 3191} ath_rf_names[] = { 3192 { 0, "5133" }, 3193 { AR_RAD5133_SREV_MAJOR, "5133" }, 3194 { AR_RAD5122_SREV_MAJOR, "5122" }, 3195 { AR_RAD2133_SREV_MAJOR, "2133" }, 3196 { AR_RAD2122_SREV_MAJOR, "2122" } 3197}; 3198 3199/* 3200 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown. 3201 */ 3202static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version) 3203{ 3204 int i; 3205 3206 for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) { 3207 if (ath_mac_bb_names[i].version == mac_bb_version) { 3208 return ath_mac_bb_names[i].name; 3209 } 3210 } 3211 3212 return "????"; 3213} 3214 3215/* 3216 * Return the RF name. "????" is returned if the RF is unknown. 3217 * Used for devices with external radios. 3218 */ 3219static const char *ath9k_hw_rf_name(u16 rf_version) 3220{ 3221 int i; 3222 3223 for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) { 3224 if (ath_rf_names[i].version == rf_version) { 3225 return ath_rf_names[i].name; 3226 } 3227 } 3228 3229 return "????"; 3230} 3231 3232void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len) 3233{ 3234 int used; 3235 3236 /* chipsets >= AR9280 are single-chip */ 3237 if (AR_SREV_9280_20_OR_LATER(ah)) { 3238 used = snprintf(hw_name, len, 3239 "Atheros AR%s Rev:%x", 3240 ath9k_hw_mac_bb_name(ah->hw_version.macVersion), 3241 ah->hw_version.macRev); 3242 } 3243 else { 3244 used = snprintf(hw_name, len, 3245 "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x", 3246 ath9k_hw_mac_bb_name(ah->hw_version.macVersion), 3247 ah->hw_version.macRev, 3248 ath9k_hw_rf_name((ah->hw_version.analog5GhzRev & 3249 AR_RADIO_SREV_MAJOR)), 3250 ah->hw_version.phyRev); 3251 } 3252 3253 hw_name[used] = '\0'; 3254} 3255EXPORT_SYMBOL(ath9k_hw_name); 3256
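
/*
 * Worked example, not part of the driver, for the arithmetic in
 * ath9k_hw_apply_txpower() above.  All power values are in half-dB steps:
 * with a channel limit of 20 dBm (channel->max_power = 20, so chan_pwr = 40),
 * a regulatory/user power_limit of 17 dBm (34) and max_antenna_gain of
 * 3 dBi (6):
 *
 *	new_pwr  = min(40, 34)  = 34	target power, i.e. 17 dBm
 *	max_gain = 40 - 34 + 6  = 12	antenna gain absorbed by the headroom
 *					below the channel limit
 *	ant_reduction = max(ant_gain - max_gain, 0)
 *
 * so the EEPROM antenna gain only starts reducing the programmed power once
 * it exceeds that headroom.
 */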
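
/*
 * Illustrative sketch, not part of the driver: rightmost_index() above relies
 * on the classic de Bruijn multiply-and-shift trick to turn the isolated
 * lowest set bit of a mask into a small table index without looping.  The
 * constant and position table below are the commonly used 32-bit de Bruijn
 * sequence 0x077CB531 and its index table; the driver keeps its own constant
 * (debruijn32) and maps the result through gen_timer_index[], so treat this
 * only as a worked example of the technique, not as the driver's exact
 * values.
 */
static inline u32 example_lowest_set_bit_index(u32 mask)
{
	static const u8 debruijn_pos[32] = {
		0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
		31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
	};
	u32 b = mask & (0 - mask);	/* isolate the lowest set bit */

	/*
	 * Multiplying by the de Bruijn constant places a unique 5-bit
	 * pattern in the top bits for every power of two.
	 */
	return debruijn_pos[(b * 0x077CB531U) >> 27];
}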
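
/*
 * Illustrative sketch, not part of the driver: typical life cycle of a HW
 * generic timer built from the helpers above.  The callbacks, the timer
 * index and the timeout values are made up for the example; real users pick
 * a slot between AR_FIRST_NDP_TIMER and ATH_MAX_GEN_TIMER (as enforced by
 * ath9k_hw_gen_timer_stop()) and call this from their own setup/teardown
 * paths.
 */
static inline void example_gen_timer_usage(struct ath_hw *ah,
					   void (*trigger)(void *),
					   void (*overflow)(void *),
					   void *arg)
{
	struct ath_gen_timer *timer;

	timer = ath_gen_timer_alloc(ah, trigger, overflow, arg, 7);
	if (!timer)
		return;

	/* First trigger 10ms from now, then every 25ms (TSF microseconds). */
	ath9k_hw_gen_timer_start(ah, timer, 10000, 25000);

	/* ... later, on teardown ... */
	ath9k_hw_gen_timer_stop(ah, timer);
	ath_gen_timer_free(ah, timer);
}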