iwl-5000.c revision e1623446bb1de1834ff1c57b3e8ed341d5d4a927
/******************************************************************************
 *
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-sta.h"
#include "iwl-helpers.h"
#include "iwl-5000-hw.h"

/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 1
#define IWL5150_UCODE_API_MAX 1

/* Lowest firmware API version supported */
#define IWL5000_UCODE_API_MIN 1
#define IWL5150_UCODE_API_MIN 1

#define IWL5000_FW_PRE "iwlwifi-5000-"
#define _IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE #api ".ucode"
#define IWL5000_MODULE_FIRMWARE(api) _IWL5000_MODULE_FIRMWARE(api)

#define IWL5150_FW_PRE "iwlwifi-5150-"
#define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
#define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api)

static const u16 iwl5000_default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL50_CMD_FIFO_NUM,
	IWL_TX_FIFO_HCCA_1,
	IWL_TX_FIFO_HCCA_2
};

/* FIXME: same implementation as 4965 */
static int iwl5000_apm_stop_master(struct iwl_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* set stop master bit */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	iwl_poll_direct_bit(priv, CSR_RESET,
			    CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);

	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_INFO(priv, "stop master\n");

	return 0;
}


static int iwl5000_apm_init(struct iwl_priv *priv)
{
	int ret = 0;

	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* disable L0s without affecting L1 (don't wait for ICH L0s bug W/A) */
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/* enable HAP INTA to move device L1a -> L0s */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	/* set "initialization complete" bit to move adapter
	 * D0U* --> D0A* state */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock stabilization */
	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
				  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		return ret;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret)
		return ret;

	/* enable DMA */
	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);

	udelay(20);

	/* disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);

	return ret;
}

/* FIXME: this is identical to 4965 */
static void iwl5000_apm_stop(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl5000_apm_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* clear "init complete" bit to move adapter D0A* --> D0U state */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	spin_unlock_irqrestore(&priv->lock, flags);
}


static int iwl5000_apm_reset(struct iwl_priv *priv)
{
	int ret = 0;
	unsigned long flags;

	iwl5000_apm_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);


	/* FIXME: put here L1A -L0S w/a */

	iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	/* set "initialization complete" bit to move adapter
	 * D0U* --> D0A* state */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock stabilization */
	ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
				  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		goto out;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;

	/* enable DMA */
	iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);

	udelay(20);

	/* disable L1-Active */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);

out:
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}


static void iwl5000_nic_config(struct iwl_priv *priv)
{
	unsigned long flags;
	u16 radio_cfg;
	u16 link;

	spin_lock_irqsave(&priv->lock, flags);

	pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &link);

	/* L1 is enabled by BIOS */
	if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
		/* L0S disabled, L1A enabled */
		iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		/* L0S enabled, L1A disabled */
		iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);

	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_5000_RF_CFG_TYPE_MAX)
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	/* W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted),
	 * causing ME FW to lose ownership and not being able to obtain it back.
	 */
	iwl_grab_nic_access(priv);
	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
			       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	iwl_release_nic_access(priv);

	spin_unlock_irqrestore(&priv->lock, flags);
}



/*
 * EEPROM
 */
static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
{
	u16 offset = 0;

	if ((address & INDIRECT_ADDRESS) == 0)
		return address;

	switch (address & INDIRECT_TYPE_MSK) {
	case INDIRECT_HOST:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
		break;
	case INDIRECT_GENERAL:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
		break;
	case INDIRECT_REGULATORY:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
		break;
	case INDIRECT_CALIBRATION:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
		break;
	case INDIRECT_PROCESS_ADJST:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
		break;
	case INDIRECT_OTHERS:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
		break;
	default:
		IWL_ERR(priv, "illegal indirect type: 0x%X\n",
			address & INDIRECT_TYPE_MSK);
		break;
	}

	/* translate the offset from words to bytes */
	return (address & ADDRESS_MSK) + (offset << 1);
}

static u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv)
{
	struct iwl_eeprom_calib_hdr {
		u8 version;
		u8 pa_type;
		u16 voltage;
	} *hdr;

	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
							EEPROM_5000_CALIB_ALL);
	return hdr->version;

}

static void iwl5000_gain_computation(struct iwl_priv *priv,
		u32 average_noise[NUM_RX_CHAINS],
		u16 min_average_noise_antenna_i,
		u32 min_average_noise)
{
	int i;
	s32 delta_g;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	/* Find Gain Code for the antennas B and C */
	for (i = 1; i < NUM_RX_CHAINS; i++) {
		if ((data->disconn_array[i])) {
			data->delta_gain_code[i] = 0;
			continue;
		}
		delta_g = (1000 * ((s32)average_noise[0] -
			(s32)average_noise[i])) / 1500;
		/* bound gain by 2 bits value max, 3rd bit is sign */
		data->delta_gain_code[i] =
			min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

		if (delta_g < 0)
			/* set negative sign */
			data->delta_gain_code[i] |= (1 << 2);
	}

	IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
			data->delta_gain_code[1], data->delta_gain_code[2]);

	if (!data->radio_write) {
		struct iwl_calib_chain_noise_gain_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));

		cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
		cmd.hdr.first_group = 0;
		cmd.hdr.groups_num = 1;
		cmd.hdr.data_valid = 1;
		cmd.delta_gain_1 = data->delta_gain_code[1];
		cmd.delta_gain_2 = data->delta_gain_code[2];
		iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
				       sizeof(cmd), &cmd, NULL);

		data->radio_write = 1;
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}

	data->chain_noise_a = 0;
	data->chain_noise_b = 0;
	data->chain_noise_c = 0;
	data->chain_signal_a = 0;
	data->chain_signal_b = 0;
	data->chain_signal_c = 0;
	data->beacon_count = 0;
}

static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;
	int ret;

	if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
		struct iwl_calib_chain_noise_reset_cmd cmd;
		memset(&cmd, 0, sizeof(cmd));

		cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
		cmd.hdr.first_group = 0;
		cmd.hdr.groups_num = 1;
		cmd.hdr.data_valid = 1;
		ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				       sizeof(cmd), &cmd);
		if (ret)
			IWL_ERR(priv,
				"Could not send REPLY_PHY_CALIBRATION_CMD\n");
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
	}
}

static void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
				    __le32 *tx_flags)
{
	if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
	    (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
		*tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
	else
		*tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
}

static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
	.min_nrg_cck = 95,
	.max_nrg_cck = 0,
	.auto_corr_min_ofdm = 90,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 120,
	.auto_corr_min_ofdm_mrc_x1 = 240,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 155,
	.auto_corr_max_ofdm_mrc_x1 = 290,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 170,
	.auto_corr_max_cck_mrc = 400,
	.nrg_th_cck = 95,
	.nrg_th_ofdm = 95,
};

static const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
					   size_t offset)
{
	u32 address = eeprom_indirect_address(priv, offset);
	BUG_ON(address >= priv->cfg->eeprom_size);
	return &priv->eeprom[address];
}

static s32 iwl5150_get_ct_threshold(struct iwl_priv *priv)
{
	const s32 volt2temp_coef = -5;
	u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
						       EEPROM_5000_TEMPERATURE);
	/* offset = temperature - voltage / coef */
	s32 offset = temp_calib[0] - temp_calib[1] / volt2temp_coef;
	s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD) - offset;
	return threshold * volt2temp_coef;
}

/*
 * Calibration
 */
static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
{
	struct iwl_calib_xtal_freq_cmd cmd;
	u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);

	cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
	cmd.hdr.first_group = 0;
	cmd.hdr.groups_num = 1;
	cmd.hdr.data_valid = 1;
	cmd.cap_pin1 = (u8)xtal_calib[0];
	cmd.cap_pin2 = (u8)xtal_calib[1];
	return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
			     (u8 *)&cmd, sizeof(cmd));
}

static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
{
	struct iwl_calib_cfg_cmd calib_cfg_cmd;
	struct iwl_host_cmd cmd = {
		.id = CALIBRATION_CFG_CMD,
		.len = sizeof(struct iwl_calib_cfg_cmd),
		.data = &calib_cfg_cmd,
	};

	memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
	calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;

	return iwl_send_cmd(priv, &cmd);
}

static void iwl5000_rx_calib_result(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
	int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK;
	int index;

	/* reduce the size of the length field itself */
	len -= 4;

	/* Define the order in which the results will be sent to the runtime
	 * uCode. iwl_send_calib_results sends them in a row according to their
	 * index. We sort them here */
	switch (hdr->op_code) {
	case IWL_PHY_CALIBRATE_DC_CMD:
		index = IWL_CALIB_DC;
		break;
	case IWL_PHY_CALIBRATE_LO_CMD:
		index = IWL_CALIB_LO;
		break;
	case IWL_PHY_CALIBRATE_TX_IQ_CMD:
		index = IWL_CALIB_TX_IQ;
		break;
	case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
		index = IWL_CALIB_TX_IQ_PERD;
		break;
	case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
		index = IWL_CALIB_BASE_BAND;
		break;
	default:
		IWL_ERR(priv, "Unknown calibration notification %d\n",
			hdr->op_code);
		return;
	}
	iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
}

static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
	IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
	queue_work(priv->workqueue, &priv->restart);
}

/*
 * ucode
 */
static int iwl5000_load_section(struct iwl_priv *priv,
				struct fw_desc *image,
				u32 dst_addr)
{
	int ret = 0;
	unsigned long flags;

	dma_addr_t phy_addr = image->p_addr;
	u32 byte_cnt = image->len;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(priv,
		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);

	iwl_write_direct32(priv,
		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(priv,
		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		(iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(priv,
		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}

static int iwl5000_load_given_ucode(struct iwl_priv *priv,
				    struct fw_desc *inst_image,
				    struct fw_desc *data_image)
{
	int ret = 0;

	ret = iwl5000_load_section(priv, inst_image,
				   IWL50_RTC_INST_LOWER_BOUND);
	if (ret)
		return ret;

	IWL_DEBUG_INFO(priv, "INST uCode section being loaded...\n");
	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
					priv->ucode_write_complete, 5 * HZ);
	if (ret == -ERESTARTSYS) {
		IWL_ERR(priv, "Could not load the INST uCode section due "
			"to interrupt\n");
		return ret;
	}
	if (!ret) {
		IWL_ERR(priv, "Could not load the INST uCode section\n");
		return -ETIMEDOUT;
	}

	priv->ucode_write_complete = 0;

	ret = iwl5000_load_section(priv, data_image,
				   IWL50_RTC_DATA_LOWER_BOUND);
	if (ret)
		return ret;

	IWL_DEBUG_INFO(priv, "DATA uCode section being loaded...\n");

	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
					priv->ucode_write_complete, 5 * HZ);
	if (ret == -ERESTARTSYS) {
		IWL_ERR(priv, "Could not load the DATA uCode section due "
			"to interrupt\n");
		return ret;
	} else if (!ret) {
		IWL_ERR(priv, "Could not load the DATA uCode section\n");
		return -ETIMEDOUT;
	} else
		ret = 0;

	priv->ucode_write_complete = 0;

	return ret;
}

static int iwl5000_load_ucode(struct iwl_priv *priv)
{
	int ret = 0;

	/* check whether init ucode should be loaded, or rather runtime ucode */
	if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
		IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
		ret = iwl5000_load_given_ucode(priv,
			&priv->ucode_init, &priv->ucode_init_data);
		if (!ret) {
			IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
			priv->ucode_type = UCODE_INIT;
		}
	} else {
		IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
			"Loading runtime ucode...\n");
		ret = iwl5000_load_given_ucode(priv,
			&priv->ucode_code, &priv->ucode_data);
		if (!ret) {
			IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
			priv->ucode_type = UCODE_RT;
		}
	}

	return ret;
}

static void iwl5000_init_alive_start(struct iwl_priv *priv)
{
	int ret = 0;

	/* Check alive response for "valid" sign from uCode */
	if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
		goto restart;
	}

	/* initialize uCode was loaded... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded. */
	if (iwl_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	iwl_clear_stations_table(priv);
	ret = priv->cfg->ops->lib->alive_notify(priv);
	if (ret) {
		IWL_WARN(priv,
			"Could not complete ALIVE transition: %d\n", ret);
		goto restart;
	}

	iwl5000_send_calib_cfg(priv);
	return;

restart:
	/* real restart (first load init_ucode) */
	queue_work(priv->workqueue, &priv->restart);
}

static void iwl5000_set_wr_ptrs(struct iwl_priv *priv,
				int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			   (index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
}

static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ?
			1 : 0;

	iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
			(active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
			IWL50_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}

static int iwl5000_send_wimax_coex(struct iwl_priv *priv)
{
	struct iwl_wimax_coex_cmd coex_cmd;

	memset(&coex_cmd, 0, sizeof(coex_cmd));

	return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
				sizeof(coex_cmd), &coex_cmd);
}

static int iwl5000_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int ret;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
		IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
	iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
		       IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));

	iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);

	/* map qos queues to fifos one-to-one */
	for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
		int ac = iwl5000_default_queue_to_tx_fifo[i];
		iwl_txq_ctx_activate(priv, i);
		iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}
	/* TODO - need to initialize those FIFOs inside the loop above,
	 * not only mark them as active */
	iwl_txq_ctx_activate(priv, 4);
	iwl_txq_ctx_activate(priv, 7);
	iwl_txq_ctx_activate(priv, 8);
	iwl_txq_ctx_activate(priv, 9);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);


	iwl5000_send_wimax_coex(priv);

	iwl5000_set_Xtal_calib(priv);
	iwl_send_calib_results(priv);

	return 0;
}

static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
	if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) ||
	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
		IWL_ERR(priv,
			"invalid queues_num, should be between %d and %d\n",
			IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
		return -EINVAL;
	}

	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
	priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
	priv->hw_params.scd_bc_tbls_size =
			IWL50_NUM_QUEUES * sizeof(struct iwl5000_scd_bc_tbl);
	priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
	priv->hw_params.max_stations = IWL5000_STATION_COUNT;
	priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
	priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
	priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
	priv->hw_params.max_bsm_size = 0;
	priv->hw_params.fat_channel = BIT(IEEE80211_BAND_2GHZ) |
					BIT(IEEE80211_BAND_5GHZ);
	priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;

	priv->hw_params.sens = &iwl5000_sensitivity;

	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
	case CSR_HW_REV_TYPE_5100:
		priv->hw_params.tx_chains_num = 1;
		priv->hw_params.rx_chains_num = 2;
		priv->hw_params.valid_tx_ant = ANT_B;
		priv->hw_params.valid_rx_ant = ANT_AB;
		break;
	case CSR_HW_REV_TYPE_5150:
		priv->hw_params.tx_chains_num = 1;
		priv->hw_params.rx_chains_num = 2;
		priv->hw_params.valid_tx_ant = ANT_A;
		priv->hw_params.valid_rx_ant = ANT_AB;
		break;
	case CSR_HW_REV_TYPE_5300:
	case CSR_HW_REV_TYPE_5350:
		priv->hw_params.tx_chains_num = 3;
		priv->hw_params.rx_chains_num = 3;
		priv->hw_params.valid_tx_ant = ANT_ABC;
		priv->hw_params.valid_rx_ant = ANT_ABC;
		break;
	}

	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
	case CSR_HW_REV_TYPE_5100:
	case CSR_HW_REV_TYPE_5300:
	case CSR_HW_REV_TYPE_5350:
		/* 5X00 and 5350 want the threshold in Celsius */
		priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
		break;
	case CSR_HW_REV_TYPE_5150:
		/* 5150 wants it in Kelvin */
		priv->hw_params.ct_kill_threshold =
			iwl5150_get_ct_threshold(priv);
		break;
	}

	/* Set initial calibration set */
	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
	case CSR_HW_REV_TYPE_5100:
	case CSR_HW_REV_TYPE_5300:
	case CSR_HW_REV_TYPE_5350:
		priv->hw_params.calib_init_cfg =
			BIT(IWL_CALIB_XTAL) |
			BIT(IWL_CALIB_LO) |
			BIT(IWL_CALIB_TX_IQ) |
			BIT(IWL_CALIB_TX_IQ_PERD) |
			BIT(IWL_CALIB_BASE_BAND);
		break;
	case CSR_HW_REV_TYPE_5150:
		priv->hw_params.calib_init_cfg =
			BIT(IWL_CALIB_DC) |
			BIT(IWL_CALIB_LO) |
			BIT(IWL_CALIB_TX_IQ) |
			BIT(IWL_CALIB_BASE_BAND);

		break;
	}


	return 0;
}

/**
 * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
					    struct iwl_tx_queue *txq,
					    u16 byte_cnt)
{
	struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != IWL_CMD_QUEUE_NUM) {
		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

		switch (sec_ctl & TX_CMD_SEC_MSK) {
		case TX_CMD_SEC_CCM:
			len += CCMP_MIC_LEN;
			break;
		case TX_CMD_SEC_TKIP:
			len += TKIP_ICV_LEN;
			break;
		case TX_CMD_SEC_WEP:
			len += WEP_IV_LEN + WEP_ICV_LEN;
			break;
		}
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
					   struct iwl_tx_queue *txq)
{
	struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != IWL_CMD_QUEUE_NUM)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = priv->scd_base_addr +
			IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);

	return 0;
}
static void iwl5000_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(priv,
		IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

static int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
				  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
	unsigned long flags;
	int ret;
	u16 ra_tid;

	if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL50_FIRST_AMPDU_QUEUE,
			IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Stop this Tx queue before configuring it */
	iwl5000_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwl5000_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(priv, priv->scd_base_addr +
			IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((SCD_WIN_SIZE <<
			IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((SCD_FRAME_LIMIT <<
			IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
				   u16 ssn_idx, u8 tx_fifo)
{
	int ret;

	if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL50_FIRST_AMPDU_QUEUE,
			IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1);
		return -EINVAL;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret)
		return ret;

	iwl5000_tx_queue_stop_scheduler(priv, txq_id);

	iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	iwl_release_nic_access(priv);

	return 0;
}

static u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
{
	u16 size = (u16)sizeof(struct iwl_addsta_cmd);
	memcpy(data, cmd, size);
	return size;
}


/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
 * must be called under priv->lock and mac access
 */
static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
}


static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & MAX_SN;
}

static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
				      struct iwl_ht_agg *agg,
				      struct iwl5000_tx_resp *tx_resp,
				      int txq_id, u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = &tx_resp->status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* # frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		/* FIXME: code repetition */
		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
				   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= iwl_is_tx_success(status) ?
					IEEE80211_TX_STAT_ACK : 0;
		iwl_hwrate_to_tx_control(priv, rate_n_flags, info);

		/* FIXME: code repetition end */

		IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
				   status & 0xff, tx_resp->failure_frame);
		IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		int start = agg->start_idx;

		/* Construct bit-map of pending frames within Tx window */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_INDEX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
				      AGG_TX_STATE_ABORT_MSK))
				continue;

			IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
					   agg->frame_count, txq_id, idx);

			hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);

			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't match seq control"
					" idx=%d, seq_idx=%d, seq=%d\n",
					idx, SEQ_TO_SN(sc),
					hdr->seq_ctrl);
				return -1;
			}

			IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
					   i, idx, SEQ_TO_SN(sc));

			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
					   start, (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
				   agg->frame_count, agg->start_idx,
				   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}

static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le16_to_cpu(tx_resp->status.status);
	int tid;
	int sta_id;
	int freed;

	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
			"is out of range [0-%d] %d %d\n", txq_id,
			index, txq->q.n_bd, txq->q.write_ptr,
			txq->q.read_ptr);
		return;
	}

	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
	memset(&info->status, 0, sizeof(info->status));

	tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;

	if (txq->sched_retry) {
		const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp);
		struct iwl_ht_agg *agg = NULL;

		agg = &priv->stations[sta_id].tid[tid].agg;

		iwl5000_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);

		/* check if BAR is needed */
		if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
					"scd_ssn=%d idx=%d txq=%d swq=%d\n",
					scd_ssn, index, txq_id, txq->swq_id);

			freed = iwl_tx_queue_reclaim(priv, txq_id, index);
			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

			if (priv->mac80211_registered &&
			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
			    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
				if (agg->state == IWL_AGG_OFF)
					ieee80211_wake_queue(priv->hw, txq_id);
				else
					ieee80211_wake_queue(priv->hw,
							     txq->swq_id);
			}
		}
	} else {
		BUG_ON(txq_id != txq->swq_id);

		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= iwl_is_tx_success(status) ?
					IEEE80211_TX_STAT_ACK : 0;
		iwl_hwrate_to_tx_control(priv,
					le32_to_cpu(tx_resp->rate_n_flags),
					info);

		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
				   "0x%x retries %d\n",
				   txq_id,
				   iwl_get_tx_fail_reason(status), status,
				   le32_to_cpu(tx_resp->rate_n_flags),
				   tx_resp->failure_frame);

		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
		if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

		if (priv->mac80211_registered &&
		    (iwl_queue_space(&txq->q) > txq->q.low_mark))
			ieee80211_wake_queue(priv->hw, txq_id);
	}

	if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
		iwl_txq_check_empty(priv, sta_id, tid, txq_id);

	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
		IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
}

/* Currently 5000 is the superset of everything */
static u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
{
	return len;
}

static void iwl5000_setup_deferred_work(struct iwl_priv *priv)
{
	/* in 5000 the tx power calibration is done in uCode */
	priv->disable_tx_power_cal = 1;
}

static void iwl5000_rx_handler_setup(struct iwl_priv *priv)
{
	/* init calibration handlers */
	priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
					iwl5000_rx_calib_result;
	priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
					iwl5000_rx_calib_complete;
	priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx;
}


static int iwl5000_hw_valid_rtc_data_addr(u32 addr)
{
	return (addr >= IWL50_RTC_DATA_LOWER_BOUND) &&
		(addr < IWL50_RTC_DATA_UPPER_BOUND);
}

static int iwl5000_send_rxon_assoc(struct iwl_priv *priv)
{
	int ret = 0;
	struct iwl5000_rxon_assoc_cmd rxon_assoc;
	const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
	const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;

	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_ht_single_stream_basic_rates ==
	     rxon2->ofdm_ht_single_stream_basic_rates) &&
	    (rxon1->ofdm_ht_dual_stream_basic_rates ==
	     rxon2->ofdm_ht_dual_stream_basic_rates) &&
	    (rxon1->ofdm_ht_triple_stream_basic_rates ==
	     rxon2->ofdm_ht_triple_stream_basic_rates) &&
	    (rxon1->acquisition_data == rxon2->acquisition_data) &&
	    (rxon1->rx_chain == rxon2->rx_chain) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = priv->staging_rxon.flags;
	rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
	rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
	rxon_assoc.reserved1 = 0;
	rxon_assoc.reserved2 = 0;
	rxon_assoc.reserved3 = 0;
	rxon_assoc.ofdm_ht_single_stream_basic_rates =
		priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
		priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
	rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
	rxon_assoc.ofdm_ht_triple_stream_basic_rates =
		priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
	rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;

	ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
				     sizeof(rxon_assoc), &rxon_assoc, NULL);
	if (ret)
		return ret;

	return ret;
}
static int iwl5000_send_tx_power(struct iwl_priv *priv)
{
	struct iwl5000_tx_power_dbm_cmd tx_power_cmd;

	/* value is in half dBm, so multiply by 2 */
	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
	tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
	tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
	return iwl_send_cmd_pdu_async(priv, REPLY_TX_POWER_DBM_CMD,
				      sizeof(tx_power_cmd), &tx_power_cmd,
				      NULL);
}

static void iwl5000_temperature(struct iwl_priv *priv)
{
	/* store temperature from statistics (in Celsius) */
	priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
}

/* Calc max signal level (dBm) among 3 possible receivers */
static int iwl5000_calc_rssi(struct iwl_priv *priv,
			     struct iwl_rx_phy_res *rx_resp)
{
	/* data from PHY/DSP regarding signal strength, etc.,
	 * contents are always there, not configurable by host
	 */
	struct iwl5000_non_cfg_phy *ncphy =
		(struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
	u8 agc;

	val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
	agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;

	/* Find max rssi among 3 possible receivers.
	 * These values are measured by the digital signal processor (DSP).
	 * They should stay fairly constant even as the signal strength varies,
	 * if the radio's automatic gain control (AGC) is working right.
	 * AGC value (see below) will provide the "interesting" info.
	 */
	val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
	rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
	rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
	val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
	rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;

	max_rssi = max_t(u32, rssi_a, rssi_b);
	max_rssi = max_t(u32, max_rssi, rssi_c);

	IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
			rssi_a, rssi_b, rssi_c, max_rssi, agc);

	/* dBm = max_rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal. */
	return max_rssi - agc - IWL49_RSSI_OFFSET;
}

static struct iwl_hcmd_ops iwl5000_hcmd = {
	.rxon_assoc = iwl5000_send_rxon_assoc,
};

static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
	.get_hcmd_size = iwl5000_get_hcmd_size,
	.build_addsta_hcmd = iwl5000_build_addsta_hcmd,
	.gain_computation = iwl5000_gain_computation,
	.chain_noise_reset = iwl5000_chain_noise_reset,
	.rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
	.calc_rssi = iwl5000_calc_rssi,
};

static struct iwl_lib_ops iwl5000_lib = {
	.set_hw_params = iwl5000_hw_set_hw_params,
	.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
	.txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
	.txq_set_sched = iwl5000_txq_set_sched,
	.txq_agg_enable = iwl5000_txq_agg_enable,
	.txq_agg_disable = iwl5000_txq_agg_disable,
	.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = iwl_hw_txq_free_tfd,
	.txq_init = iwl_hw_tx_queue_init,
	.rx_handler_setup = iwl5000_rx_handler_setup,
	.setup_deferred_work = iwl5000_setup_deferred_work,
	.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
	.load_ucode = iwl5000_load_ucode,
	.init_alive_start = iwl5000_init_alive_start,
	.alive_notify = iwl5000_alive_notify,
	.send_tx_power = iwl5000_send_tx_power,
	.temperature = iwl5000_temperature,
	.update_chain_flags = iwl_update_chain_flags,
	.apm_ops = {
		.init = iwl5000_apm_init,
		.reset = iwl5000_apm_reset,
		.stop = iwl5000_apm_stop,
		.config = iwl5000_nic_config,
		.set_pwr_src = iwl_set_pwr_src,
	},
	.eeprom_ops = {
		.regulatory_bands = {
			EEPROM_5000_REG_BAND_1_CHANNELS,
			EEPROM_5000_REG_BAND_2_CHANNELS,
			EEPROM_5000_REG_BAND_3_CHANNELS,
			EEPROM_5000_REG_BAND_4_CHANNELS,
			EEPROM_5000_REG_BAND_5_CHANNELS,
			EEPROM_5000_REG_BAND_24_FAT_CHANNELS,
			EEPROM_5000_REG_BAND_52_FAT_CHANNELS
		},
		.verify_signature = iwlcore_eeprom_verify_signature,
		.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
		.release_semaphore = iwlcore_eeprom_release_semaphore,
		.calib_version = iwl5000_eeprom_calib_version,
		.query_addr = iwl5000_eeprom_query_addr,
	},
};

struct iwl_ops iwl5000_ops = {
	.lib = &iwl5000_lib,
	.hcmd = &iwl5000_hcmd,
	.utils = &iwl5000_hcmd_utils,
};

struct iwl_mod_params iwl50_mod_params = {
	.num_of_queues = IWL50_NUM_QUEUES,
	.num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};


struct iwl_cfg iwl5300_agn_cfg = {
	.name = "5300AGN",
	.fw_name_pre = IWL5000_FW_PRE,
	.ucode_api_max = IWL5000_UCODE_API_MAX,
	.ucode_api_min = IWL5000_UCODE_API_MIN,
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
	.mod_params = &iwl50_mod_params,
};

struct iwl_cfg iwl5100_bg_cfg = {
	.name = "5100BG",
	.fw_name_pre = IWL5000_FW_PRE,
	.ucode_api_max = IWL5000_UCODE_API_MAX,
	.ucode_api_min = IWL5000_UCODE_API_MIN,
	.sku = IWL_SKU_G,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
	.mod_params = &iwl50_mod_params,
};

struct iwl_cfg iwl5100_abg_cfg = {
	.name = "5100ABG",
	.fw_name_pre = IWL5000_FW_PRE,
	.ucode_api_max = IWL5000_UCODE_API_MAX,
	.ucode_api_min = IWL5000_UCODE_API_MIN,
	.sku = IWL_SKU_A|IWL_SKU_G,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
	.mod_params = &iwl50_mod_params,
};

struct iwl_cfg iwl5100_agn_cfg = {
	.name = "5100AGN",
	.fw_name_pre = IWL5000_FW_PRE,
	.ucode_api_max = IWL5000_UCODE_API_MAX,
	.ucode_api_min = IWL5000_UCODE_API_MIN,
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.eeprom_ver = EEPROM_5000_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
	.mod_params = &iwl50_mod_params,
};

struct iwl_cfg iwl5350_agn_cfg = {
	.name = "5350AGN",
	.fw_name_pre = IWL5000_FW_PRE,
	.ucode_api_max = IWL5000_UCODE_API_MAX,
	.ucode_api_min = IWL5000_UCODE_API_MIN,
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
	.mod_params = &iwl50_mod_params,
};

struct iwl_cfg iwl5150_agn_cfg = {
	.name = "5150AGN",
	.fw_name_pre = IWL5150_FW_PRE,
	.ucode_api_max = IWL5150_UCODE_API_MAX,
	.ucode_api_min = IWL5150_UCODE_API_MIN,
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.ops = &iwl5000_ops,
	.eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
	.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
	.mod_params = &iwl50_mod_params,
};

MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));

module_param_named(disable50, iwl50_mod_params.disable, int, 0444);
MODULE_PARM_DESC(disable50,
		 "manually disable the 50XX radio (default 0 [radio on])");
module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, 0444);
MODULE_PARM_DESC(swcrypto50,
		 "using software crypto engine (default 0 [hardware])\n");
module_param_named(debug50, iwl50_mod_params.debug, uint, 0444);
MODULE_PARM_DESC(debug50, "50XX debug output mask");
module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, 0444);
MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, 0444);
MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality");
module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, int, 0444);
MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, 0444);
MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");