ipw2200.c revision a2bfbc072e279ff81e6b336acff612b9bc2e5281
/******************************************************************************

  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.

  802.11 status code portion of this file from ethereal-0.10.6:
    Copyright 2000, Axis Communications AB
    Ethereal - Network traffic analyzer
    By Gerald Combs <gerald@ethereal.com>
    Copyright 1998 Gerald Combs

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA  02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  Intel Linux Wireless <ilw@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************/

#include "ipw2200.h"


#ifndef KBUILD_EXTMOD
#define VK "k"
#else
#define VK
#endif

#ifdef CONFIG_IPW2200_DEBUG
#define VD "d"
#else
#define VD
#endif

#ifdef CONFIG_IPW2200_MONITOR
#define VM "m"
#else
#define VM
#endif

#ifdef CONFIG_IPW2200_PROMISCUOUS
#define VP "p"
#else
#define VP
#endif

#ifdef CONFIG_IPW2200_RADIOTAP
#define VR "r"
#else
#define VR
#endif

#ifdef CONFIG_IPW2200_QOS
#define VQ "q"
#else
#define VQ
#endif

#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
#define DRV_VERSION	IPW2200_VERSION

#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");

static int cmdlog = 0;
static int debug = 0;
static int default_channel = 0;
static int network_mode = 0;

static u32 ipw_debug_level;
static int associate;
static int auto_create = 1;
static int led_support = 0;
static int disable = 0;
static int bt_coexist = 0;
static int hwcrypto = 0;
static int roaming = 1;
static const char ipw_modes[] = {
	'a', 'b', 'g', '?'
99}; 100static int antenna = CFG_SYS_ANTENNA_BOTH; 101 102#ifdef CONFIG_IPW2200_PROMISCUOUS 103static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */ 104#endif 105 106 107#ifdef CONFIG_IPW2200_QOS 108static int qos_enable = 0; 109static int qos_burst_enable = 0; 110static int qos_no_ack_mask = 0; 111static int burst_duration_CCK = 0; 112static int burst_duration_OFDM = 0; 113 114static struct libipw_qos_parameters def_qos_parameters_OFDM = { 115 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM, 116 QOS_TX3_CW_MIN_OFDM}, 117 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM, 118 QOS_TX3_CW_MAX_OFDM}, 119 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS}, 120 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM}, 121 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM, 122 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM} 123}; 124 125static struct libipw_qos_parameters def_qos_parameters_CCK = { 126 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK, 127 QOS_TX3_CW_MIN_CCK}, 128 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK, 129 QOS_TX3_CW_MAX_CCK}, 130 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS}, 131 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM}, 132 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK, 133 QOS_TX3_TXOP_LIMIT_CCK} 134}; 135 136static struct libipw_qos_parameters def_parameters_OFDM = { 137 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM, 138 DEF_TX3_CW_MIN_OFDM}, 139 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM, 140 DEF_TX3_CW_MAX_OFDM}, 141 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS}, 142 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM}, 143 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM, 144 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM} 145}; 146 147static struct libipw_qos_parameters def_parameters_CCK = { 148 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK, 149 DEF_TX3_CW_MIN_CCK}, 150 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK, 151 DEF_TX3_CW_MAX_CCK}, 152 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS}, 153 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM}, 154 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK, 155 DEF_TX3_TXOP_LIMIT_CCK} 156}; 157 158static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; 159 160static int from_priority_to_tx_queue[] = { 161 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1, 162 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4 163}; 164 165static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv); 166 167static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters 168 *qos_param); 169static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element 170 *qos_param); 171#endif /* CONFIG_IPW2200_QOS */ 172 173static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev); 174static void ipw_remove_current_network(struct ipw_priv *priv); 175static void ipw_rx(struct ipw_priv *priv); 176static int ipw_queue_tx_reclaim(struct ipw_priv *priv, 177 struct clx2_tx_queue *txq, int qindex); 178static int ipw_queue_reset(struct ipw_priv *priv); 179 180static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, 181 int len, int sync); 182 183static void ipw_tx_queue_free(struct ipw_priv *); 184 185static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *); 186static void ipw_rx_queue_free(struct 
ipw_priv *, struct ipw_rx_queue *); 187static void ipw_rx_queue_replenish(void *); 188static int ipw_up(struct ipw_priv *); 189static void ipw_bg_up(struct work_struct *work); 190static void ipw_down(struct ipw_priv *); 191static void ipw_bg_down(struct work_struct *work); 192static int ipw_config(struct ipw_priv *); 193static int init_supported_rates(struct ipw_priv *priv, 194 struct ipw_supported_rates *prates); 195static void ipw_set_hwcrypto_keys(struct ipw_priv *); 196static void ipw_send_wep_keys(struct ipw_priv *, int); 197 198static int snprint_line(char *buf, size_t count, 199 const u8 * data, u32 len, u32 ofs) 200{ 201 int out, i, j, l; 202 char c; 203 204 out = snprintf(buf, count, "%08X", ofs); 205 206 for (l = 0, i = 0; i < 2; i++) { 207 out += snprintf(buf + out, count - out, " "); 208 for (j = 0; j < 8 && l < len; j++, l++) 209 out += snprintf(buf + out, count - out, "%02X ", 210 data[(i * 8 + j)]); 211 for (; j < 8; j++) 212 out += snprintf(buf + out, count - out, " "); 213 } 214 215 out += snprintf(buf + out, count - out, " "); 216 for (l = 0, i = 0; i < 2; i++) { 217 out += snprintf(buf + out, count - out, " "); 218 for (j = 0; j < 8 && l < len; j++, l++) { 219 c = data[(i * 8 + j)]; 220 if (!isascii(c) || !isprint(c)) 221 c = '.'; 222 223 out += snprintf(buf + out, count - out, "%c", c); 224 } 225 226 for (; j < 8; j++) 227 out += snprintf(buf + out, count - out, " "); 228 } 229 230 return out; 231} 232 233static void printk_buf(int level, const u8 * data, u32 len) 234{ 235 char line[81]; 236 u32 ofs = 0; 237 if (!(ipw_debug_level & level)) 238 return; 239 240 while (len) { 241 snprint_line(line, sizeof(line), &data[ofs], 242 min(len, 16U), ofs); 243 printk(KERN_DEBUG "%s\n", line); 244 ofs += 16; 245 len -= min(len, 16U); 246 } 247} 248 249static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len) 250{ 251 size_t out = size; 252 u32 ofs = 0; 253 int total = 0; 254 255 while (size && len) { 256 out = snprint_line(output, size, &data[ofs], 257 min_t(size_t, len, 16U), ofs); 258 259 ofs += 16; 260 output += out; 261 size -= out; 262 len -= min_t(size_t, len, 16U); 263 total += out; 264 } 265 return total; 266} 267 268/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ 269static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg); 270#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b) 271 272/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ 273static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg); 274#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b) 275 276/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 277static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); 278static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) 279{ 280 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__, 281 __LINE__, (u32) (b), (u32) (c)); 282 _ipw_write_reg8(a, b, c); 283} 284 285/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 286static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); 287static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) 288{ 289 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__, 290 __LINE__, (u32) (b), (u32) (c)); 291 _ipw_write_reg16(a, b, c); 292} 293 294/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 295static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); 296static inline void ipw_write_reg32(struct ipw_priv *a, u32 
b, u32 c) 297{ 298 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__, 299 __LINE__, (u32) (b), (u32) (c)); 300 _ipw_write_reg32(a, b, c); 301} 302 303/* 8-bit direct write (low 4K) */ 304static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs, 305 u8 val) 306{ 307 writeb(val, ipw->hw_base + ofs); 308} 309 310/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 311#define ipw_write8(ipw, ofs, val) do { \ 312 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \ 313 __LINE__, (u32)(ofs), (u32)(val)); \ 314 _ipw_write8(ipw, ofs, val); \ 315} while (0) 316 317/* 16-bit direct write (low 4K) */ 318static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs, 319 u16 val) 320{ 321 writew(val, ipw->hw_base + ofs); 322} 323 324/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 325#define ipw_write16(ipw, ofs, val) do { \ 326 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \ 327 __LINE__, (u32)(ofs), (u32)(val)); \ 328 _ipw_write16(ipw, ofs, val); \ 329} while (0) 330 331/* 32-bit direct write (low 4K) */ 332static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs, 333 u32 val) 334{ 335 writel(val, ipw->hw_base + ofs); 336} 337 338/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 339#define ipw_write32(ipw, ofs, val) do { \ 340 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \ 341 __LINE__, (u32)(ofs), (u32)(val)); \ 342 _ipw_write32(ipw, ofs, val); \ 343} while (0) 344 345/* 8-bit direct read (low 4K) */ 346static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs) 347{ 348 return readb(ipw->hw_base + ofs); 349} 350 351/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 352#define ipw_read8(ipw, ofs) ({ \ 353 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \ 354 (u32)(ofs)); \ 355 _ipw_read8(ipw, ofs); \ 356}) 357 358/* 16-bit direct read (low 4K) */ 359static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs) 360{ 361 return readw(ipw->hw_base + ofs); 362} 363 364/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 365#define ipw_read16(ipw, ofs) ({ \ 366 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \ 367 (u32)(ofs)); \ 368 _ipw_read16(ipw, ofs); \ 369}) 370 371/* 32-bit direct read (low 4K) */ 372static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs) 373{ 374 return readl(ipw->hw_base + ofs); 375} 376 377/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 378#define ipw_read32(ipw, ofs) ({ \ 379 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \ 380 (u32)(ofs)); \ 381 _ipw_read32(ipw, ofs); \ 382}) 383 384static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); 385/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ 386#define ipw_read_indirect(a, b, c, d) ({ \ 387 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \ 388 __LINE__, (u32)(b), (u32)(d)); \ 389 _ipw_read_indirect(a, b, c, d); \ 390}) 391 392/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ 393static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data, 394 int num); 395#define ipw_write_indirect(a, b, c, d) do { \ 396 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \ 397 __LINE__, (u32)(b), (u32)(d)); \ 398 _ipw_write_indirect(a, b, c, d); \ 399} while (0) 400 401/* 32-bit indirect write (above 4K) */ 
402static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) 403{ 404 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value); 405 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); 406 _ipw_write32(priv, IPW_INDIRECT_DATA, value); 407} 408 409/* 8-bit indirect write (above 4K) */ 410static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) 411{ 412 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ 413 u32 dif_len = reg - aligned_addr; 414 415 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 416 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 417 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value); 418} 419 420/* 16-bit indirect write (above 4K) */ 421static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value) 422{ 423 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ 424 u32 dif_len = (reg - aligned_addr) & (~0x1ul); 425 426 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 427 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 428 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value); 429} 430 431/* 8-bit indirect read (above 4K) */ 432static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) 433{ 434 u32 word; 435 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); 436 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg); 437 word = _ipw_read32(priv, IPW_INDIRECT_DATA); 438 return (word >> ((reg & 0x3) * 8)) & 0xff; 439} 440 441/* 32-bit indirect read (above 4K) */ 442static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) 443{ 444 u32 value; 445 446 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg); 447 448 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); 449 value = _ipw_read32(priv, IPW_INDIRECT_DATA); 450 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value); 451 return value; 452} 453 454/* General purpose, no alignment requirement, iterative (multi-byte) read, */ 455/* for area above 1st 4K of SRAM/reg space */ 456static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, 457 int num) 458{ 459 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ 460 u32 dif_len = addr - aligned_addr; 461 u32 i; 462 463 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num); 464 465 if (num <= 0) { 466 return; 467 } 468 469 /* Read the first dword (or portion) byte by byte */ 470 if (unlikely(dif_len)) { 471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 472 /* Start reading at aligned_addr + dif_len */ 473 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--) 474 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i); 475 aligned_addr += 4; 476 } 477 478 /* Read all of the middle dwords as dwords, with auto-increment */ 479 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); 480 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) 481 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA); 482 483 /* Read the last dword (or portion) byte by byte */ 484 if (unlikely(num)) { 485 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 486 for (i = 0; num > 0; i++, num--) 487 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i); 488 } 489} 490 491/* General purpose, no alignment requirement, iterative (multi-byte) write, */ 492/* for area above 1st 4K of SRAM/reg space */ 493static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, 494 int num) 495{ 496 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ 497 u32 dif_len = addr - aligned_addr; 498 u32 i; 499 500 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num); 501 
502 if (num <= 0) { 503 return; 504 } 505 506 /* Write the first dword (or portion) byte by byte */ 507 if (unlikely(dif_len)) { 508 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 509 /* Start writing at aligned_addr + dif_len */ 510 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++) 511 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); 512 aligned_addr += 4; 513 } 514 515 /* Write all of the middle dwords as dwords, with auto-increment */ 516 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); 517 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) 518 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf); 519 520 /* Write the last dword (or portion) byte by byte */ 521 if (unlikely(num)) { 522 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 523 for (i = 0; num > 0; i++, num--, buf++) 524 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); 525 } 526} 527 528/* General purpose, no alignment requirement, iterative (multi-byte) write, */ 529/* for 1st 4K of SRAM/regs space */ 530static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf, 531 int num) 532{ 533 memcpy_toio((priv->hw_base + addr), buf, num); 534} 535 536/* Set bit(s) in low 4K of SRAM/regs */ 537static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask) 538{ 539 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask); 540} 541 542/* Clear bit(s) in low 4K of SRAM/regs */ 543static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask) 544{ 545 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask); 546} 547 548static inline void __ipw_enable_interrupts(struct ipw_priv *priv) 549{ 550 if (priv->status & STATUS_INT_ENABLED) 551 return; 552 priv->status |= STATUS_INT_ENABLED; 553 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL); 554} 555 556static inline void __ipw_disable_interrupts(struct ipw_priv *priv) 557{ 558 if (!(priv->status & STATUS_INT_ENABLED)) 559 return; 560 priv->status &= ~STATUS_INT_ENABLED; 561 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 562} 563 564static inline void ipw_enable_interrupts(struct ipw_priv *priv) 565{ 566 unsigned long flags; 567 568 spin_lock_irqsave(&priv->irq_lock, flags); 569 __ipw_enable_interrupts(priv); 570 spin_unlock_irqrestore(&priv->irq_lock, flags); 571} 572 573static inline void ipw_disable_interrupts(struct ipw_priv *priv) 574{ 575 unsigned long flags; 576 577 spin_lock_irqsave(&priv->irq_lock, flags); 578 __ipw_disable_interrupts(priv); 579 spin_unlock_irqrestore(&priv->irq_lock, flags); 580} 581 582static char *ipw_error_desc(u32 val) 583{ 584 switch (val) { 585 case IPW_FW_ERROR_OK: 586 return "ERROR_OK"; 587 case IPW_FW_ERROR_FAIL: 588 return "ERROR_FAIL"; 589 case IPW_FW_ERROR_MEMORY_UNDERFLOW: 590 return "MEMORY_UNDERFLOW"; 591 case IPW_FW_ERROR_MEMORY_OVERFLOW: 592 return "MEMORY_OVERFLOW"; 593 case IPW_FW_ERROR_BAD_PARAM: 594 return "BAD_PARAM"; 595 case IPW_FW_ERROR_BAD_CHECKSUM: 596 return "BAD_CHECKSUM"; 597 case IPW_FW_ERROR_NMI_INTERRUPT: 598 return "NMI_INTERRUPT"; 599 case IPW_FW_ERROR_BAD_DATABASE: 600 return "BAD_DATABASE"; 601 case IPW_FW_ERROR_ALLOC_FAIL: 602 return "ALLOC_FAIL"; 603 case IPW_FW_ERROR_DMA_UNDERRUN: 604 return "DMA_UNDERRUN"; 605 case IPW_FW_ERROR_DMA_STATUS: 606 return "DMA_STATUS"; 607 case IPW_FW_ERROR_DINO_ERROR: 608 return "DINO_ERROR"; 609 case IPW_FW_ERROR_EEPROM_ERROR: 610 return "EEPROM_ERROR"; 611 case IPW_FW_ERROR_SYSASSERT: 612 return "SYSASSERT"; 613 case IPW_FW_ERROR_FATAL_ERROR: 614 return "FATAL_ERROR"; 615 default: 616 return "UNKNOWN_ERROR"; 617 } 618} 619 
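/*
 * Descriptive note: ipw_dump_error_log() below is called from the
 * fatal-error interrupt path (see the IPW_INTA_BIT_FATAL_ERROR handling
 * in ipw_irq_tasklet) with a log captured by ipw_alloc_error_log(); the
 * same data is also exported through the sysfs 'error' attribute via
 * show_error().
 */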
static void ipw_dump_error_log(struct ipw_priv *priv,
			       struct ipw_fw_error *error)
{
	u32 i;

	if (!error) {
		IPW_ERROR("Error allocating and capturing error log. "
			  "Nothing to dump.\n");
		return;
	}

	IPW_ERROR("Start IPW Error Log Dump:\n");
	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
		  error->status, error->config);

	for (i = 0; i < error->elem_len; i++)
		IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			  ipw_error_desc(error->elem[i].desc),
			  error->elem[i].time,
			  error->elem[i].blink1,
			  error->elem[i].blink2,
			  error->elem[i].link1,
			  error->elem[i].link2, error->elem[i].data);
	for (i = 0; i < error->log_len; i++)
		IPW_ERROR("%i\t0x%08x\t%i\n",
			  error->log[i].time,
			  error->log[i].data, error->log[i].event);
}

static inline int ipw_is_init(struct ipw_priv *priv)
{
	return (priv->status & STATUS_INIT) ? 1 : 0;
}

static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 *len)
{
	u32 addr, field_info, field_len, field_count, total_len;

	IPW_DEBUG_ORD("ordinal = %i\n", ord);

	if (!priv || !val || !len) {
		IPW_DEBUG_ORD("Invalid argument\n");
		return -EINVAL;
	}

	/* verify device ordinal tables have been initialized */
	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
		IPW_DEBUG_ORD("Access ordinals before initialization\n");
		return -EINVAL;
	}

	switch (IPW_ORD_TABLE_ID_MASK & ord) {
	case IPW_ORD_TABLE_0_MASK:
		/*
		 * TABLE 0: Direct access to a table of 32 bit values
		 *
		 * This is a very simple table with the data directly
		 * read from the table
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table0_len) {
			IPW_DEBUG_ORD("ordinal value (%i) larger than "
				      "max (%i)\n", ord, priv->table0_len);
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
			      ord, priv->table0_addr + (ord << 2));

		*len = sizeof(u32);
		ord <<= 2;
		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
		break;

	case IPW_ORD_TABLE_1_MASK:
		/*
		 * TABLE 1: Indirect access to a table of 32 bit values
		 *
		 * This is a fairly large table of u32 values each
		 * representing starting addr for the data (which is
		 * also a u32)
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table1_len) {
			IPW_DEBUG_ORD("ordinal value too large\n");
			return -EINVAL;
		}

		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		*((u32 *) val) =
		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
		*len = sizeof(u32);
		break;

	case IPW_ORD_TABLE_2_MASK:
		/*
		 * TABLE 2: Indirect access to a table of variable-sized values
		 *
		 * This table consists of six values, each containing
		 * - dword containing the starting offset of the data
		 * - dword containing the length in the first 16 bits
		 *   and the count in the second 16 bits
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table2_len) {
			IPW_DEBUG_ORD("ordinal value too large\n");
			return -EINVAL;
		}

		/* get the address of statistic */
		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));

		/* get the second DW of statistics;
		 * two 16-bit words - first is length, second is count */
		field_info =
		    ipw_read_reg32(priv,
				   priv->table2_addr + (ord << 3) +
				   sizeof(u32));

		/* get each entry length */
		field_len = *((u16 *) & field_info);

		/* get number of entries */
		field_count = *(((u16 *) & field_info) + 1);

		/* abort if there is not enough memory */
		total_len = field_len * field_count;
		if (total_len > *len) {
			*len = total_len;
			return -EINVAL;
		}

		*len = total_len;
		if (!total_len)
			return 0;

		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
			      "field_info = 0x%08x\n",
			      addr, total_len, field_info);
		ipw_read_indirect(priv, addr, val, total_len);
		break;

	default:
		IPW_DEBUG_ORD("Invalid ordinal!\n");
		return -EINVAL;

	}

	return 0;
}

static void ipw_init_ordinals(struct ipw_priv *priv)
{
	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
	priv->table0_len = ipw_read32(priv, priv->table0_addr);

	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
		      priv->table0_addr, priv->table0_len);

	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);

	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
		      priv->table1_addr, priv->table1_len);

	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
	priv->table2_len &= 0x0000ffff;	/* use first two bytes */

	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
		      priv->table2_addr, priv->table2_len);

}

static u32 ipw_register_toggle(u32 reg)
{
	reg &= ~IPW_START_STANDBY;
	if (reg & IPW_GATE_ODMA)
		reg &= ~IPW_GATE_ODMA;
	if (reg & IPW_GATE_IDMA)
		reg &= ~IPW_GATE_IDMA;
	if (reg & IPW_GATE_ADMA)
		reg &= ~IPW_GATE_ADMA;
	return reg;
}

/*
 * LED behavior:
 * - On radio ON, turn on any LEDs that are required to be on during start
 * - On initialization, start unassociated blink
 * - On association, disable unassociated blink
 * - On disassociation, start unassociated blink
 * - On radio OFF, turn off any LEDs started during radio on
 *
 */
#define LD_TIME_LINK_ON msecs_to_jiffies(300)
#define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
#define LD_TIME_ACT_ON msecs_to_jiffies(250)
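/*
 * Descriptive note: with the intervals above, the unassociated "link"
 * blink is roughly a 300 ms flash every 3 seconds (300 ms on, 2700 ms
 * off), while the activity LED is held on for 250 ms after the most
 * recent activity before being turned off.
 */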
static void ipw_led_link_on(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured to not use LEDs, or nic_type is 1,
	 * then we don't toggle a LINK led */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (!(priv->status & STATUS_RF_KILL_MASK) &&
	    !(priv->status & STATUS_LED_LINK_ON)) {
		IPW_DEBUG_LED("Link LED On\n");
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led |= priv->led_association_on;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		priv->status |= STATUS_LED_LINK_ON;

		/* If we aren't associated, schedule turning the LED off */
		if (!(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue,
					   &priv->led_link_off,
					   LD_TIME_LINK_ON);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipw_bg_led_link_on(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, led_link_on.work);
	mutex_lock(&priv->mutex);
	ipw_led_link_on(priv);
	mutex_unlock(&priv->mutex);
}

static void ipw_led_link_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured not to use LEDs, or nic type is 1,
	 * then we don't toggle the LINK led. */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_LINK_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_association_off;
		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Link LED Off\n");

		priv->status &= ~STATUS_LED_LINK_ON;

		/* If we aren't associated and the radio is on, schedule
		 * turning the LED on (blink while unassociated) */
		if (!(priv->status & STATUS_RF_KILL_MASK) &&
		    !(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue, &priv->led_link_on,
					   LD_TIME_LINK_OFF);

	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipw_bg_led_link_off(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, led_link_off.work);
	mutex_lock(&priv->mutex);
	ipw_led_link_off(priv);
	mutex_unlock(&priv->mutex);
}

static void __ipw_led_activity_on(struct ipw_priv *priv)
{
	u32 led;

	if (priv->config & CFG_NO_LED)
		return;

	if (priv->status & STATUS_RF_KILL_MASK)
		return;

	if (!(priv->status & STATUS_LED_ACT_ON)) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led |= priv->led_activity_on;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Activity LED On\n");

		priv->status |= STATUS_LED_ACT_ON;

		cancel_delayed_work(&priv->led_act_off);
		queue_delayed_work(priv->workqueue, &priv->led_act_off,
				   LD_TIME_ACT_ON);
	} else {
		/* Reschedule LED off for full time period */
		cancel_delayed_work(&priv->led_act_off);
		queue_delayed_work(priv->workqueue, &priv->led_act_off,
				   LD_TIME_ACT_ON);
	}
}

#if 0
void ipw_led_activity_on(struct ipw_priv *priv)
{
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	__ipw_led_activity_on(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
#endif				/* 0 */

static void ipw_led_activity_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	if (priv->config & CFG_NO_LED)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_ACT_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_activity_off;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Activity LED Off\n");

		priv->status &= ~STATUS_LED_ACT_ON;
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipw_bg_led_activity_off(struct work_struct *work)
{
1006 struct ipw_priv *priv = 1007 container_of(work, struct ipw_priv, led_act_off.work); 1008 mutex_lock(&priv->mutex); 1009 ipw_led_activity_off(priv); 1010 mutex_unlock(&priv->mutex); 1011} 1012 1013static void ipw_led_band_on(struct ipw_priv *priv) 1014{ 1015 unsigned long flags; 1016 u32 led; 1017 1018 /* Only nic type 1 supports mode LEDs */ 1019 if (priv->config & CFG_NO_LED || 1020 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network) 1021 return; 1022 1023 spin_lock_irqsave(&priv->lock, flags); 1024 1025 led = ipw_read_reg32(priv, IPW_EVENT_REG); 1026 if (priv->assoc_network->mode == IEEE_A) { 1027 led |= priv->led_ofdm_on; 1028 led &= priv->led_association_off; 1029 IPW_DEBUG_LED("Mode LED On: 802.11a\n"); 1030 } else if (priv->assoc_network->mode == IEEE_G) { 1031 led |= priv->led_ofdm_on; 1032 led |= priv->led_association_on; 1033 IPW_DEBUG_LED("Mode LED On: 802.11g\n"); 1034 } else { 1035 led &= priv->led_ofdm_off; 1036 led |= priv->led_association_on; 1037 IPW_DEBUG_LED("Mode LED On: 802.11b\n"); 1038 } 1039 1040 led = ipw_register_toggle(led); 1041 1042 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 1043 ipw_write_reg32(priv, IPW_EVENT_REG, led); 1044 1045 spin_unlock_irqrestore(&priv->lock, flags); 1046} 1047 1048static void ipw_led_band_off(struct ipw_priv *priv) 1049{ 1050 unsigned long flags; 1051 u32 led; 1052 1053 /* Only nic type 1 supports mode LEDs */ 1054 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1) 1055 return; 1056 1057 spin_lock_irqsave(&priv->lock, flags); 1058 1059 led = ipw_read_reg32(priv, IPW_EVENT_REG); 1060 led &= priv->led_ofdm_off; 1061 led &= priv->led_association_off; 1062 1063 led = ipw_register_toggle(led); 1064 1065 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 1066 ipw_write_reg32(priv, IPW_EVENT_REG, led); 1067 1068 spin_unlock_irqrestore(&priv->lock, flags); 1069} 1070 1071static void ipw_led_radio_on(struct ipw_priv *priv) 1072{ 1073 ipw_led_link_on(priv); 1074} 1075 1076static void ipw_led_radio_off(struct ipw_priv *priv) 1077{ 1078 ipw_led_activity_off(priv); 1079 ipw_led_link_off(priv); 1080} 1081 1082static void ipw_led_link_up(struct ipw_priv *priv) 1083{ 1084 /* Set the Link Led on for all nic types */ 1085 ipw_led_link_on(priv); 1086} 1087 1088static void ipw_led_link_down(struct ipw_priv *priv) 1089{ 1090 ipw_led_activity_off(priv); 1091 ipw_led_link_off(priv); 1092 1093 if (priv->status & STATUS_RF_KILL_MASK) 1094 ipw_led_radio_off(priv); 1095} 1096 1097static void ipw_led_init(struct ipw_priv *priv) 1098{ 1099 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE]; 1100 1101 /* Set the default PINs for the link and activity leds */ 1102 priv->led_activity_on = IPW_ACTIVITY_LED; 1103 priv->led_activity_off = ~(IPW_ACTIVITY_LED); 1104 1105 priv->led_association_on = IPW_ASSOCIATED_LED; 1106 priv->led_association_off = ~(IPW_ASSOCIATED_LED); 1107 1108 /* Set the default PINs for the OFDM leds */ 1109 priv->led_ofdm_on = IPW_OFDM_LED; 1110 priv->led_ofdm_off = ~(IPW_OFDM_LED); 1111 1112 switch (priv->nic_type) { 1113 case EEPROM_NIC_TYPE_1: 1114 /* In this NIC type, the LEDs are reversed.... 
*/ 1115 priv->led_activity_on = IPW_ASSOCIATED_LED; 1116 priv->led_activity_off = ~(IPW_ASSOCIATED_LED); 1117 priv->led_association_on = IPW_ACTIVITY_LED; 1118 priv->led_association_off = ~(IPW_ACTIVITY_LED); 1119 1120 if (!(priv->config & CFG_NO_LED)) 1121 ipw_led_band_on(priv); 1122 1123 /* And we don't blink link LEDs for this nic, so 1124 * just return here */ 1125 return; 1126 1127 case EEPROM_NIC_TYPE_3: 1128 case EEPROM_NIC_TYPE_2: 1129 case EEPROM_NIC_TYPE_4: 1130 case EEPROM_NIC_TYPE_0: 1131 break; 1132 1133 default: 1134 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n", 1135 priv->nic_type); 1136 priv->nic_type = EEPROM_NIC_TYPE_0; 1137 break; 1138 } 1139 1140 if (!(priv->config & CFG_NO_LED)) { 1141 if (priv->status & STATUS_ASSOCIATED) 1142 ipw_led_link_on(priv); 1143 else 1144 ipw_led_link_off(priv); 1145 } 1146} 1147 1148static void ipw_led_shutdown(struct ipw_priv *priv) 1149{ 1150 ipw_led_activity_off(priv); 1151 ipw_led_link_off(priv); 1152 ipw_led_band_off(priv); 1153 cancel_delayed_work(&priv->led_link_on); 1154 cancel_delayed_work(&priv->led_link_off); 1155 cancel_delayed_work(&priv->led_act_off); 1156} 1157 1158/* 1159 * The following adds a new attribute to the sysfs representation 1160 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/) 1161 * used for controling the debug level. 1162 * 1163 * See the level definitions in ipw for details. 1164 */ 1165static ssize_t show_debug_level(struct device_driver *d, char *buf) 1166{ 1167 return sprintf(buf, "0x%08X\n", ipw_debug_level); 1168} 1169 1170static ssize_t store_debug_level(struct device_driver *d, const char *buf, 1171 size_t count) 1172{ 1173 char *p = (char *)buf; 1174 u32 val; 1175 1176 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { 1177 p++; 1178 if (p[0] == 'x' || p[0] == 'X') 1179 p++; 1180 val = simple_strtoul(p, &p, 16); 1181 } else 1182 val = simple_strtoul(p, &p, 10); 1183 if (p == buf) 1184 printk(KERN_INFO DRV_NAME 1185 ": %s is not in hex or decimal form.\n", buf); 1186 else 1187 ipw_debug_level = val; 1188 1189 return strnlen(buf, count); 1190} 1191 1192static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, 1193 show_debug_level, store_debug_level); 1194 1195static inline u32 ipw_get_event_log_len(struct ipw_priv *priv) 1196{ 1197 /* length = 1st dword in log */ 1198 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG)); 1199} 1200 1201static void ipw_capture_event_log(struct ipw_priv *priv, 1202 u32 log_len, struct ipw_event *log) 1203{ 1204 u32 base; 1205 1206 if (log_len) { 1207 base = ipw_read32(priv, IPW_EVENT_LOG); 1208 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32), 1209 (u8 *) log, sizeof(*log) * log_len); 1210 } 1211} 1212 1213static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv) 1214{ 1215 struct ipw_fw_error *error; 1216 u32 log_len = ipw_get_event_log_len(priv); 1217 u32 base = ipw_read32(priv, IPW_ERROR_LOG); 1218 u32 elem_len = ipw_read_reg32(priv, base); 1219 1220 error = kmalloc(sizeof(*error) + 1221 sizeof(*error->elem) * elem_len + 1222 sizeof(*error->log) * log_len, GFP_ATOMIC); 1223 if (!error) { 1224 IPW_ERROR("Memory allocation for firmware error log " 1225 "failed.\n"); 1226 return NULL; 1227 } 1228 error->jiffies = jiffies; 1229 error->status = priv->status; 1230 error->config = priv->config; 1231 error->elem_len = elem_len; 1232 error->log_len = log_len; 1233 error->elem = (struct ipw_error_elem *)error->payload; 1234 error->log = (struct ipw_event *)(error->elem + elem_len); 1235 1236 
ipw_capture_event_log(priv, log_len, error->log); 1237 1238 if (elem_len) 1239 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem, 1240 sizeof(*error->elem) * elem_len); 1241 1242 return error; 1243} 1244 1245static ssize_t show_event_log(struct device *d, 1246 struct device_attribute *attr, char *buf) 1247{ 1248 struct ipw_priv *priv = dev_get_drvdata(d); 1249 u32 log_len = ipw_get_event_log_len(priv); 1250 u32 log_size; 1251 struct ipw_event *log; 1252 u32 len = 0, i; 1253 1254 /* not using min() because of its strict type checking */ 1255 log_size = PAGE_SIZE / sizeof(*log) > log_len ? 1256 sizeof(*log) * log_len : PAGE_SIZE; 1257 log = kzalloc(log_size, GFP_KERNEL); 1258 if (!log) { 1259 IPW_ERROR("Unable to allocate memory for log\n"); 1260 return 0; 1261 } 1262 log_len = log_size / sizeof(*log); 1263 ipw_capture_event_log(priv, log_len, log); 1264 1265 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len); 1266 for (i = 0; i < log_len; i++) 1267 len += snprintf(buf + len, PAGE_SIZE - len, 1268 "\n%08X%08X%08X", 1269 log[i].time, log[i].event, log[i].data); 1270 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1271 kfree(log); 1272 return len; 1273} 1274 1275static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL); 1276 1277static ssize_t show_error(struct device *d, 1278 struct device_attribute *attr, char *buf) 1279{ 1280 struct ipw_priv *priv = dev_get_drvdata(d); 1281 u32 len = 0, i; 1282 if (!priv->error) 1283 return 0; 1284 len += snprintf(buf + len, PAGE_SIZE - len, 1285 "%08lX%08X%08X%08X", 1286 priv->error->jiffies, 1287 priv->error->status, 1288 priv->error->config, priv->error->elem_len); 1289 for (i = 0; i < priv->error->elem_len; i++) 1290 len += snprintf(buf + len, PAGE_SIZE - len, 1291 "\n%08X%08X%08X%08X%08X%08X%08X", 1292 priv->error->elem[i].time, 1293 priv->error->elem[i].desc, 1294 priv->error->elem[i].blink1, 1295 priv->error->elem[i].blink2, 1296 priv->error->elem[i].link1, 1297 priv->error->elem[i].link2, 1298 priv->error->elem[i].data); 1299 1300 len += snprintf(buf + len, PAGE_SIZE - len, 1301 "\n%08X", priv->error->log_len); 1302 for (i = 0; i < priv->error->log_len; i++) 1303 len += snprintf(buf + len, PAGE_SIZE - len, 1304 "\n%08X%08X%08X", 1305 priv->error->log[i].time, 1306 priv->error->log[i].event, 1307 priv->error->log[i].data); 1308 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1309 return len; 1310} 1311 1312static ssize_t clear_error(struct device *d, 1313 struct device_attribute *attr, 1314 const char *buf, size_t count) 1315{ 1316 struct ipw_priv *priv = dev_get_drvdata(d); 1317 1318 kfree(priv->error); 1319 priv->error = NULL; 1320 return count; 1321} 1322 1323static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error); 1324 1325static ssize_t show_cmd_log(struct device *d, 1326 struct device_attribute *attr, char *buf) 1327{ 1328 struct ipw_priv *priv = dev_get_drvdata(d); 1329 u32 len = 0, i; 1330 if (!priv->cmdlog) 1331 return 0; 1332 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len; 1333 (i != priv->cmdlog_pos) && (PAGE_SIZE - len); 1334 i = (i + 1) % priv->cmdlog_len) { 1335 len += 1336 snprintf(buf + len, PAGE_SIZE - len, 1337 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies, 1338 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd, 1339 priv->cmdlog[i].cmd.len); 1340 len += 1341 snprintk_buf(buf + len, PAGE_SIZE - len, 1342 (u8 *) priv->cmdlog[i].cmd.param, 1343 priv->cmdlog[i].cmd.len); 1344 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1345 } 1346 len += snprintf(buf + len, PAGE_SIZE - len, 
"\n"); 1347 return len; 1348} 1349 1350static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL); 1351 1352#ifdef CONFIG_IPW2200_PROMISCUOUS 1353static void ipw_prom_free(struct ipw_priv *priv); 1354static int ipw_prom_alloc(struct ipw_priv *priv); 1355static ssize_t store_rtap_iface(struct device *d, 1356 struct device_attribute *attr, 1357 const char *buf, size_t count) 1358{ 1359 struct ipw_priv *priv = dev_get_drvdata(d); 1360 int rc = 0; 1361 1362 if (count < 1) 1363 return -EINVAL; 1364 1365 switch (buf[0]) { 1366 case '0': 1367 if (!rtap_iface) 1368 return count; 1369 1370 if (netif_running(priv->prom_net_dev)) { 1371 IPW_WARNING("Interface is up. Cannot unregister.\n"); 1372 return count; 1373 } 1374 1375 ipw_prom_free(priv); 1376 rtap_iface = 0; 1377 break; 1378 1379 case '1': 1380 if (rtap_iface) 1381 return count; 1382 1383 rc = ipw_prom_alloc(priv); 1384 if (!rc) 1385 rtap_iface = 1; 1386 break; 1387 1388 default: 1389 return -EINVAL; 1390 } 1391 1392 if (rc) { 1393 IPW_ERROR("Failed to register promiscuous network " 1394 "device (error %d).\n", rc); 1395 } 1396 1397 return count; 1398} 1399 1400static ssize_t show_rtap_iface(struct device *d, 1401 struct device_attribute *attr, 1402 char *buf) 1403{ 1404 struct ipw_priv *priv = dev_get_drvdata(d); 1405 if (rtap_iface) 1406 return sprintf(buf, "%s", priv->prom_net_dev->name); 1407 else { 1408 buf[0] = '-'; 1409 buf[1] = '1'; 1410 buf[2] = '\0'; 1411 return 3; 1412 } 1413} 1414 1415static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface, 1416 store_rtap_iface); 1417 1418static ssize_t store_rtap_filter(struct device *d, 1419 struct device_attribute *attr, 1420 const char *buf, size_t count) 1421{ 1422 struct ipw_priv *priv = dev_get_drvdata(d); 1423 1424 if (!priv->prom_priv) { 1425 IPW_ERROR("Attempting to set filter without " 1426 "rtap_iface enabled.\n"); 1427 return -EPERM; 1428 } 1429 1430 priv->prom_priv->filter = simple_strtol(buf, NULL, 0); 1431 1432 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n", 1433 BIT_ARG16(priv->prom_priv->filter)); 1434 1435 return count; 1436} 1437 1438static ssize_t show_rtap_filter(struct device *d, 1439 struct device_attribute *attr, 1440 char *buf) 1441{ 1442 struct ipw_priv *priv = dev_get_drvdata(d); 1443 return sprintf(buf, "0x%04X", 1444 priv->prom_priv ? priv->prom_priv->filter : 0); 1445} 1446 1447static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter, 1448 store_rtap_filter); 1449#endif 1450 1451static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, 1452 char *buf) 1453{ 1454 struct ipw_priv *priv = dev_get_drvdata(d); 1455 return sprintf(buf, "%d\n", priv->ieee->scan_age); 1456} 1457 1458static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, 1459 const char *buf, size_t count) 1460{ 1461 struct ipw_priv *priv = dev_get_drvdata(d); 1462 struct net_device *dev = priv->net_dev; 1463 char buffer[] = "00000000"; 1464 unsigned long len = 1465 (sizeof(buffer) - 1) > count ? 
count : sizeof(buffer) - 1; 1466 unsigned long val; 1467 char *p = buffer; 1468 1469 IPW_DEBUG_INFO("enter\n"); 1470 1471 strncpy(buffer, buf, len); 1472 buffer[len] = 0; 1473 1474 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { 1475 p++; 1476 if (p[0] == 'x' || p[0] == 'X') 1477 p++; 1478 val = simple_strtoul(p, &p, 16); 1479 } else 1480 val = simple_strtoul(p, &p, 10); 1481 if (p == buffer) { 1482 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name); 1483 } else { 1484 priv->ieee->scan_age = val; 1485 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age); 1486 } 1487 1488 IPW_DEBUG_INFO("exit\n"); 1489 return len; 1490} 1491 1492static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age); 1493 1494static ssize_t show_led(struct device *d, struct device_attribute *attr, 1495 char *buf) 1496{ 1497 struct ipw_priv *priv = dev_get_drvdata(d); 1498 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1); 1499} 1500 1501static ssize_t store_led(struct device *d, struct device_attribute *attr, 1502 const char *buf, size_t count) 1503{ 1504 struct ipw_priv *priv = dev_get_drvdata(d); 1505 1506 IPW_DEBUG_INFO("enter\n"); 1507 1508 if (count == 0) 1509 return 0; 1510 1511 if (*buf == 0) { 1512 IPW_DEBUG_LED("Disabling LED control.\n"); 1513 priv->config |= CFG_NO_LED; 1514 ipw_led_shutdown(priv); 1515 } else { 1516 IPW_DEBUG_LED("Enabling LED control.\n"); 1517 priv->config &= ~CFG_NO_LED; 1518 ipw_led_init(priv); 1519 } 1520 1521 IPW_DEBUG_INFO("exit\n"); 1522 return count; 1523} 1524 1525static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led); 1526 1527static ssize_t show_status(struct device *d, 1528 struct device_attribute *attr, char *buf) 1529{ 1530 struct ipw_priv *p = dev_get_drvdata(d); 1531 return sprintf(buf, "0x%08x\n", (int)p->status); 1532} 1533 1534static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); 1535 1536static ssize_t show_cfg(struct device *d, struct device_attribute *attr, 1537 char *buf) 1538{ 1539 struct ipw_priv *p = dev_get_drvdata(d); 1540 return sprintf(buf, "0x%08x\n", (int)p->config); 1541} 1542 1543static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL); 1544 1545static ssize_t show_nic_type(struct device *d, 1546 struct device_attribute *attr, char *buf) 1547{ 1548 struct ipw_priv *priv = dev_get_drvdata(d); 1549 return sprintf(buf, "TYPE: %d\n", priv->nic_type); 1550} 1551 1552static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL); 1553 1554static ssize_t show_ucode_version(struct device *d, 1555 struct device_attribute *attr, char *buf) 1556{ 1557 u32 len = sizeof(u32), tmp = 0; 1558 struct ipw_priv *p = dev_get_drvdata(d); 1559 1560 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len)) 1561 return 0; 1562 1563 return sprintf(buf, "0x%08x\n", tmp); 1564} 1565 1566static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL); 1567 1568static ssize_t show_rtc(struct device *d, struct device_attribute *attr, 1569 char *buf) 1570{ 1571 u32 len = sizeof(u32), tmp = 0; 1572 struct ipw_priv *p = dev_get_drvdata(d); 1573 1574 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len)) 1575 return 0; 1576 1577 return sprintf(buf, "0x%08x\n", tmp); 1578} 1579 1580static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL); 1581 1582/* 1583 * Add a device attribute to view/control the delay between eeprom 1584 * operations. 
1585 */ 1586static ssize_t show_eeprom_delay(struct device *d, 1587 struct device_attribute *attr, char *buf) 1588{ 1589 struct ipw_priv *p = dev_get_drvdata(d); 1590 int n = p->eeprom_delay; 1591 return sprintf(buf, "%i\n", n); 1592} 1593static ssize_t store_eeprom_delay(struct device *d, 1594 struct device_attribute *attr, 1595 const char *buf, size_t count) 1596{ 1597 struct ipw_priv *p = dev_get_drvdata(d); 1598 sscanf(buf, "%i", &p->eeprom_delay); 1599 return strnlen(buf, count); 1600} 1601 1602static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO, 1603 show_eeprom_delay, store_eeprom_delay); 1604 1605static ssize_t show_command_event_reg(struct device *d, 1606 struct device_attribute *attr, char *buf) 1607{ 1608 u32 reg = 0; 1609 struct ipw_priv *p = dev_get_drvdata(d); 1610 1611 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT); 1612 return sprintf(buf, "0x%08x\n", reg); 1613} 1614static ssize_t store_command_event_reg(struct device *d, 1615 struct device_attribute *attr, 1616 const char *buf, size_t count) 1617{ 1618 u32 reg; 1619 struct ipw_priv *p = dev_get_drvdata(d); 1620 1621 sscanf(buf, "%x", ®); 1622 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg); 1623 return strnlen(buf, count); 1624} 1625 1626static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO, 1627 show_command_event_reg, store_command_event_reg); 1628 1629static ssize_t show_mem_gpio_reg(struct device *d, 1630 struct device_attribute *attr, char *buf) 1631{ 1632 u32 reg = 0; 1633 struct ipw_priv *p = dev_get_drvdata(d); 1634 1635 reg = ipw_read_reg32(p, 0x301100); 1636 return sprintf(buf, "0x%08x\n", reg); 1637} 1638static ssize_t store_mem_gpio_reg(struct device *d, 1639 struct device_attribute *attr, 1640 const char *buf, size_t count) 1641{ 1642 u32 reg; 1643 struct ipw_priv *p = dev_get_drvdata(d); 1644 1645 sscanf(buf, "%x", ®); 1646 ipw_write_reg32(p, 0x301100, reg); 1647 return strnlen(buf, count); 1648} 1649 1650static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO, 1651 show_mem_gpio_reg, store_mem_gpio_reg); 1652 1653static ssize_t show_indirect_dword(struct device *d, 1654 struct device_attribute *attr, char *buf) 1655{ 1656 u32 reg = 0; 1657 struct ipw_priv *priv = dev_get_drvdata(d); 1658 1659 if (priv->status & STATUS_INDIRECT_DWORD) 1660 reg = ipw_read_reg32(priv, priv->indirect_dword); 1661 else 1662 reg = 0; 1663 1664 return sprintf(buf, "0x%08x\n", reg); 1665} 1666static ssize_t store_indirect_dword(struct device *d, 1667 struct device_attribute *attr, 1668 const char *buf, size_t count) 1669{ 1670 struct ipw_priv *priv = dev_get_drvdata(d); 1671 1672 sscanf(buf, "%x", &priv->indirect_dword); 1673 priv->status |= STATUS_INDIRECT_DWORD; 1674 return strnlen(buf, count); 1675} 1676 1677static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO, 1678 show_indirect_dword, store_indirect_dword); 1679 1680static ssize_t show_indirect_byte(struct device *d, 1681 struct device_attribute *attr, char *buf) 1682{ 1683 u8 reg = 0; 1684 struct ipw_priv *priv = dev_get_drvdata(d); 1685 1686 if (priv->status & STATUS_INDIRECT_BYTE) 1687 reg = ipw_read_reg8(priv, priv->indirect_byte); 1688 else 1689 reg = 0; 1690 1691 return sprintf(buf, "0x%02x\n", reg); 1692} 1693static ssize_t store_indirect_byte(struct device *d, 1694 struct device_attribute *attr, 1695 const char *buf, size_t count) 1696{ 1697 struct ipw_priv *priv = dev_get_drvdata(d); 1698 1699 sscanf(buf, "%x", &priv->indirect_byte); 1700 priv->status |= STATUS_INDIRECT_BYTE; 1701 return strnlen(buf, count); 1702} 1703 1704static DEVICE_ATTR(indirect_byte, S_IWUSR | 
S_IRUGO,
		   show_indirect_byte, store_indirect_byte);

static ssize_t show_direct_dword(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (priv->status & STATUS_DIRECT_DWORD)
		reg = ipw_read32(priv, priv->direct_dword);
	else
		reg = 0;

	return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_direct_dword(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->direct_dword);
	priv->status |= STATUS_DIRECT_DWORD;
	return strnlen(buf, count);
}

static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
		   show_direct_dword, store_direct_dword);

static int rf_kill_active(struct ipw_priv *priv)
{
	if (0 == (ipw_read32(priv, 0x30) & 0x10000))
		priv->status |= STATUS_RF_KILL_HW;
	else
		priv->status &= ~STATUS_RF_KILL_HW;

	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
}

static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	/* 0 - RF kill not enabled
	   1 - SW based RF kill active (sysfs)
	   2 - HW based RF kill active
	   3 - Both HW and SW based RF kill active */
	struct ipw_priv *priv = dev_get_drvdata(d);
	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
	    (rf_kill_active(priv) ? 0x2 : 0x0);
	return sprintf(buf, "%i\n", val);
}

static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
{
	if ((disable_radio ? 1 : 0) ==
	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
		return 0;

	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
			  disable_radio ?
"OFF" : "ON"); 1765 1766 if (disable_radio) { 1767 priv->status |= STATUS_RF_KILL_SW; 1768 1769 if (priv->workqueue) { 1770 cancel_delayed_work(&priv->request_scan); 1771 cancel_delayed_work(&priv->request_direct_scan); 1772 cancel_delayed_work(&priv->request_passive_scan); 1773 cancel_delayed_work(&priv->scan_event); 1774 } 1775 queue_work(priv->workqueue, &priv->down); 1776 } else { 1777 priv->status &= ~STATUS_RF_KILL_SW; 1778 if (rf_kill_active(priv)) { 1779 IPW_DEBUG_RF_KILL("Can not turn radio back on - " 1780 "disabled by HW switch\n"); 1781 /* Make sure the RF_KILL check timer is running */ 1782 cancel_delayed_work(&priv->rf_kill); 1783 queue_delayed_work(priv->workqueue, &priv->rf_kill, 1784 round_jiffies_relative(2 * HZ)); 1785 } else 1786 queue_work(priv->workqueue, &priv->up); 1787 } 1788 1789 return 1; 1790} 1791 1792static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, 1793 const char *buf, size_t count) 1794{ 1795 struct ipw_priv *priv = dev_get_drvdata(d); 1796 1797 ipw_radio_kill_sw(priv, buf[0] == '1'); 1798 1799 return count; 1800} 1801 1802static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); 1803 1804static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr, 1805 char *buf) 1806{ 1807 struct ipw_priv *priv = dev_get_drvdata(d); 1808 int pos = 0, len = 0; 1809 if (priv->config & CFG_SPEED_SCAN) { 1810 while (priv->speed_scan[pos] != 0) 1811 len += sprintf(&buf[len], "%d ", 1812 priv->speed_scan[pos++]); 1813 return len + sprintf(&buf[len], "\n"); 1814 } 1815 1816 return sprintf(buf, "0\n"); 1817} 1818 1819static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr, 1820 const char *buf, size_t count) 1821{ 1822 struct ipw_priv *priv = dev_get_drvdata(d); 1823 int channel, pos = 0; 1824 const char *p = buf; 1825 1826 /* list of space separated channels to scan, optionally ending with 0 */ 1827 while ((channel = simple_strtol(p, NULL, 0))) { 1828 if (pos == MAX_SPEED_SCAN - 1) { 1829 priv->speed_scan[pos] = 0; 1830 break; 1831 } 1832 1833 if (libipw_is_valid_channel(priv->ieee, channel)) 1834 priv->speed_scan[pos++] = channel; 1835 else 1836 IPW_WARNING("Skipping invalid channel request: %d\n", 1837 channel); 1838 p = strchr(p, ' '); 1839 if (!p) 1840 break; 1841 while (*p == ' ' || *p == '\t') 1842 p++; 1843 } 1844 1845 if (pos == 0) 1846 priv->config &= ~CFG_SPEED_SCAN; 1847 else { 1848 priv->speed_scan_pos = 0; 1849 priv->config |= CFG_SPEED_SCAN; 1850 } 1851 1852 return count; 1853} 1854 1855static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan, 1856 store_speed_scan); 1857 1858static ssize_t show_net_stats(struct device *d, struct device_attribute *attr, 1859 char *buf) 1860{ 1861 struct ipw_priv *priv = dev_get_drvdata(d); 1862 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? 
'1' : '0'); 1863} 1864 1865static ssize_t store_net_stats(struct device *d, struct device_attribute *attr, 1866 const char *buf, size_t count) 1867{ 1868 struct ipw_priv *priv = dev_get_drvdata(d); 1869 if (buf[0] == '1') 1870 priv->config |= CFG_NET_STATS; 1871 else 1872 priv->config &= ~CFG_NET_STATS; 1873 1874 return count; 1875} 1876 1877static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO, 1878 show_net_stats, store_net_stats); 1879 1880static ssize_t show_channels(struct device *d, 1881 struct device_attribute *attr, 1882 char *buf) 1883{ 1884 struct ipw_priv *priv = dev_get_drvdata(d); 1885 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 1886 int len = 0, i; 1887 1888 len = sprintf(&buf[len], 1889 "Displaying %d channels in 2.4Ghz band " 1890 "(802.11bg):\n", geo->bg_channels); 1891 1892 for (i = 0; i < geo->bg_channels; i++) { 1893 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n", 1894 geo->bg[i].channel, 1895 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ? 1896 " (radar spectrum)" : "", 1897 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) || 1898 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)) 1899 ? "" : ", IBSS", 1900 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ? 1901 "passive only" : "active/passive", 1902 geo->bg[i].flags & LIBIPW_CH_B_ONLY ? 1903 "B" : "B/G"); 1904 } 1905 1906 len += sprintf(&buf[len], 1907 "Displaying %d channels in 5.2Ghz band " 1908 "(802.11a):\n", geo->a_channels); 1909 for (i = 0; i < geo->a_channels; i++) { 1910 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n", 1911 geo->a[i].channel, 1912 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ? 1913 " (radar spectrum)" : "", 1914 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) || 1915 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)) 1916 ? "" : ", IBSS", 1917 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ? 
1918 "passive only" : "active/passive"); 1919 } 1920 1921 return len; 1922} 1923 1924static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 1925 1926static void notify_wx_assoc_event(struct ipw_priv *priv) 1927{ 1928 union iwreq_data wrqu; 1929 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1930 if (priv->status & STATUS_ASSOCIATED) 1931 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); 1932 else 1933 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); 1934 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); 1935} 1936 1937static void ipw_irq_tasklet(struct ipw_priv *priv) 1938{ 1939 u32 inta, inta_mask, handled = 0; 1940 unsigned long flags; 1941 int rc = 0; 1942 1943 spin_lock_irqsave(&priv->irq_lock, flags); 1944 1945 inta = ipw_read32(priv, IPW_INTA_RW); 1946 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); 1947 inta &= (IPW_INTA_MASK_ALL & inta_mask); 1948 1949 /* Add any cached INTA values that need to be handled */ 1950 inta |= priv->isr_inta; 1951 1952 spin_unlock_irqrestore(&priv->irq_lock, flags); 1953 1954 spin_lock_irqsave(&priv->lock, flags); 1955 1956 /* handle all the justifications for the interrupt */ 1957 if (inta & IPW_INTA_BIT_RX_TRANSFER) { 1958 ipw_rx(priv); 1959 handled |= IPW_INTA_BIT_RX_TRANSFER; 1960 } 1961 1962 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) { 1963 IPW_DEBUG_HC("Command completed.\n"); 1964 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1); 1965 priv->status &= ~STATUS_HCMD_ACTIVE; 1966 wake_up_interruptible(&priv->wait_command_queue); 1967 handled |= IPW_INTA_BIT_TX_CMD_QUEUE; 1968 } 1969 1970 if (inta & IPW_INTA_BIT_TX_QUEUE_1) { 1971 IPW_DEBUG_TX("TX_QUEUE_1\n"); 1972 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0); 1973 handled |= IPW_INTA_BIT_TX_QUEUE_1; 1974 } 1975 1976 if (inta & IPW_INTA_BIT_TX_QUEUE_2) { 1977 IPW_DEBUG_TX("TX_QUEUE_2\n"); 1978 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1); 1979 handled |= IPW_INTA_BIT_TX_QUEUE_2; 1980 } 1981 1982 if (inta & IPW_INTA_BIT_TX_QUEUE_3) { 1983 IPW_DEBUG_TX("TX_QUEUE_3\n"); 1984 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2); 1985 handled |= IPW_INTA_BIT_TX_QUEUE_3; 1986 } 1987 1988 if (inta & IPW_INTA_BIT_TX_QUEUE_4) { 1989 IPW_DEBUG_TX("TX_QUEUE_4\n"); 1990 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3); 1991 handled |= IPW_INTA_BIT_TX_QUEUE_4; 1992 } 1993 1994 if (inta & IPW_INTA_BIT_STATUS_CHANGE) { 1995 IPW_WARNING("STATUS_CHANGE\n"); 1996 handled |= IPW_INTA_BIT_STATUS_CHANGE; 1997 } 1998 1999 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) { 2000 IPW_WARNING("TX_PERIOD_EXPIRED\n"); 2001 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED; 2002 } 2003 2004 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) { 2005 IPW_WARNING("HOST_CMD_DONE\n"); 2006 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE; 2007 } 2008 2009 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) { 2010 IPW_WARNING("FW_INITIALIZATION_DONE\n"); 2011 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE; 2012 } 2013 2014 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) { 2015 IPW_WARNING("PHY_OFF_DONE\n"); 2016 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE; 2017 } 2018 2019 if (inta & IPW_INTA_BIT_RF_KILL_DONE) { 2020 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n"); 2021 priv->status |= STATUS_RF_KILL_HW; 2022 wake_up_interruptible(&priv->wait_command_queue); 2023 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); 2024 cancel_delayed_work(&priv->request_scan); 2025 cancel_delayed_work(&priv->request_direct_scan); 2026 cancel_delayed_work(&priv->request_passive_scan); 2027 cancel_delayed_work(&priv->scan_event); 2028 
schedule_work(&priv->link_down); 2029 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); 2030 handled |= IPW_INTA_BIT_RF_KILL_DONE; 2031 } 2032 2033 if (inta & IPW_INTA_BIT_FATAL_ERROR) { 2034 IPW_WARNING("Firmware error detected. Restarting.\n"); 2035 if (priv->error) { 2036 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n"); 2037 if (ipw_debug_level & IPW_DL_FW_ERRORS) { 2038 struct ipw_fw_error *error = 2039 ipw_alloc_error_log(priv); 2040 ipw_dump_error_log(priv, error); 2041 kfree(error); 2042 } 2043 } else { 2044 priv->error = ipw_alloc_error_log(priv); 2045 if (priv->error) 2046 IPW_DEBUG_FW("Sysfs 'error' log captured.\n"); 2047 else 2048 IPW_DEBUG_FW("Error allocating sysfs 'error' " 2049 "log.\n"); 2050 if (ipw_debug_level & IPW_DL_FW_ERRORS) 2051 ipw_dump_error_log(priv, priv->error); 2052 } 2053 2054 /* XXX: If hardware encryption is for WPA/WPA2, 2055 * we have to notify the supplicant. */ 2056 if (priv->ieee->sec.encrypt) { 2057 priv->status &= ~STATUS_ASSOCIATED; 2058 notify_wx_assoc_event(priv); 2059 } 2060 2061 /* Keep the restart process from trying to send host 2062 * commands by clearing the INIT status bit */ 2063 priv->status &= ~STATUS_INIT; 2064 2065 /* Cancel currently queued command. */ 2066 priv->status &= ~STATUS_HCMD_ACTIVE; 2067 wake_up_interruptible(&priv->wait_command_queue); 2068 2069 queue_work(priv->workqueue, &priv->adapter_restart); 2070 handled |= IPW_INTA_BIT_FATAL_ERROR; 2071 } 2072 2073 if (inta & IPW_INTA_BIT_PARITY_ERROR) { 2074 IPW_ERROR("Parity error\n"); 2075 handled |= IPW_INTA_BIT_PARITY_ERROR; 2076 } 2077 2078 if (handled != inta) { 2079 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); 2080 } 2081 2082 spin_unlock_irqrestore(&priv->lock, flags); 2083 2084 /* enable all interrupts */ 2085 ipw_enable_interrupts(priv); 2086} 2087 2088#define IPW_CMD(x) case IPW_CMD_ ## x : return #x 2089static char *get_cmd_string(u8 cmd) 2090{ 2091 switch (cmd) { 2092 IPW_CMD(HOST_COMPLETE); 2093 IPW_CMD(POWER_DOWN); 2094 IPW_CMD(SYSTEM_CONFIG); 2095 IPW_CMD(MULTICAST_ADDRESS); 2096 IPW_CMD(SSID); 2097 IPW_CMD(ADAPTER_ADDRESS); 2098 IPW_CMD(PORT_TYPE); 2099 IPW_CMD(RTS_THRESHOLD); 2100 IPW_CMD(FRAG_THRESHOLD); 2101 IPW_CMD(POWER_MODE); 2102 IPW_CMD(WEP_KEY); 2103 IPW_CMD(TGI_TX_KEY); 2104 IPW_CMD(SCAN_REQUEST); 2105 IPW_CMD(SCAN_REQUEST_EXT); 2106 IPW_CMD(ASSOCIATE); 2107 IPW_CMD(SUPPORTED_RATES); 2108 IPW_CMD(SCAN_ABORT); 2109 IPW_CMD(TX_FLUSH); 2110 IPW_CMD(QOS_PARAMETERS); 2111 IPW_CMD(DINO_CONFIG); 2112 IPW_CMD(RSN_CAPABILITIES); 2113 IPW_CMD(RX_KEY); 2114 IPW_CMD(CARD_DISABLE); 2115 IPW_CMD(SEED_NUMBER); 2116 IPW_CMD(TX_POWER); 2117 IPW_CMD(COUNTRY_INFO); 2118 IPW_CMD(AIRONET_INFO); 2119 IPW_CMD(AP_TX_POWER); 2120 IPW_CMD(CCKM_INFO); 2121 IPW_CMD(CCX_VER_INFO); 2122 IPW_CMD(SET_CALIBRATION); 2123 IPW_CMD(SENSITIVITY_CALIB); 2124 IPW_CMD(RETRY_LIMIT); 2125 IPW_CMD(IPW_PRE_POWER_DOWN); 2126 IPW_CMD(VAP_BEACON_TEMPLATE); 2127 IPW_CMD(VAP_DTIM_PERIOD); 2128 IPW_CMD(EXT_SUPPORTED_RATES); 2129 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT); 2130 IPW_CMD(VAP_QUIET_INTERVALS); 2131 IPW_CMD(VAP_CHANNEL_SWITCH); 2132 IPW_CMD(VAP_MANDATORY_CHANNELS); 2133 IPW_CMD(VAP_CELL_PWR_LIMIT); 2134 IPW_CMD(VAP_CF_PARAM_SET); 2135 IPW_CMD(VAP_SET_BEACONING_STATE); 2136 IPW_CMD(MEASUREMENT); 2137 IPW_CMD(POWER_CAPABILITY); 2138 IPW_CMD(SUPPORTED_CHANNELS); 2139 IPW_CMD(TPC_REPORT); 2140 IPW_CMD(WME_INFO); 2141 IPW_CMD(PRODUCTION_COMMAND); 2142 default: 2143 return "UNKNOWN"; 2144 } 2145} 2146 2147#define HOST_COMPLETE_TIMEOUT HZ 2148 2149static int 
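/*
 * Synchronous host command path.  Only one command may be in flight at a
 * time (STATUS_HCMD_ACTIVE, taken under priv->lock); the command is
 * optionally recorded in the cmdlog ring, handed to the command TX queue
 * via ipw_queue_tx_hcmd(), and the caller then sleeps on
 * wait_command_queue until the TX_CMD_QUEUE interrupt clears
 * STATUS_HCMD_ACTIVE or HOST_COMPLETE_TIMEOUT expires.
 */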
__ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) 2150{ 2151 int rc = 0; 2152 unsigned long flags; 2153 2154 spin_lock_irqsave(&priv->lock, flags); 2155 if (priv->status & STATUS_HCMD_ACTIVE) { 2156 IPW_ERROR("Failed to send %s: Already sending a command.\n", 2157 get_cmd_string(cmd->cmd)); 2158 spin_unlock_irqrestore(&priv->lock, flags); 2159 return -EAGAIN; 2160 } 2161 2162 priv->status |= STATUS_HCMD_ACTIVE; 2163 2164 if (priv->cmdlog) { 2165 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies; 2166 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd; 2167 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len; 2168 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param, 2169 cmd->len); 2170 priv->cmdlog[priv->cmdlog_pos].retcode = -1; 2171 } 2172 2173 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n", 2174 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len, 2175 priv->status); 2176 2177#ifndef DEBUG_CMD_WEP_KEY 2178 if (cmd->cmd == IPW_CMD_WEP_KEY) 2179 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n"); 2180 else 2181#endif 2182 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len); 2183 2184 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0); 2185 if (rc) { 2186 priv->status &= ~STATUS_HCMD_ACTIVE; 2187 IPW_ERROR("Failed to send %s: Reason %d\n", 2188 get_cmd_string(cmd->cmd), rc); 2189 spin_unlock_irqrestore(&priv->lock, flags); 2190 goto exit; 2191 } 2192 spin_unlock_irqrestore(&priv->lock, flags); 2193 2194 rc = wait_event_interruptible_timeout(priv->wait_command_queue, 2195 !(priv-> 2196 status & STATUS_HCMD_ACTIVE), 2197 HOST_COMPLETE_TIMEOUT); 2198 if (rc == 0) { 2199 spin_lock_irqsave(&priv->lock, flags); 2200 if (priv->status & STATUS_HCMD_ACTIVE) { 2201 IPW_ERROR("Failed to send %s: Command timed out.\n", 2202 get_cmd_string(cmd->cmd)); 2203 priv->status &= ~STATUS_HCMD_ACTIVE; 2204 spin_unlock_irqrestore(&priv->lock, flags); 2205 rc = -EIO; 2206 goto exit; 2207 } 2208 spin_unlock_irqrestore(&priv->lock, flags); 2209 } else 2210 rc = 0; 2211 2212 if (priv->status & STATUS_RF_KILL_HW) { 2213 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n", 2214 get_cmd_string(cmd->cmd)); 2215 rc = -EIO; 2216 goto exit; 2217 } 2218 2219 exit: 2220 if (priv->cmdlog) { 2221 priv->cmdlog[priv->cmdlog_pos++].retcode = rc; 2222 priv->cmdlog_pos %= priv->cmdlog_len; 2223 } 2224 return rc; 2225} 2226 2227static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command) 2228{ 2229 struct host_cmd cmd = { 2230 .cmd = command, 2231 }; 2232 2233 return __ipw_send_cmd(priv, &cmd); 2234} 2235 2236static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len, 2237 void *data) 2238{ 2239 struct host_cmd cmd = { 2240 .cmd = command, 2241 .len = len, 2242 .param = data, 2243 }; 2244 2245 return __ipw_send_cmd(priv, &cmd); 2246} 2247 2248static int ipw_send_host_complete(struct ipw_priv *priv) 2249{ 2250 if (!priv) { 2251 IPW_ERROR("Invalid args\n"); 2252 return -1; 2253 } 2254 2255 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE); 2256} 2257 2258static int ipw_send_system_config(struct ipw_priv *priv) 2259{ 2260 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, 2261 sizeof(priv->sys_config), 2262 &priv->sys_config); 2263} 2264 2265static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) 2266{ 2267 if (!priv || !ssid) { 2268 IPW_ERROR("Invalid args\n"); 2269 return -1; 2270 } 2271 2272 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE), 2273 ssid); 2274} 2275 2276static int ipw_send_adapter_address(struct ipw_priv *priv, 
u8 * mac) 2277{ 2278 if (!priv || !mac) { 2279 IPW_ERROR("Invalid args\n"); 2280 return -1; 2281 } 2282 2283 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n", 2284 priv->net_dev->name, mac); 2285 2286 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); 2287} 2288 2289/* 2290 * NOTE: This must be executed from our workqueue as it results in udelay 2291 * being called which may corrupt the keyboard if executed on default 2292 * workqueue 2293 */ 2294static void ipw_adapter_restart(void *adapter) 2295{ 2296 struct ipw_priv *priv = adapter; 2297 2298 if (priv->status & STATUS_RF_KILL_MASK) 2299 return; 2300 2301 ipw_down(priv); 2302 2303 if (priv->assoc_network && 2304 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS)) 2305 ipw_remove_current_network(priv); 2306 2307 if (ipw_up(priv)) { 2308 IPW_ERROR("Failed to up device\n"); 2309 return; 2310 } 2311} 2312 2313static void ipw_bg_adapter_restart(struct work_struct *work) 2314{ 2315 struct ipw_priv *priv = 2316 container_of(work, struct ipw_priv, adapter_restart); 2317 mutex_lock(&priv->mutex); 2318 ipw_adapter_restart(priv); 2319 mutex_unlock(&priv->mutex); 2320} 2321 2322#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) 2323 2324static void ipw_scan_check(void *data) 2325{ 2326 struct ipw_priv *priv = data; 2327 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { 2328 IPW_DEBUG_SCAN("Scan completion watchdog resetting " 2329 "adapter after (%dms).\n", 2330 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2331 queue_work(priv->workqueue, &priv->adapter_restart); 2332 } 2333} 2334 2335static void ipw_bg_scan_check(struct work_struct *work) 2336{ 2337 struct ipw_priv *priv = 2338 container_of(work, struct ipw_priv, scan_check.work); 2339 mutex_lock(&priv->mutex); 2340 ipw_scan_check(priv); 2341 mutex_unlock(&priv->mutex); 2342} 2343 2344static int ipw_send_scan_request_ext(struct ipw_priv *priv, 2345 struct ipw_scan_request_ext *request) 2346{ 2347 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT, 2348 sizeof(*request), request); 2349} 2350 2351static int ipw_send_scan_abort(struct ipw_priv *priv) 2352{ 2353 if (!priv) { 2354 IPW_ERROR("Invalid args\n"); 2355 return -1; 2356 } 2357 2358 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT); 2359} 2360 2361static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) 2362{ 2363 struct ipw_sensitivity_calib calib = { 2364 .beacon_rssi_raw = cpu_to_le16(sens), 2365 }; 2366 2367 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib), 2368 &calib); 2369} 2370 2371static int ipw_send_associate(struct ipw_priv *priv, 2372 struct ipw_associate *associate) 2373{ 2374 if (!priv || !associate) { 2375 IPW_ERROR("Invalid args\n"); 2376 return -1; 2377 } 2378 2379 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate), 2380 associate); 2381} 2382 2383static int ipw_send_supported_rates(struct ipw_priv *priv, 2384 struct ipw_supported_rates *rates) 2385{ 2386 if (!priv || !rates) { 2387 IPW_ERROR("Invalid args\n"); 2388 return -1; 2389 } 2390 2391 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates), 2392 rates); 2393} 2394 2395static int ipw_set_random_seed(struct ipw_priv *priv) 2396{ 2397 u32 val; 2398 2399 if (!priv) { 2400 IPW_ERROR("Invalid args\n"); 2401 return -1; 2402 } 2403 2404 get_random_bytes(&val, sizeof(val)); 2405 2406 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val); 2407} 2408 2409static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) 2410{ 2411 __le32 v = cpu_to_le32(phy_off); 2412 if 
(!priv) { 2413 IPW_ERROR("Invalid args\n"); 2414 return -1; 2415 } 2416 2417 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v); 2418} 2419 2420static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power) 2421{ 2422 if (!priv || !power) { 2423 IPW_ERROR("Invalid args\n"); 2424 return -1; 2425 } 2426 2427 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power); 2428} 2429 2430static int ipw_set_tx_power(struct ipw_priv *priv) 2431{ 2432 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 2433 struct ipw_tx_power tx_power; 2434 s8 max_power; 2435 int i; 2436 2437 memset(&tx_power, 0, sizeof(tx_power)); 2438 2439 /* configure device for 'G' band */ 2440 tx_power.ieee_mode = IPW_G_MODE; 2441 tx_power.num_channels = geo->bg_channels; 2442 for (i = 0; i < geo->bg_channels; i++) { 2443 max_power = geo->bg[i].max_power; 2444 tx_power.channels_tx_power[i].channel_number = 2445 geo->bg[i].channel; 2446 tx_power.channels_tx_power[i].tx_power = max_power ? 2447 min(max_power, priv->tx_power) : priv->tx_power; 2448 } 2449 if (ipw_send_tx_power(priv, &tx_power)) 2450 return -EIO; 2451 2452 /* configure device to also handle 'B' band */ 2453 tx_power.ieee_mode = IPW_B_MODE; 2454 if (ipw_send_tx_power(priv, &tx_power)) 2455 return -EIO; 2456 2457 /* configure device to also handle 'A' band */ 2458 if (priv->ieee->abg_true) { 2459 tx_power.ieee_mode = IPW_A_MODE; 2460 tx_power.num_channels = geo->a_channels; 2461 for (i = 0; i < tx_power.num_channels; i++) { 2462 max_power = geo->a[i].max_power; 2463 tx_power.channels_tx_power[i].channel_number = 2464 geo->a[i].channel; 2465 tx_power.channels_tx_power[i].tx_power = max_power ? 2466 min(max_power, priv->tx_power) : priv->tx_power; 2467 } 2468 if (ipw_send_tx_power(priv, &tx_power)) 2469 return -EIO; 2470 } 2471 return 0; 2472} 2473 2474static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) 2475{ 2476 struct ipw_rts_threshold rts_threshold = { 2477 .rts_threshold = cpu_to_le16(rts), 2478 }; 2479 2480 if (!priv) { 2481 IPW_ERROR("Invalid args\n"); 2482 return -1; 2483 } 2484 2485 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD, 2486 sizeof(rts_threshold), &rts_threshold); 2487} 2488 2489static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) 2490{ 2491 struct ipw_frag_threshold frag_threshold = { 2492 .frag_threshold = cpu_to_le16(frag), 2493 }; 2494 2495 if (!priv) { 2496 IPW_ERROR("Invalid args\n"); 2497 return -1; 2498 } 2499 2500 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD, 2501 sizeof(frag_threshold), &frag_threshold); 2502} 2503 2504static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) 2505{ 2506 __le32 param; 2507 2508 if (!priv) { 2509 IPW_ERROR("Invalid args\n"); 2510 return -1; 2511 } 2512 2513 /* If on battery, set to 3, if AC set to CAM, else user 2514 * level */ 2515 switch (mode) { 2516 case IPW_POWER_BATTERY: 2517 param = cpu_to_le32(IPW_POWER_INDEX_3); 2518 break; 2519 case IPW_POWER_AC: 2520 param = cpu_to_le32(IPW_POWER_MODE_CAM); 2521 break; 2522 default: 2523 param = cpu_to_le32(mode); 2524 break; 2525 } 2526 2527 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), 2528 ¶m); 2529} 2530 2531static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) 2532{ 2533 struct ipw_retry_limit retry_limit = { 2534 .short_retry_limit = slimit, 2535 .long_retry_limit = llimit 2536 }; 2537 2538 if (!priv) { 2539 IPW_ERROR("Invalid args\n"); 2540 return -1; 2541 } 2542 2543 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, 
sizeof(retry_limit), 2544 &retry_limit); 2545} 2546 2547/* 2548 * The IPW device contains a Microwire compatible EEPROM that stores 2549 * various data like the MAC address. Usually the firmware has exclusive 2550 * access to the eeprom, but during device initialization (before the 2551 * device driver has sent the HostComplete command to the firmware) the 2552 * device driver has read access to the EEPROM by way of indirect addressing 2553 * through a couple of memory mapped registers. 2554 * 2555 * The following is a simplified implementation for pulling data out of the 2556 * the eeprom, along with some helper functions to find information in 2557 * the per device private data's copy of the eeprom. 2558 * 2559 * NOTE: To better understand how these functions work (i.e what is a chip 2560 * select and why do have to keep driving the eeprom clock?), read 2561 * just about any data sheet for a Microwire compatible EEPROM. 2562 */ 2563 2564/* write a 32 bit value into the indirect accessor register */ 2565static inline void eeprom_write_reg(struct ipw_priv *p, u32 data) 2566{ 2567 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data); 2568 2569 /* the eeprom requires some time to complete the operation */ 2570 udelay(p->eeprom_delay); 2571 2572 return; 2573} 2574 2575/* perform a chip select operation */ 2576static void eeprom_cs(struct ipw_priv *priv) 2577{ 2578 eeprom_write_reg(priv, 0); 2579 eeprom_write_reg(priv, EEPROM_BIT_CS); 2580 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK); 2581 eeprom_write_reg(priv, EEPROM_BIT_CS); 2582} 2583 2584/* perform a chip select operation */ 2585static void eeprom_disable_cs(struct ipw_priv *priv) 2586{ 2587 eeprom_write_reg(priv, EEPROM_BIT_CS); 2588 eeprom_write_reg(priv, 0); 2589 eeprom_write_reg(priv, EEPROM_BIT_SK); 2590} 2591 2592/* push a single bit down to the eeprom */ 2593static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit) 2594{ 2595 int d = (bit ? EEPROM_BIT_DI : 0); 2596 eeprom_write_reg(p, EEPROM_BIT_CS | d); 2597 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK); 2598} 2599 2600/* push an opcode followed by an address down to the eeprom */ 2601static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr) 2602{ 2603 int i; 2604 2605 eeprom_cs(priv); 2606 eeprom_write_bit(priv, 1); 2607 eeprom_write_bit(priv, op & 2); 2608 eeprom_write_bit(priv, op & 1); 2609 for (i = 7; i >= 0; i--) { 2610 eeprom_write_bit(priv, addr & (1 << i)); 2611 } 2612} 2613 2614/* pull 16 bits off the eeprom, one bit at a time */ 2615static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr) 2616{ 2617 int i; 2618 u16 r = 0; 2619 2620 /* Send READ Opcode */ 2621 eeprom_op(priv, EEPROM_CMD_READ, addr); 2622 2623 /* Send dummy bit */ 2624 eeprom_write_reg(priv, EEPROM_BIT_CS); 2625 2626 /* Read the byte off the eeprom one bit at a time */ 2627 for (i = 0; i < 16; i++) { 2628 u32 data = 0; 2629 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK); 2630 eeprom_write_reg(priv, EEPROM_BIT_CS); 2631 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS); 2632 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0); 2633 } 2634 2635 /* Send another dummy bit */ 2636 eeprom_write_reg(priv, 0); 2637 eeprom_disable_cs(priv); 2638 2639 return r; 2640} 2641 2642/* helper function for pulling the mac address out of the private */ 2643/* data's copy of the eeprom data */ 2644static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac) 2645{ 2646 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6); 2647} 2648 2649/* 2650 * Either the device driver (i.e. 
the host) or the firmware can 2651 * load eeprom data into the designated region in SRAM. If neither 2652 * happens then the FW will shutdown with a fatal error. 2653 * 2654 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE 2655 * bit needs region of shared SRAM needs to be non-zero. 2656 */ 2657static void ipw_eeprom_init_sram(struct ipw_priv *priv) 2658{ 2659 int i; 2660 __le16 *eeprom = (__le16 *) priv->eeprom; 2661 2662 IPW_DEBUG_TRACE(">>\n"); 2663 2664 /* read entire contents of eeprom into private buffer */ 2665 for (i = 0; i < 128; i++) 2666 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i)); 2667 2668 /* 2669 If the data looks correct, then copy it to our private 2670 copy. Otherwise let the firmware know to perform the operation 2671 on its own. 2672 */ 2673 if (priv->eeprom[EEPROM_VERSION] != 0) { 2674 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n"); 2675 2676 /* write the eeprom data to sram */ 2677 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++) 2678 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]); 2679 2680 /* Do not load eeprom data on fatal error or suspend */ 2681 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); 2682 } else { 2683 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n"); 2684 2685 /* Load eeprom data on fatal error or suspend */ 2686 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1); 2687 } 2688 2689 IPW_DEBUG_TRACE("<<\n"); 2690} 2691 2692static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count) 2693{ 2694 count >>= 2; 2695 if (!count) 2696 return; 2697 _ipw_write32(priv, IPW_AUTOINC_ADDR, start); 2698 while (count--) 2699 _ipw_write32(priv, IPW_AUTOINC_DATA, 0); 2700} 2701 2702static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv) 2703{ 2704 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL, 2705 CB_NUMBER_OF_ELEMENTS_SMALL * 2706 sizeof(struct command_block)); 2707} 2708 2709static int ipw_fw_dma_enable(struct ipw_priv *priv) 2710{ /* start dma engine but no transfers yet */ 2711 2712 IPW_DEBUG_FW(">> : \n"); 2713 2714 /* Start the dma */ 2715 ipw_fw_dma_reset_command_blocks(priv); 2716 2717 /* Write CB base address */ 2718 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL); 2719 2720 IPW_DEBUG_FW("<< : \n"); 2721 return 0; 2722} 2723 2724static void ipw_fw_dma_abort(struct ipw_priv *priv) 2725{ 2726 u32 control = 0; 2727 2728 IPW_DEBUG_FW(">> :\n"); 2729 2730 /* set the Stop and Abort bit */ 2731 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT; 2732 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); 2733 priv->sram_desc.last_cb_index = 0; 2734 2735 IPW_DEBUG_FW("<< \n"); 2736} 2737 2738static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, 2739 struct command_block *cb) 2740{ 2741 u32 address = 2742 IPW_SHARED_SRAM_DMA_CONTROL + 2743 (sizeof(struct command_block) * index); 2744 IPW_DEBUG_FW(">> :\n"); 2745 2746 ipw_write_indirect(priv, address, (u8 *) cb, 2747 (int)sizeof(struct command_block)); 2748 2749 IPW_DEBUG_FW("<< :\n"); 2750 return 0; 2751 2752} 2753 2754static int ipw_fw_dma_kick(struct ipw_priv *priv) 2755{ 2756 u32 control = 0; 2757 u32 index = 0; 2758 2759 IPW_DEBUG_FW(">> :\n"); 2760 2761 for (index = 0; index < priv->sram_desc.last_cb_index; index++) 2762 ipw_fw_dma_write_command_block(priv, index, 2763 &priv->sram_desc.cb_list[index]); 2764 2765 /* Enable the DMA in the CSR register */ 2766 ipw_clear_bit(priv, IPW_RESET_REG, 2767 IPW_RESET_REG_MASTER_DISABLED | 2768 IPW_RESET_REG_STOP_MASTER); 2769 2770 /* Set the 
Start bit. */ 2771 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START; 2772 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); 2773 2774 IPW_DEBUG_FW("<< :\n"); 2775 return 0; 2776} 2777 2778static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv) 2779{ 2780 u32 address; 2781 u32 register_value = 0; 2782 u32 cb_fields_address = 0; 2783 2784 IPW_DEBUG_FW(">> :\n"); 2785 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); 2786 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address); 2787 2788 /* Read the DMA Controlor register */ 2789 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL); 2790 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value); 2791 2792 /* Print the CB values */ 2793 cb_fields_address = address; 2794 register_value = ipw_read_reg32(priv, cb_fields_address); 2795 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value); 2796 2797 cb_fields_address += sizeof(u32); 2798 register_value = ipw_read_reg32(priv, cb_fields_address); 2799 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value); 2800 2801 cb_fields_address += sizeof(u32); 2802 register_value = ipw_read_reg32(priv, cb_fields_address); 2803 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n", 2804 register_value); 2805 2806 cb_fields_address += sizeof(u32); 2807 register_value = ipw_read_reg32(priv, cb_fields_address); 2808 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value); 2809 2810 IPW_DEBUG_FW(">> :\n"); 2811} 2812 2813static int ipw_fw_dma_command_block_index(struct ipw_priv *priv) 2814{ 2815 u32 current_cb_address = 0; 2816 u32 current_cb_index = 0; 2817 2818 IPW_DEBUG_FW("<< :\n"); 2819 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); 2820 2821 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) / 2822 sizeof(struct command_block); 2823 2824 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n", 2825 current_cb_index, current_cb_address); 2826 2827 IPW_DEBUG_FW(">> :\n"); 2828 return current_cb_index; 2829 2830} 2831 2832static int ipw_fw_dma_add_command_block(struct ipw_priv *priv, 2833 u32 src_address, 2834 u32 dest_address, 2835 u32 length, 2836 int interrupt_enabled, int is_last) 2837{ 2838 2839 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC | 2840 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG | 2841 CB_DEST_SIZE_LONG; 2842 struct command_block *cb; 2843 u32 last_cb_element = 0; 2844 2845 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n", 2846 src_address, dest_address, length); 2847 2848 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL) 2849 return -1; 2850 2851 last_cb_element = priv->sram_desc.last_cb_index; 2852 cb = &priv->sram_desc.cb_list[last_cb_element]; 2853 priv->sram_desc.last_cb_index++; 2854 2855 /* Calculate the new CB control word */ 2856 if (interrupt_enabled) 2857 control |= CB_INT_ENABLED; 2858 2859 if (is_last) 2860 control |= CB_LAST_VALID; 2861 2862 control |= length; 2863 2864 /* Calculate the CB Element's checksum value */ 2865 cb->status = control ^ src_address ^ dest_address; 2866 2867 /* Copy the Source and Destination addresses */ 2868 cb->dest_addr = dest_address; 2869 cb->source_addr = src_address; 2870 2871 /* Copy the Control Word last */ 2872 cb->control = control; 2873 2874 return 0; 2875} 2876 2877static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address, 2878 int nr, u32 dest_address, u32 len) 2879{ 2880 int ret, i; 2881 u32 size; 2882 2883 
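	/* Each of the nr source buffers holds at most CB_MAX_LENGTH bytes;
	 * every one becomes its own DMA command block targeting consecutive
	 * device addresses starting at dest_address, with the min_t() below
	 * trimming the final, possibly partial, chunk to the remaining
	 * length. */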
IPW_DEBUG_FW(">> \n"); 2884 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n", 2885 nr, dest_address, len); 2886 2887 for (i = 0; i < nr; i++) { 2888 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH); 2889 ret = ipw_fw_dma_add_command_block(priv, src_address[i], 2890 dest_address + 2891 i * CB_MAX_LENGTH, size, 2892 0, 0); 2893 if (ret) { 2894 IPW_DEBUG_FW_INFO(": Failed\n"); 2895 return -1; 2896 } else 2897 IPW_DEBUG_FW_INFO(": Added new cb\n"); 2898 } 2899 2900 IPW_DEBUG_FW("<< \n"); 2901 return 0; 2902} 2903 2904static int ipw_fw_dma_wait(struct ipw_priv *priv) 2905{ 2906 u32 current_index = 0, previous_index; 2907 u32 watchdog = 0; 2908 2909 IPW_DEBUG_FW(">> : \n"); 2910 2911 current_index = ipw_fw_dma_command_block_index(priv); 2912 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n", 2913 (int)priv->sram_desc.last_cb_index); 2914 2915 while (current_index < priv->sram_desc.last_cb_index) { 2916 udelay(50); 2917 previous_index = current_index; 2918 current_index = ipw_fw_dma_command_block_index(priv); 2919 2920 if (previous_index < current_index) { 2921 watchdog = 0; 2922 continue; 2923 } 2924 if (++watchdog > 400) { 2925 IPW_DEBUG_FW_INFO("Timeout\n"); 2926 ipw_fw_dma_dump_command_block(priv); 2927 ipw_fw_dma_abort(priv); 2928 return -1; 2929 } 2930 } 2931 2932 ipw_fw_dma_abort(priv); 2933 2934 /*Disable the DMA in the CSR register */ 2935 ipw_set_bit(priv, IPW_RESET_REG, 2936 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER); 2937 2938 IPW_DEBUG_FW("<< dmaWaitSync \n"); 2939 return 0; 2940} 2941 2942static void ipw_remove_current_network(struct ipw_priv *priv) 2943{ 2944 struct list_head *element, *safe; 2945 struct libipw_network *network = NULL; 2946 unsigned long flags; 2947 2948 spin_lock_irqsave(&priv->ieee->lock, flags); 2949 list_for_each_safe(element, safe, &priv->ieee->network_list) { 2950 network = list_entry(element, struct libipw_network, list); 2951 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) { 2952 list_del(element); 2953 list_add_tail(&network->list, 2954 &priv->ieee->network_free_list); 2955 } 2956 } 2957 spin_unlock_irqrestore(&priv->ieee->lock, flags); 2958} 2959 2960/** 2961 * Check that card is still alive. 2962 * Reads debug register from domain0. 2963 * If card is present, pre-defined value should 2964 * be found there. 2965 * 2966 * @param priv 2967 * @return 1 if card is present, 0 otherwise 2968 */ 2969static inline int ipw_alive(struct ipw_priv *priv) 2970{ 2971 return ipw_read32(priv, 0x90) == 0xd55555d5; 2972} 2973 2974/* timeout in msec, attempted in 10-msec quanta */ 2975static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, 2976 int timeout) 2977{ 2978 int i = 0; 2979 2980 do { 2981 if ((ipw_read32(priv, addr) & mask) == mask) 2982 return i; 2983 mdelay(10); 2984 i += 10; 2985 } while (i < timeout); 2986 2987 return -ETIME; 2988} 2989 2990/* These functions load the firmware and micro code for the operation of 2991 * the ipw hardware. It assumes the buffer has all the bits for the 2992 * image and the caller is handling the memory allocation and clean up. 2993 */ 2994 2995static int ipw_stop_master(struct ipw_priv *priv) 2996{ 2997 int rc; 2998 2999 IPW_DEBUG_TRACE(">> \n"); 3000 /* stop master. 
typical delay - 0 */ 3001 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); 3002 3003 /* timeout is in msec, polled in 10-msec quanta */ 3004 rc = ipw_poll_bit(priv, IPW_RESET_REG, 3005 IPW_RESET_REG_MASTER_DISABLED, 100); 3006 if (rc < 0) { 3007 IPW_ERROR("wait for stop master failed after 100ms\n"); 3008 return -1; 3009 } 3010 3011 IPW_DEBUG_INFO("stop master %dms\n", rc); 3012 3013 return rc; 3014} 3015 3016static void ipw_arc_release(struct ipw_priv *priv) 3017{ 3018 IPW_DEBUG_TRACE(">> \n"); 3019 mdelay(5); 3020 3021 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); 3022 3023 /* no one knows timing, for safety add some delay */ 3024 mdelay(5); 3025} 3026 3027struct fw_chunk { 3028 __le32 address; 3029 __le32 length; 3030}; 3031 3032static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) 3033{ 3034 int rc = 0, i, addr; 3035 u8 cr = 0; 3036 __le16 *image; 3037 3038 image = (__le16 *) data; 3039 3040 IPW_DEBUG_TRACE(">> \n"); 3041 3042 rc = ipw_stop_master(priv); 3043 3044 if (rc < 0) 3045 return rc; 3046 3047 for (addr = IPW_SHARED_LOWER_BOUND; 3048 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) { 3049 ipw_write32(priv, addr, 0); 3050 } 3051 3052 /* no ucode (yet) */ 3053 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive)); 3054 /* destroy DMA queues */ 3055 /* reset sequence */ 3056 3057 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON); 3058 ipw_arc_release(priv); 3059 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF); 3060 mdelay(1); 3061 3062 /* reset PHY */ 3063 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN); 3064 mdelay(1); 3065 3066 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0); 3067 mdelay(1); 3068 3069 /* enable ucode store */ 3070 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0); 3071 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS); 3072 mdelay(1); 3073 3074 /* write ucode */ 3075 /** 3076 * @bug 3077 * Do NOT set indirect address register once and then 3078 * store data to indirect data register in the loop. 3079 * It seems very reasonable, but in this case DINO do not 3080 * accept ucode. It is essential to set address each time. 3081 */ 3082 /* load new ipw uCode */ 3083 for (i = 0; i < len / 2; i++) 3084 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE, 3085 le16_to_cpu(image[i])); 3086 3087 /* enable DINO */ 3088 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); 3089 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM); 3090 3091 /* this is where the igx / win driver deveates from the VAP driver. */ 3092 3093 /* wait for alive response */ 3094 for (i = 0; i < 100; i++) { 3095 /* poll for incoming data */ 3096 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS); 3097 if (cr & DINO_RXFIFO_DATA) 3098 break; 3099 mdelay(1); 3100 } 3101 3102 if (cr & DINO_RXFIFO_DATA) { 3103 /* alive_command_responce size is NOT multiple of 4 */ 3104 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4]; 3105 3106 for (i = 0; i < ARRAY_SIZE(response_buffer); i++) 3107 response_buffer[i] = 3108 cpu_to_le32(ipw_read_reg32(priv, 3109 IPW_BASEBAND_RX_FIFO_READ)); 3110 memcpy(&priv->dino_alive, response_buffer, 3111 sizeof(priv->dino_alive)); 3112 if (priv->dino_alive.alive_command == 1 3113 && priv->dino_alive.ucode_valid == 1) { 3114 rc = 0; 3115 IPW_DEBUG_INFO 3116 ("Microcode OK, rev. %d (0x%x) dev. 
%d (0x%x) " 3117 "of %02d/%02d/%02d %02d:%02d\n", 3118 priv->dino_alive.software_revision, 3119 priv->dino_alive.software_revision, 3120 priv->dino_alive.device_identifier, 3121 priv->dino_alive.device_identifier, 3122 priv->dino_alive.time_stamp[0], 3123 priv->dino_alive.time_stamp[1], 3124 priv->dino_alive.time_stamp[2], 3125 priv->dino_alive.time_stamp[3], 3126 priv->dino_alive.time_stamp[4]); 3127 } else { 3128 IPW_DEBUG_INFO("Microcode is not alive\n"); 3129 rc = -EINVAL; 3130 } 3131 } else { 3132 IPW_DEBUG_INFO("No alive response from DINO\n"); 3133 rc = -ETIME; 3134 } 3135 3136 /* disable DINO, otherwise for some reason 3137 firmware have problem getting alive resp. */ 3138 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); 3139 3140 return rc; 3141} 3142 3143static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) 3144{ 3145 int ret = -1; 3146 int offset = 0; 3147 struct fw_chunk *chunk; 3148 int total_nr = 0; 3149 int i; 3150 struct pci_pool *pool; 3151 u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL]; 3152 dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL]; 3153 3154 IPW_DEBUG_TRACE("<< : \n"); 3155 3156 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0); 3157 if (!pool) { 3158 IPW_ERROR("pci_pool_create failed\n"); 3159 return -ENOMEM; 3160 } 3161 3162 /* Start the Dma */ 3163 ret = ipw_fw_dma_enable(priv); 3164 3165 /* the DMA is already ready this would be a bug. */ 3166 BUG_ON(priv->sram_desc.last_cb_index > 0); 3167 3168 do { 3169 u32 chunk_len; 3170 u8 *start; 3171 int size; 3172 int nr = 0; 3173 3174 chunk = (struct fw_chunk *)(data + offset); 3175 offset += sizeof(struct fw_chunk); 3176 chunk_len = le32_to_cpu(chunk->length); 3177 start = data + offset; 3178 3179 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH; 3180 for (i = 0; i < nr; i++) { 3181 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL, 3182 &phys[total_nr]); 3183 if (!virts[total_nr]) { 3184 ret = -ENOMEM; 3185 goto out; 3186 } 3187 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH, 3188 CB_MAX_LENGTH); 3189 memcpy(virts[total_nr], start, size); 3190 start += size; 3191 total_nr++; 3192 /* We don't support fw chunk larger than 64*8K */ 3193 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL); 3194 } 3195 3196 /* build DMA packet and queue up for sending */ 3197 /* dma to chunk->address, the chunk->length bytes from data + 3198 * offeset*/ 3199 /* Dma loading */ 3200 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr], 3201 nr, le32_to_cpu(chunk->address), 3202 chunk_len); 3203 if (ret) { 3204 IPW_DEBUG_INFO("dmaAddBuffer Failed\n"); 3205 goto out; 3206 } 3207 3208 offset += chunk_len; 3209 } while (offset < len); 3210 3211 /* Run the DMA and wait for the answer */ 3212 ret = ipw_fw_dma_kick(priv); 3213 if (ret) { 3214 IPW_ERROR("dmaKick Failed\n"); 3215 goto out; 3216 } 3217 3218 ret = ipw_fw_dma_wait(priv); 3219 if (ret) { 3220 IPW_ERROR("dmaWaitSync Failed\n"); 3221 goto out; 3222 } 3223 out: 3224 for (i = 0; i < total_nr; i++) 3225 pci_pool_free(pool, virts[i], phys[i]); 3226 3227 pci_pool_destroy(pool); 3228 3229 return ret; 3230} 3231 3232/* stop nic */ 3233static int ipw_stop_nic(struct ipw_priv *priv) 3234{ 3235 int rc = 0; 3236 3237 /* stop */ 3238 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); 3239 3240 rc = ipw_poll_bit(priv, IPW_RESET_REG, 3241 IPW_RESET_REG_MASTER_DISABLED, 500); 3242 if (rc < 0) { 3243 IPW_ERROR("wait for reg master disabled failed after 500ms\n"); 3244 return rc; 3245 } 3246 3247 ipw_set_bit(priv, IPW_RESET_REG, 
CBD_RESET_REG_PRINCETON_RESET); 3248 3249 return rc; 3250} 3251 3252static void ipw_start_nic(struct ipw_priv *priv) 3253{ 3254 IPW_DEBUG_TRACE(">>\n"); 3255 3256 /* prvHwStartNic release ARC */ 3257 ipw_clear_bit(priv, IPW_RESET_REG, 3258 IPW_RESET_REG_MASTER_DISABLED | 3259 IPW_RESET_REG_STOP_MASTER | 3260 CBD_RESET_REG_PRINCETON_RESET); 3261 3262 /* enable power management */ 3263 ipw_set_bit(priv, IPW_GP_CNTRL_RW, 3264 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY); 3265 3266 IPW_DEBUG_TRACE("<<\n"); 3267} 3268 3269static int ipw_init_nic(struct ipw_priv *priv) 3270{ 3271 int rc; 3272 3273 IPW_DEBUG_TRACE(">>\n"); 3274 /* reset */ 3275 /*prvHwInitNic */ 3276 /* set "initialization complete" bit to move adapter to D0 state */ 3277 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE); 3278 3279 /* low-level PLL activation */ 3280 ipw_write32(priv, IPW_READ_INT_REGISTER, 3281 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER); 3282 3283 /* wait for clock stabilization */ 3284 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW, 3285 IPW_GP_CNTRL_BIT_CLOCK_READY, 250); 3286 if (rc < 0) 3287 IPW_DEBUG_INFO("FAILED wait for clock stablization\n"); 3288 3289 /* assert SW reset */ 3290 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET); 3291 3292 udelay(10); 3293 3294 /* set "initialization complete" bit to move adapter to D0 state */ 3295 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE); 3296 3297 IPW_DEBUG_TRACE(">>\n"); 3298 return 0; 3299} 3300 3301/* Call this function from process context, it will sleep in request_firmware. 3302 * Probe is an ok place to call this from. 3303 */ 3304static int ipw_reset_nic(struct ipw_priv *priv) 3305{ 3306 int rc = 0; 3307 unsigned long flags; 3308 3309 IPW_DEBUG_TRACE(">>\n"); 3310 3311 rc = ipw_init_nic(priv); 3312 3313 spin_lock_irqsave(&priv->lock, flags); 3314 /* Clear the 'host command active' bit... 
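	   so that any caller sleeping in __ipw_send_cmd() is released
	   instead of waiting out HOST_COMPLETE_TIMEOUT after the NIC reset;
	   the scan status bits are cleared and wait_state woken below for
	   the same reason.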
*/ 3315 priv->status &= ~STATUS_HCMD_ACTIVE; 3316 wake_up_interruptible(&priv->wait_command_queue); 3317 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING); 3318 wake_up_interruptible(&priv->wait_state); 3319 spin_unlock_irqrestore(&priv->lock, flags); 3320 3321 IPW_DEBUG_TRACE("<<\n"); 3322 return rc; 3323} 3324 3325 3326struct ipw_fw { 3327 __le32 ver; 3328 __le32 boot_size; 3329 __le32 ucode_size; 3330 __le32 fw_size; 3331 u8 data[0]; 3332}; 3333 3334static int ipw_get_fw(struct ipw_priv *priv, 3335 const struct firmware **raw, const char *name) 3336{ 3337 struct ipw_fw *fw; 3338 int rc; 3339 3340 /* ask firmware_class module to get the boot firmware off disk */ 3341 rc = request_firmware(raw, name, &priv->pci_dev->dev); 3342 if (rc < 0) { 3343 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc); 3344 return rc; 3345 } 3346 3347 if ((*raw)->size < sizeof(*fw)) { 3348 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size); 3349 return -EINVAL; 3350 } 3351 3352 fw = (void *)(*raw)->data; 3353 3354 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) + 3355 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) { 3356 IPW_ERROR("%s is too small or corrupt (%zd)\n", 3357 name, (*raw)->size); 3358 return -EINVAL; 3359 } 3360 3361 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n", 3362 name, 3363 le32_to_cpu(fw->ver) >> 16, 3364 le32_to_cpu(fw->ver) & 0xff, 3365 (*raw)->size - sizeof(*fw)); 3366 return 0; 3367} 3368 3369#define IPW_RX_BUF_SIZE (3000) 3370 3371static void ipw_rx_queue_reset(struct ipw_priv *priv, 3372 struct ipw_rx_queue *rxq) 3373{ 3374 unsigned long flags; 3375 int i; 3376 3377 spin_lock_irqsave(&rxq->lock, flags); 3378 3379 INIT_LIST_HEAD(&rxq->rx_free); 3380 INIT_LIST_HEAD(&rxq->rx_used); 3381 3382 /* Fill the rx_used queue with _all_ of the Rx buffers */ 3383 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 3384 /* In the reset function, these buffers may have been allocated 3385 * to an SKB, so we need to unmap and free potential storage */ 3386 if (rxq->pool[i].skb != NULL) { 3387 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 3388 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 3389 dev_kfree_skb(rxq->pool[i].skb); 3390 rxq->pool[i].skb = NULL; 3391 } 3392 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 3393 } 3394 3395 /* Set us so that we have processed and used all buffers, but have 3396 * not restocked the Rx queue with fresh buffers */ 3397 rxq->read = rxq->write = 0; 3398 rxq->free_count = 0; 3399 spin_unlock_irqrestore(&rxq->lock, flags); 3400} 3401 3402#ifdef CONFIG_PM 3403static int fw_loaded = 0; 3404static const struct firmware *raw = NULL; 3405 3406static void free_firmware(void) 3407{ 3408 if (fw_loaded) { 3409 release_firmware(raw); 3410 raw = NULL; 3411 fw_loaded = 0; 3412 } 3413} 3414#else 3415#define free_firmware() do {} while (0) 3416#endif 3417 3418static int ipw_load(struct ipw_priv *priv) 3419{ 3420#ifndef CONFIG_PM 3421 const struct firmware *raw = NULL; 3422#endif 3423 struct ipw_fw *fw; 3424 u8 *boot_img, *ucode_img, *fw_img; 3425 u8 *name = NULL; 3426 int rc = 0, retries = 3; 3427 3428 switch (priv->ieee->iw_mode) { 3429 case IW_MODE_ADHOC: 3430 name = "ipw2200-ibss.fw"; 3431 break; 3432#ifdef CONFIG_IPW2200_MONITOR 3433 case IW_MODE_MONITOR: 3434 name = "ipw2200-sniffer.fw"; 3435 break; 3436#endif 3437 case IW_MODE_INFRA: 3438 name = "ipw2200-bss.fw"; 3439 break; 3440 } 3441 3442 if (!name) { 3443 rc = -EINVAL; 3444 goto error; 3445 } 3446 3447#ifdef CONFIG_PM 3448 if (!fw_loaded) { 3449#endif 
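		/* With CONFIG_PM the image is fetched via request_firmware()
		 * only once and cached in the static raw/fw_loaded pair, so
		 * resume and later reloads can reprogram the device without
		 * going back to the filesystem; without CONFIG_PM it is
		 * released again before ipw_load() returns. */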
3450 rc = ipw_get_fw(priv, &raw, name); 3451 if (rc < 0) 3452 goto error; 3453#ifdef CONFIG_PM 3454 } 3455#endif 3456 3457 fw = (void *)raw->data; 3458 boot_img = &fw->data[0]; 3459 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)]; 3460 fw_img = &fw->data[le32_to_cpu(fw->boot_size) + 3461 le32_to_cpu(fw->ucode_size)]; 3462 3463 if (rc < 0) 3464 goto error; 3465 3466 if (!priv->rxq) 3467 priv->rxq = ipw_rx_queue_alloc(priv); 3468 else 3469 ipw_rx_queue_reset(priv, priv->rxq); 3470 if (!priv->rxq) { 3471 IPW_ERROR("Unable to initialize Rx queue\n"); 3472 goto error; 3473 } 3474 3475 retry: 3476 /* Ensure interrupts are disabled */ 3477 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 3478 priv->status &= ~STATUS_INT_ENABLED; 3479 3480 /* ack pending interrupts */ 3481 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3482 3483 ipw_stop_nic(priv); 3484 3485 rc = ipw_reset_nic(priv); 3486 if (rc < 0) { 3487 IPW_ERROR("Unable to reset NIC\n"); 3488 goto error; 3489 } 3490 3491 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND, 3492 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); 3493 3494 /* DMA the initial boot firmware into the device */ 3495 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size)); 3496 if (rc < 0) { 3497 IPW_ERROR("Unable to load boot firmware: %d\n", rc); 3498 goto error; 3499 } 3500 3501 /* kick start the device */ 3502 ipw_start_nic(priv); 3503 3504 /* wait for the device to finish its initial startup sequence */ 3505 rc = ipw_poll_bit(priv, IPW_INTA_RW, 3506 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); 3507 if (rc < 0) { 3508 IPW_ERROR("device failed to boot initial fw image\n"); 3509 goto error; 3510 } 3511 IPW_DEBUG_INFO("initial device response after %dms\n", rc); 3512 3513 /* ack fw init done interrupt */ 3514 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); 3515 3516 /* DMA the ucode into the device */ 3517 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size)); 3518 if (rc < 0) { 3519 IPW_ERROR("Unable to load ucode: %d\n", rc); 3520 goto error; 3521 } 3522 3523 /* stop nic */ 3524 ipw_stop_nic(priv); 3525 3526 /* DMA bss firmware into the device */ 3527 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size)); 3528 if (rc < 0) { 3529 IPW_ERROR("Unable to load firmware: %d\n", rc); 3530 goto error; 3531 } 3532#ifdef CONFIG_PM 3533 fw_loaded = 1; 3534#endif 3535 3536 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); 3537 3538 rc = ipw_queue_reset(priv); 3539 if (rc < 0) { 3540 IPW_ERROR("Unable to initialize queues\n"); 3541 goto error; 3542 } 3543 3544 /* Ensure interrupts are disabled */ 3545 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 3546 /* ack pending interrupts */ 3547 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3548 3549 /* kick start the device */ 3550 ipw_start_nic(priv); 3551 3552 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) { 3553 if (retries > 0) { 3554 IPW_WARNING("Parity error. 
Retrying init.\n"); 3555 retries--; 3556 goto retry; 3557 } 3558 3559 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n"); 3560 rc = -EIO; 3561 goto error; 3562 } 3563 3564 /* wait for the device */ 3565 rc = ipw_poll_bit(priv, IPW_INTA_RW, 3566 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); 3567 if (rc < 0) { 3568 IPW_ERROR("device failed to start within 500ms\n"); 3569 goto error; 3570 } 3571 IPW_DEBUG_INFO("device response after %dms\n", rc); 3572 3573 /* ack fw init done interrupt */ 3574 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); 3575 3576 /* read eeprom data and initialize the eeprom region of sram */ 3577 priv->eeprom_delay = 1; 3578 ipw_eeprom_init_sram(priv); 3579 3580 /* enable interrupts */ 3581 ipw_enable_interrupts(priv); 3582 3583 /* Ensure our queue has valid packets */ 3584 ipw_rx_queue_replenish(priv); 3585 3586 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read); 3587 3588 /* ack pending interrupts */ 3589 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3590 3591#ifndef CONFIG_PM 3592 release_firmware(raw); 3593#endif 3594 return 0; 3595 3596 error: 3597 if (priv->rxq) { 3598 ipw_rx_queue_free(priv, priv->rxq); 3599 priv->rxq = NULL; 3600 } 3601 ipw_tx_queue_free(priv); 3602 if (raw) 3603 release_firmware(raw); 3604#ifdef CONFIG_PM 3605 fw_loaded = 0; 3606 raw = NULL; 3607#endif 3608 3609 return rc; 3610} 3611 3612/** 3613 * DMA services 3614 * 3615 * Theory of operation 3616 * 3617 * A queue is a circular buffers with 'Read' and 'Write' pointers. 3618 * 2 empty entries always kept in the buffer to protect from overflow. 3619 * 3620 * For Tx queue, there are low mark and high mark limits. If, after queuing 3621 * the packet for Tx, free space become < low mark, Tx queue stopped. When 3622 * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 3623 * Tx queue resumed. 3624 * 3625 * The IPW operates with six queues, one receive queue in the device's 3626 * sram, one transmit queue for sending commands to the device firmware, 3627 * and four transmit queues for data. 3628 * 3629 * The four transmit queues allow for performing quality of service (qos) 3630 * transmissions as per the 802.11 protocol. Currently Linux does not 3631 * provide a mechanism to the user for utilizing prioritized queues, so 3632 * we only utilize the first data transmit queue (queue1). 3633 */ 3634 3635/** 3636 * Driver allocates buffers of this size for Rx 3637 */ 3638 3639/** 3640 * ipw_rx_queue_space - Return number of free slots available in queue. 3641 */ 3642static int ipw_rx_queue_space(const struct ipw_rx_queue *q) 3643{ 3644 int s = q->read - q->write; 3645 if (s <= 0) 3646 s += RX_QUEUE_SIZE; 3647 /* keep some buffer to not confuse full and empty queue */ 3648 s -= 2; 3649 if (s < 0) 3650 s = 0; 3651 return s; 3652} 3653 3654static inline int ipw_tx_queue_space(const struct clx2_queue *q) 3655{ 3656 int s = q->last_used - q->first_empty; 3657 if (s <= 0) 3658 s += q->n_bd; 3659 s -= 2; /* keep some reserve to not confuse empty and full situations */ 3660 if (s < 0) 3661 s = 0; 3662 return s; 3663} 3664 3665static inline int ipw_queue_inc_wrap(int index, int n_bd) 3666{ 3667 return (++index == n_bd) ? 0 : index; 3668} 3669 3670/** 3671 * Initialize common DMA queue structure 3672 * 3673 * @param q queue to init 3674 * @param count Number of BD's to allocate. 
Should be power of 2 3675 * @param read_register Address for 'read' register 3676 * (not offset within BAR, full address) 3677 * @param write_register Address for 'write' register 3678 * (not offset within BAR, full address) 3679 * @param base_register Address for 'base' register 3680 * (not offset within BAR, full address) 3681 * @param size Address for 'size' register 3682 * (not offset within BAR, full address) 3683 */ 3684static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q, 3685 int count, u32 read, u32 write, u32 base, u32 size) 3686{ 3687 q->n_bd = count; 3688 3689 q->low_mark = q->n_bd / 4; 3690 if (q->low_mark < 4) 3691 q->low_mark = 4; 3692 3693 q->high_mark = q->n_bd / 8; 3694 if (q->high_mark < 2) 3695 q->high_mark = 2; 3696 3697 q->first_empty = q->last_used = 0; 3698 q->reg_r = read; 3699 q->reg_w = write; 3700 3701 ipw_write32(priv, base, q->dma_addr); 3702 ipw_write32(priv, size, count); 3703 ipw_write32(priv, read, 0); 3704 ipw_write32(priv, write, 0); 3705 3706 _ipw_read32(priv, 0x90); 3707} 3708 3709static int ipw_queue_tx_init(struct ipw_priv *priv, 3710 struct clx2_tx_queue *q, 3711 int count, u32 read, u32 write, u32 base, u32 size) 3712{ 3713 struct pci_dev *dev = priv->pci_dev; 3714 3715 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL); 3716 if (!q->txb) { 3717 IPW_ERROR("vmalloc for auxilary BD structures failed\n"); 3718 return -ENOMEM; 3719 } 3720 3721 q->bd = 3722 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr); 3723 if (!q->bd) { 3724 IPW_ERROR("pci_alloc_consistent(%zd) failed\n", 3725 sizeof(q->bd[0]) * count); 3726 kfree(q->txb); 3727 q->txb = NULL; 3728 return -ENOMEM; 3729 } 3730 3731 ipw_queue_init(priv, &q->q, count, read, write, base, size); 3732 return 0; 3733} 3734 3735/** 3736 * Free one TFD, those at index [txq->q.last_used]. 3737 * Do NOT advance any indexes 3738 * 3739 * @param dev 3740 * @param txq 3741 */ 3742static void ipw_queue_tx_free_tfd(struct ipw_priv *priv, 3743 struct clx2_tx_queue *txq) 3744{ 3745 struct tfd_frame *bd = &txq->bd[txq->q.last_used]; 3746 struct pci_dev *dev = priv->pci_dev; 3747 int i; 3748 3749 /* classify bd */ 3750 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE) 3751 /* nothing to cleanup after for host commands */ 3752 return; 3753 3754 /* sanity check */ 3755 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) { 3756 IPW_ERROR("Too many chunks: %i\n", 3757 le32_to_cpu(bd->u.data.num_chunks)); 3758 /** @todo issue fatal error, it is quite serious situation */ 3759 return; 3760 } 3761 3762 /* unmap chunks if any */ 3763 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) { 3764 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]), 3765 le16_to_cpu(bd->u.data.chunk_len[i]), 3766 PCI_DMA_TODEVICE); 3767 if (txq->txb[txq->q.last_used]) { 3768 libipw_txb_free(txq->txb[txq->q.last_used]); 3769 txq->txb[txq->q.last_used] = NULL; 3770 } 3771 } 3772} 3773 3774/** 3775 * Deallocate DMA queue. 3776 * 3777 * Empty queue by removing and destroying all BD's. 3778 * Free all buffers. 
3779 * 3780 * @param dev 3781 * @param q 3782 */ 3783static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq) 3784{ 3785 struct clx2_queue *q = &txq->q; 3786 struct pci_dev *dev = priv->pci_dev; 3787 3788 if (q->n_bd == 0) 3789 return; 3790 3791 /* first, empty all BD's */ 3792 for (; q->first_empty != q->last_used; 3793 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { 3794 ipw_queue_tx_free_tfd(priv, txq); 3795 } 3796 3797 /* free buffers belonging to queue itself */ 3798 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd, 3799 q->dma_addr); 3800 kfree(txq->txb); 3801 3802 /* 0 fill whole structure */ 3803 memset(txq, 0, sizeof(*txq)); 3804} 3805 3806/** 3807 * Destroy all DMA queues and structures 3808 * 3809 * @param priv 3810 */ 3811static void ipw_tx_queue_free(struct ipw_priv *priv) 3812{ 3813 /* Tx CMD queue */ 3814 ipw_queue_tx_free(priv, &priv->txq_cmd); 3815 3816 /* Tx queues */ 3817 ipw_queue_tx_free(priv, &priv->txq[0]); 3818 ipw_queue_tx_free(priv, &priv->txq[1]); 3819 ipw_queue_tx_free(priv, &priv->txq[2]); 3820 ipw_queue_tx_free(priv, &priv->txq[3]); 3821} 3822 3823static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid) 3824{ 3825 /* First 3 bytes are manufacturer */ 3826 bssid[0] = priv->mac_addr[0]; 3827 bssid[1] = priv->mac_addr[1]; 3828 bssid[2] = priv->mac_addr[2]; 3829 3830 /* Last bytes are random */ 3831 get_random_bytes(&bssid[3], ETH_ALEN - 3); 3832 3833 bssid[0] &= 0xfe; /* clear multicast bit */ 3834 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */ 3835} 3836 3837static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) 3838{ 3839 struct ipw_station_entry entry; 3840 int i; 3841 3842 for (i = 0; i < priv->num_stations; i++) { 3843 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) { 3844 /* Another node is active in network */ 3845 priv->missed_adhoc_beacons = 0; 3846 if (!(priv->config & CFG_STATIC_CHANNEL)) 3847 /* when other nodes drop out, we drop out */ 3848 priv->config &= ~CFG_ADHOC_PERSIST; 3849 3850 return i; 3851 } 3852 } 3853 3854 if (i == MAX_STATIONS) 3855 return IPW_INVALID_STATION; 3856 3857 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid); 3858 3859 entry.reserved = 0; 3860 entry.support_mode = 0; 3861 memcpy(entry.mac_addr, bssid, ETH_ALEN); 3862 memcpy(priv->stations[i], bssid, ETH_ALEN); 3863 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry), 3864 &entry, sizeof(entry)); 3865 priv->num_stations++; 3866 3867 return i; 3868} 3869 3870static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid) 3871{ 3872 int i; 3873 3874 for (i = 0; i < priv->num_stations; i++) 3875 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) 3876 return i; 3877 3878 return IPW_INVALID_STATION; 3879} 3880 3881static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) 3882{ 3883 int err; 3884 3885 if (priv->status & STATUS_ASSOCIATING) { 3886 IPW_DEBUG_ASSOC("Disassociating while associating.\n"); 3887 queue_work(priv->workqueue, &priv->disassociate); 3888 return; 3889 } 3890 3891 if (!(priv->status & STATUS_ASSOCIATED)) { 3892 IPW_DEBUG_ASSOC("Disassociating while not associated.\n"); 3893 return; 3894 } 3895 3896 IPW_DEBUG_ASSOC("Disassocation attempt from %pM " 3897 "on channel %d.\n", 3898 priv->assoc_request.bssid, 3899 priv->assoc_request.channel); 3900 3901 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED); 3902 priv->status |= STATUS_DISASSOCIATING; 3903 3904 if (quiet) 3905 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET; 3906 else 3907 
priv->assoc_request.assoc_type = HC_DISASSOCIATE; 3908 3909 err = ipw_send_associate(priv, &priv->assoc_request); 3910 if (err) { 3911 IPW_DEBUG_HC("Attempt to send [dis]associate command " 3912 "failed.\n"); 3913 return; 3914 } 3915 3916} 3917 3918static int ipw_disassociate(void *data) 3919{ 3920 struct ipw_priv *priv = data; 3921 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) 3922 return 0; 3923 ipw_send_disassociate(data, 0); 3924 netif_carrier_off(priv->net_dev); 3925 return 1; 3926} 3927 3928static void ipw_bg_disassociate(struct work_struct *work) 3929{ 3930 struct ipw_priv *priv = 3931 container_of(work, struct ipw_priv, disassociate); 3932 mutex_lock(&priv->mutex); 3933 ipw_disassociate(priv); 3934 mutex_unlock(&priv->mutex); 3935} 3936 3937static void ipw_system_config(struct work_struct *work) 3938{ 3939 struct ipw_priv *priv = 3940 container_of(work, struct ipw_priv, system_config); 3941 3942#ifdef CONFIG_IPW2200_PROMISCUOUS 3943 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { 3944 priv->sys_config.accept_all_data_frames = 1; 3945 priv->sys_config.accept_non_directed_frames = 1; 3946 priv->sys_config.accept_all_mgmt_bcpr = 1; 3947 priv->sys_config.accept_all_mgmt_frames = 1; 3948 } 3949#endif 3950 3951 ipw_send_system_config(priv); 3952} 3953 3954struct ipw_status_code { 3955 u16 status; 3956 const char *reason; 3957}; 3958 3959static const struct ipw_status_code ipw_status_codes[] = { 3960 {0x00, "Successful"}, 3961 {0x01, "Unspecified failure"}, 3962 {0x0A, "Cannot support all requested capabilities in the " 3963 "Capability information field"}, 3964 {0x0B, "Reassociation denied due to inability to confirm that " 3965 "association exists"}, 3966 {0x0C, "Association denied due to reason outside the scope of this " 3967 "standard"}, 3968 {0x0D, 3969 "Responding station does not support the specified authentication " 3970 "algorithm"}, 3971 {0x0E, 3972 "Received an Authentication frame with authentication sequence " 3973 "transaction sequence number out of expected sequence"}, 3974 {0x0F, "Authentication rejected because of challenge failure"}, 3975 {0x10, "Authentication rejected due to timeout waiting for next " 3976 "frame in sequence"}, 3977 {0x11, "Association denied because AP is unable to handle additional " 3978 "associated stations"}, 3979 {0x12, 3980 "Association denied due to requesting station not supporting all " 3981 "of the datarates in the BSSBasicServiceSet Parameter"}, 3982 {0x13, 3983 "Association denied due to requesting station not supporting " 3984 "short preamble operation"}, 3985 {0x14, 3986 "Association denied due to requesting station not supporting " 3987 "PBCC encoding"}, 3988 {0x15, 3989 "Association denied due to requesting station not supporting " 3990 "channel agility"}, 3991 {0x19, 3992 "Association denied due to requesting station not supporting " 3993 "short slot operation"}, 3994 {0x1A, 3995 "Association denied due to requesting station not supporting " 3996 "DSSS-OFDM operation"}, 3997 {0x28, "Invalid Information Element"}, 3998 {0x29, "Group Cipher is not valid"}, 3999 {0x2A, "Pairwise Cipher is not valid"}, 4000 {0x2B, "AKMP is not valid"}, 4001 {0x2C, "Unsupported RSN IE version"}, 4002 {0x2D, "Invalid RSN IE Capabilities"}, 4003 {0x2E, "Cipher suite is rejected per security policy"}, 4004}; 4005 4006static const char *ipw_get_status_code(u16 status) 4007{ 4008 int i; 4009 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++) 4010 if (ipw_status_codes[i].status == (status & 0xff)) 4011 return 
ipw_status_codes[i].reason; 4012 return "Unknown status value."; 4013} 4014 4015static void inline average_init(struct average *avg) 4016{ 4017 memset(avg, 0, sizeof(*avg)); 4018} 4019 4020#define DEPTH_RSSI 8 4021#define DEPTH_NOISE 16 4022static s16 exponential_average(s16 prev_avg, s16 val, u8 depth) 4023{ 4024 return ((depth-1)*prev_avg + val)/depth; 4025} 4026 4027static void average_add(struct average *avg, s16 val) 4028{ 4029 avg->sum -= avg->entries[avg->pos]; 4030 avg->sum += val; 4031 avg->entries[avg->pos++] = val; 4032 if (unlikely(avg->pos == AVG_ENTRIES)) { 4033 avg->init = 1; 4034 avg->pos = 0; 4035 } 4036} 4037 4038static s16 average_value(struct average *avg) 4039{ 4040 if (!unlikely(avg->init)) { 4041 if (avg->pos) 4042 return avg->sum / avg->pos; 4043 return 0; 4044 } 4045 4046 return avg->sum / AVG_ENTRIES; 4047} 4048 4049static void ipw_reset_stats(struct ipw_priv *priv) 4050{ 4051 u32 len = sizeof(u32); 4052 4053 priv->quality = 0; 4054 4055 average_init(&priv->average_missed_beacons); 4056 priv->exp_avg_rssi = -60; 4057 priv->exp_avg_noise = -85 + 0x100; 4058 4059 priv->last_rate = 0; 4060 priv->last_missed_beacons = 0; 4061 priv->last_rx_packets = 0; 4062 priv->last_tx_packets = 0; 4063 priv->last_tx_failures = 0; 4064 4065 /* Firmware managed, reset only when NIC is restarted, so we have to 4066 * normalize on the current value */ 4067 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, 4068 &priv->last_rx_err, &len); 4069 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, 4070 &priv->last_tx_failures, &len); 4071 4072 /* Driver managed, reset with each association */ 4073 priv->missed_adhoc_beacons = 0; 4074 priv->missed_beacons = 0; 4075 priv->tx_packets = 0; 4076 priv->rx_packets = 0; 4077 4078} 4079 4080static u32 ipw_get_max_rate(struct ipw_priv *priv) 4081{ 4082 u32 i = 0x80000000; 4083 u32 mask = priv->rates_mask; 4084 /* If currently associated in B mode, restrict the maximum 4085 * rate match to B rates */ 4086 if (priv->assoc_request.ieee_mode == IPW_B_MODE) 4087 mask &= LIBIPW_CCK_RATES_MASK; 4088 4089 /* TODO: Verify that the rate is supported by the current rates 4090 * list. 
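 *
 * The loop below isolates the highest-order bit still set in the
 * (possibly CCK-restricted) mask by walking a probe bit down from
 * 0x80000000; e.g. a mask of 0x00000190 leaves i == 0x00000100, which
 * the switch statement then maps to a rate in bits per second.  If the
 * mask is empty, i ends up 0 and the fall-back below returns 11 Mbps
 * for pure B mode and 54 Mbps otherwise.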
*/ 4091 4092 while (i && !(mask & i)) 4093 i >>= 1; 4094 switch (i) { 4095 case LIBIPW_CCK_RATE_1MB_MASK: 4096 return 1000000; 4097 case LIBIPW_CCK_RATE_2MB_MASK: 4098 return 2000000; 4099 case LIBIPW_CCK_RATE_5MB_MASK: 4100 return 5500000; 4101 case LIBIPW_OFDM_RATE_6MB_MASK: 4102 return 6000000; 4103 case LIBIPW_OFDM_RATE_9MB_MASK: 4104 return 9000000; 4105 case LIBIPW_CCK_RATE_11MB_MASK: 4106 return 11000000; 4107 case LIBIPW_OFDM_RATE_12MB_MASK: 4108 return 12000000; 4109 case LIBIPW_OFDM_RATE_18MB_MASK: 4110 return 18000000; 4111 case LIBIPW_OFDM_RATE_24MB_MASK: 4112 return 24000000; 4113 case LIBIPW_OFDM_RATE_36MB_MASK: 4114 return 36000000; 4115 case LIBIPW_OFDM_RATE_48MB_MASK: 4116 return 48000000; 4117 case LIBIPW_OFDM_RATE_54MB_MASK: 4118 return 54000000; 4119 } 4120 4121 if (priv->ieee->mode == IEEE_B) 4122 return 11000000; 4123 else 4124 return 54000000; 4125} 4126 4127static u32 ipw_get_current_rate(struct ipw_priv *priv) 4128{ 4129 u32 rate, len = sizeof(rate); 4130 int err; 4131 4132 if (!(priv->status & STATUS_ASSOCIATED)) 4133 return 0; 4134 4135 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) { 4136 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate, 4137 &len); 4138 if (err) { 4139 IPW_DEBUG_INFO("failed querying ordinals.\n"); 4140 return 0; 4141 } 4142 } else 4143 return ipw_get_max_rate(priv); 4144 4145 switch (rate) { 4146 case IPW_TX_RATE_1MB: 4147 return 1000000; 4148 case IPW_TX_RATE_2MB: 4149 return 2000000; 4150 case IPW_TX_RATE_5MB: 4151 return 5500000; 4152 case IPW_TX_RATE_6MB: 4153 return 6000000; 4154 case IPW_TX_RATE_9MB: 4155 return 9000000; 4156 case IPW_TX_RATE_11MB: 4157 return 11000000; 4158 case IPW_TX_RATE_12MB: 4159 return 12000000; 4160 case IPW_TX_RATE_18MB: 4161 return 18000000; 4162 case IPW_TX_RATE_24MB: 4163 return 24000000; 4164 case IPW_TX_RATE_36MB: 4165 return 36000000; 4166 case IPW_TX_RATE_48MB: 4167 return 48000000; 4168 case IPW_TX_RATE_54MB: 4169 return 54000000; 4170 } 4171 4172 return 0; 4173} 4174 4175#define IPW_STATS_INTERVAL (2 * HZ) 4176static void ipw_gather_stats(struct ipw_priv *priv) 4177{ 4178 u32 rx_err, rx_err_delta, rx_packets_delta; 4179 u32 tx_failures, tx_failures_delta, tx_packets_delta; 4180 u32 missed_beacons_percent, missed_beacons_delta; 4181 u32 quality = 0; 4182 u32 len = sizeof(u32); 4183 s16 rssi; 4184 u32 beacon_quality, signal_quality, tx_quality, rx_quality, 4185 rate_quality; 4186 u32 max_rate; 4187 4188 if (!(priv->status & STATUS_ASSOCIATED)) { 4189 priv->quality = 0; 4190 return; 4191 } 4192 4193 /* Update the statistics */ 4194 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS, 4195 &priv->missed_beacons, &len); 4196 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons; 4197 priv->last_missed_beacons = priv->missed_beacons; 4198 if (priv->assoc_request.beacon_interval) { 4199 missed_beacons_percent = missed_beacons_delta * 4200 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) / 4201 (IPW_STATS_INTERVAL * 10); 4202 } else { 4203 missed_beacons_percent = 0; 4204 } 4205 average_add(&priv->average_missed_beacons, missed_beacons_percent); 4206 4207 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len); 4208 rx_err_delta = rx_err - priv->last_rx_err; 4209 priv->last_rx_err = rx_err; 4210 4211 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len); 4212 tx_failures_delta = tx_failures - priv->last_tx_failures; 4213 priv->last_tx_failures = tx_failures; 4214 4215 rx_packets_delta = priv->rx_packets - priv->last_rx_packets; 4216 
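	/* Note: rx_packets/tx_packets are driver-maintained counters that
	 * are cleared on each association (see ipw_reset_stats()), whereas
	 * the CRC error and Tx failure figures snapshotted above come from
	 * firmware ordinals that persist until the NIC restarts; both are
	 * reduced to per-interval deltas before being used below. */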
priv->last_rx_packets = priv->rx_packets; 4217 4218 tx_packets_delta = priv->tx_packets - priv->last_tx_packets; 4219 priv->last_tx_packets = priv->tx_packets; 4220 4221 /* Calculate quality based on the following: 4222 * 4223 * Missed beacon: 100% = 0, 0% = 70% missed 4224 * Rate: 60% = 1Mbs, 100% = Max 4225 * Rx and Tx errors represent a straight % of total Rx/Tx 4226 * RSSI: 100% = > -50, 0% = < -80 4227 * Rx errors: 100% = 0, 0% = 50% missed 4228 * 4229 * The lowest computed quality is used. 4230 * 4231 */ 4232#define BEACON_THRESHOLD 5 4233 beacon_quality = 100 - missed_beacons_percent; 4234 if (beacon_quality < BEACON_THRESHOLD) 4235 beacon_quality = 0; 4236 else 4237 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 / 4238 (100 - BEACON_THRESHOLD); 4239 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n", 4240 beacon_quality, missed_beacons_percent); 4241 4242 priv->last_rate = ipw_get_current_rate(priv); 4243 max_rate = ipw_get_max_rate(priv); 4244 rate_quality = priv->last_rate * 40 / max_rate + 60; 4245 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n", 4246 rate_quality, priv->last_rate / 1000000); 4247 4248 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta) 4249 rx_quality = 100 - (rx_err_delta * 100) / 4250 (rx_packets_delta + rx_err_delta); 4251 else 4252 rx_quality = 100; 4253 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n", 4254 rx_quality, rx_err_delta, rx_packets_delta); 4255 4256 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta) 4257 tx_quality = 100 - (tx_failures_delta * 100) / 4258 (tx_packets_delta + tx_failures_delta); 4259 else 4260 tx_quality = 100; 4261 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n", 4262 tx_quality, tx_failures_delta, tx_packets_delta); 4263 4264 rssi = priv->exp_avg_rssi; 4265 signal_quality = 4266 (100 * 4267 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * 4268 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) - 4269 (priv->ieee->perfect_rssi - rssi) * 4270 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) + 4271 62 * (priv->ieee->perfect_rssi - rssi))) / 4272 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * 4273 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi)); 4274 if (signal_quality > 100) 4275 signal_quality = 100; 4276 else if (signal_quality < 1) 4277 signal_quality = 0; 4278 4279 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n", 4280 signal_quality, rssi); 4281 4282 quality = min(rx_quality, signal_quality); 4283 quality = min(tx_quality, quality); 4284 quality = min(rate_quality, quality); 4285 quality = min(beacon_quality, quality); 4286 if (quality == beacon_quality) 4287 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n", 4288 quality); 4289 if (quality == rate_quality) 4290 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n", 4291 quality); 4292 if (quality == tx_quality) 4293 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n", 4294 quality); 4295 if (quality == rx_quality) 4296 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n", 4297 quality); 4298 if (quality == signal_quality) 4299 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n", 4300 quality); 4301 4302 priv->quality = quality; 4303 4304 queue_delayed_work(priv->workqueue, &priv->gather_stats, 4305 IPW_STATS_INTERVAL); 4306} 4307 4308static void ipw_bg_gather_stats(struct work_struct *work) 4309{ 4310 struct ipw_priv *priv = 4311 container_of(work, struct ipw_priv, gather_stats.work); 4312 mutex_lock(&priv->mutex); 4313 
ipw_gather_stats(priv); 4314 mutex_unlock(&priv->mutex); 4315} 4316 4317/* Missed beacon behavior: 4318 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam. 4319 * roaming_threshold -> disassociate_threshold, scan and roam for better signal. 4320 * Above disassociate threshold, give up and stop scanning. 4321 * Roaming is disabled if disassociate_threshold <= roaming_threshold */ 4322static void ipw_handle_missed_beacon(struct ipw_priv *priv, 4323 int missed_count) 4324{ 4325 priv->notif_missed_beacons = missed_count; 4326 4327 if (missed_count > priv->disassociate_threshold && 4328 priv->status & STATUS_ASSOCIATED) { 4329 /* If associated and we've hit the missed 4330 * beacon threshold, disassociate, turn 4331 * off roaming, and abort any active scans */ 4332 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 4333 IPW_DL_STATE | IPW_DL_ASSOC, 4334 "Missed beacon: %d - disassociate\n", missed_count); 4335 priv->status &= ~STATUS_ROAMING; 4336 if (priv->status & STATUS_SCANNING) { 4337 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 4338 IPW_DL_STATE, 4339 "Aborting scan with missed beacon.\n"); 4340 queue_work(priv->workqueue, &priv->abort_scan); 4341 } 4342 4343 queue_work(priv->workqueue, &priv->disassociate); 4344 return; 4345 } 4346 4347 if (priv->status & STATUS_ROAMING) { 4348 /* If we are currently roaming, then just 4349 * print a debug statement... */ 4350 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4351 "Missed beacon: %d - roam in progress\n", 4352 missed_count); 4353 return; 4354 } 4355 4356 if (roaming && 4357 (missed_count > priv->roaming_threshold && 4358 missed_count <= priv->disassociate_threshold)) { 4359 /* If we are not already roaming, set the ROAM 4360 * bit in the status and kick off a scan. 4361 * This can happen several times before we reach 4362 * disassociate_threshold. */ 4363 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4364 "Missed beacon: %d - initiate " 4365 "roaming\n", missed_count); 4366 if (!(priv->status & STATUS_ROAMING)) { 4367 priv->status |= STATUS_ROAMING; 4368 if (!(priv->status & STATUS_SCANNING)) 4369 queue_delayed_work(priv->workqueue, 4370 &priv->request_scan, 0); 4371 } 4372 return; 4373 } 4374 4375 if (priv->status & STATUS_SCANNING && 4376 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) { 4377 /* Stop scan to keep fw from getting 4378 * stuck (only if we aren't roaming -- 4379 * otherwise we'll never scan more than 2 or 3 4380 * channels..) 
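 *
 * Reaching this branch means none of the cases above applied: the
 * disassociate path did not fire (the count is below
 * disassociate_threshold or we are not associated), we are not already
 * roaming, and the roam trigger was not taken, so the only action left
 * is to abort a long-running scan and fall through to the debug print.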
*/ 4381 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE, 4382 "Aborting scan with missed beacon.\n"); 4383 queue_work(priv->workqueue, &priv->abort_scan); 4384 } 4385 4386 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); 4387} 4388 4389static void ipw_scan_event(struct work_struct *work) 4390{ 4391 union iwreq_data wrqu; 4392 4393 struct ipw_priv *priv = 4394 container_of(work, struct ipw_priv, scan_event.work); 4395 4396 wrqu.data.length = 0; 4397 wrqu.data.flags = 0; 4398 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); 4399} 4400 4401static void handle_scan_event(struct ipw_priv *priv) 4402{ 4403 /* Only userspace-requested scan completion events go out immediately */ 4404 if (!priv->user_requested_scan) { 4405 if (!delayed_work_pending(&priv->scan_event)) 4406 queue_delayed_work(priv->workqueue, &priv->scan_event, 4407 round_jiffies_relative(msecs_to_jiffies(4000))); 4408 } else { 4409 union iwreq_data wrqu; 4410 4411 priv->user_requested_scan = 0; 4412 cancel_delayed_work(&priv->scan_event); 4413 4414 wrqu.data.length = 0; 4415 wrqu.data.flags = 0; 4416 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); 4417 } 4418} 4419 4420/** 4421 * Handle host notification packet. 4422 * Called from interrupt routine 4423 */ 4424static void ipw_rx_notification(struct ipw_priv *priv, 4425 struct ipw_rx_notification *notif) 4426{ 4427 DECLARE_SSID_BUF(ssid); 4428 u16 size = le16_to_cpu(notif->size); 4429 4430 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size); 4431 4432 switch (notif->subtype) { 4433 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{ 4434 struct notif_association *assoc = ¬if->u.assoc; 4435 4436 switch (assoc->state) { 4437 case CMAS_ASSOCIATED:{ 4438 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4439 IPW_DL_ASSOC, 4440 "associated: '%s' %pM \n", 4441 print_ssid(ssid, priv->essid, 4442 priv->essid_len), 4443 priv->bssid); 4444 4445 switch (priv->ieee->iw_mode) { 4446 case IW_MODE_INFRA: 4447 memcpy(priv->ieee->bssid, 4448 priv->bssid, ETH_ALEN); 4449 break; 4450 4451 case IW_MODE_ADHOC: 4452 memcpy(priv->ieee->bssid, 4453 priv->bssid, ETH_ALEN); 4454 4455 /* clear out the station table */ 4456 priv->num_stations = 0; 4457 4458 IPW_DEBUG_ASSOC 4459 ("queueing adhoc check\n"); 4460 queue_delayed_work(priv-> 4461 workqueue, 4462 &priv-> 4463 adhoc_check, 4464 le16_to_cpu(priv-> 4465 assoc_request. 
4466 beacon_interval)); 4467 break; 4468 } 4469 4470 priv->status &= ~STATUS_ASSOCIATING; 4471 priv->status |= STATUS_ASSOCIATED; 4472 queue_work(priv->workqueue, 4473 &priv->system_config); 4474 4475#ifdef CONFIG_IPW2200_QOS 4476#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ 4477 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control)) 4478 if ((priv->status & STATUS_AUTH) && 4479 (IPW_GET_PACKET_STYPE(¬if->u.raw) 4480 == IEEE80211_STYPE_ASSOC_RESP)) { 4481 if ((sizeof 4482 (struct 4483 libipw_assoc_response) 4484 <= size) 4485 && (size <= 2314)) { 4486 struct 4487 libipw_rx_stats 4488 stats = { 4489 .len = size - 1, 4490 }; 4491 4492 IPW_DEBUG_QOS 4493 ("QoS Associate " 4494 "size %d\n", size); 4495 libipw_rx_mgt(priv-> 4496 ieee, 4497 (struct 4498 libipw_hdr_4addr 4499 *) 4500 ¬if->u.raw, &stats); 4501 } 4502 } 4503#endif 4504 4505 schedule_work(&priv->link_up); 4506 4507 break; 4508 } 4509 4510 case CMAS_AUTHENTICATED:{ 4511 if (priv-> 4512 status & (STATUS_ASSOCIATED | 4513 STATUS_AUTH)) { 4514 struct notif_authenticate *auth 4515 = ¬if->u.auth; 4516 IPW_DEBUG(IPW_DL_NOTIF | 4517 IPW_DL_STATE | 4518 IPW_DL_ASSOC, 4519 "deauthenticated: '%s' " 4520 "%pM" 4521 ": (0x%04X) - %s \n", 4522 print_ssid(ssid, 4523 priv-> 4524 essid, 4525 priv-> 4526 essid_len), 4527 priv->bssid, 4528 le16_to_cpu(auth->status), 4529 ipw_get_status_code 4530 (le16_to_cpu 4531 (auth->status))); 4532 4533 priv->status &= 4534 ~(STATUS_ASSOCIATING | 4535 STATUS_AUTH | 4536 STATUS_ASSOCIATED); 4537 4538 schedule_work(&priv->link_down); 4539 break; 4540 } 4541 4542 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4543 IPW_DL_ASSOC, 4544 "authenticated: '%s' %pM\n", 4545 print_ssid(ssid, priv->essid, 4546 priv->essid_len), 4547 priv->bssid); 4548 break; 4549 } 4550 4551 case CMAS_INIT:{ 4552 if (priv->status & STATUS_AUTH) { 4553 struct 4554 libipw_assoc_response 4555 *resp; 4556 resp = 4557 (struct 4558 libipw_assoc_response 4559 *)¬if->u.raw; 4560 IPW_DEBUG(IPW_DL_NOTIF | 4561 IPW_DL_STATE | 4562 IPW_DL_ASSOC, 4563 "association failed (0x%04X): %s\n", 4564 le16_to_cpu(resp->status), 4565 ipw_get_status_code 4566 (le16_to_cpu 4567 (resp->status))); 4568 } 4569 4570 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4571 IPW_DL_ASSOC, 4572 "disassociated: '%s' %pM \n", 4573 print_ssid(ssid, priv->essid, 4574 priv->essid_len), 4575 priv->bssid); 4576 4577 priv->status &= 4578 ~(STATUS_DISASSOCIATING | 4579 STATUS_ASSOCIATING | 4580 STATUS_ASSOCIATED | STATUS_AUTH); 4581 if (priv->assoc_network 4582 && (priv->assoc_network-> 4583 capability & 4584 WLAN_CAPABILITY_IBSS)) 4585 ipw_remove_current_network 4586 (priv); 4587 4588 schedule_work(&priv->link_down); 4589 4590 break; 4591 } 4592 4593 case CMAS_RX_ASSOC_RESP: 4594 break; 4595 4596 default: 4597 IPW_ERROR("assoc: unknown (%d)\n", 4598 assoc->state); 4599 break; 4600 } 4601 4602 break; 4603 } 4604 4605 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{ 4606 struct notif_authenticate *auth = ¬if->u.auth; 4607 switch (auth->state) { 4608 case CMAS_AUTHENTICATED: 4609 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4610 "authenticated: '%s' %pM \n", 4611 print_ssid(ssid, priv->essid, 4612 priv->essid_len), 4613 priv->bssid); 4614 priv->status |= STATUS_AUTH; 4615 break; 4616 4617 case CMAS_INIT: 4618 if (priv->status & STATUS_AUTH) { 4619 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4620 IPW_DL_ASSOC, 4621 "authentication failed (0x%04X): %s\n", 4622 le16_to_cpu(auth->status), 4623 ipw_get_status_code(le16_to_cpu 4624 (auth-> 4625 status))); 4626 } 4627 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4628 
IPW_DL_ASSOC, 4629 "deauthenticated: '%s' %pM\n", 4630 print_ssid(ssid, priv->essid, 4631 priv->essid_len), 4632 priv->bssid); 4633 4634 priv->status &= ~(STATUS_ASSOCIATING | 4635 STATUS_AUTH | 4636 STATUS_ASSOCIATED); 4637 4638 schedule_work(&priv->link_down); 4639 break; 4640 4641 case CMAS_TX_AUTH_SEQ_1: 4642 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4643 IPW_DL_ASSOC, "AUTH_SEQ_1\n"); 4644 break; 4645 case CMAS_RX_AUTH_SEQ_2: 4646 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4647 IPW_DL_ASSOC, "AUTH_SEQ_2\n"); 4648 break; 4649 case CMAS_AUTH_SEQ_1_PASS: 4650 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4651 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n"); 4652 break; 4653 case CMAS_AUTH_SEQ_1_FAIL: 4654 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4655 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n"); 4656 break; 4657 case CMAS_TX_AUTH_SEQ_3: 4658 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4659 IPW_DL_ASSOC, "AUTH_SEQ_3\n"); 4660 break; 4661 case CMAS_RX_AUTH_SEQ_4: 4662 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4663 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n"); 4664 break; 4665 case CMAS_AUTH_SEQ_2_PASS: 4666 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4667 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n"); 4668 break; 4669 case CMAS_AUTH_SEQ_2_FAIL: 4670 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4671 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n"); 4672 break; 4673 case CMAS_TX_ASSOC: 4674 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4675 IPW_DL_ASSOC, "TX_ASSOC\n"); 4676 break; 4677 case CMAS_RX_ASSOC_RESP: 4678 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4679 IPW_DL_ASSOC, "RX_ASSOC_RESP\n"); 4680 4681 break; 4682 case CMAS_ASSOCIATED: 4683 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4684 IPW_DL_ASSOC, "ASSOCIATED\n"); 4685 break; 4686 default: 4687 IPW_DEBUG_NOTIF("auth: failure - %d\n", 4688 auth->state); 4689 break; 4690 } 4691 break; 4692 } 4693 4694 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{ 4695 struct notif_channel_result *x = 4696 ¬if->u.channel_result; 4697 4698 if (size == sizeof(*x)) { 4699 IPW_DEBUG_SCAN("Scan result for channel %d\n", 4700 x->channel_num); 4701 } else { 4702 IPW_DEBUG_SCAN("Scan result of wrong size %d " 4703 "(should be %zd)\n", 4704 size, sizeof(*x)); 4705 } 4706 break; 4707 } 4708 4709 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{ 4710 struct notif_scan_complete *x = ¬if->u.scan_complete; 4711 if (size == sizeof(*x)) { 4712 IPW_DEBUG_SCAN 4713 ("Scan completed: type %d, %d channels, " 4714 "%d status\n", x->scan_type, 4715 x->num_channels, x->status); 4716 } else { 4717 IPW_ERROR("Scan completed of wrong size %d " 4718 "(should be %zd)\n", 4719 size, sizeof(*x)); 4720 } 4721 4722 priv->status &= 4723 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING); 4724 4725 wake_up_interruptible(&priv->wait_state); 4726 cancel_delayed_work(&priv->scan_check); 4727 4728 if (priv->status & STATUS_EXIT_PENDING) 4729 break; 4730 4731 priv->ieee->scans++; 4732 4733#ifdef CONFIG_IPW2200_MONITOR 4734 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 4735 priv->status |= STATUS_SCAN_FORCED; 4736 queue_delayed_work(priv->workqueue, 4737 &priv->request_scan, 0); 4738 break; 4739 } 4740 priv->status &= ~STATUS_SCAN_FORCED; 4741#endif /* CONFIG_IPW2200_MONITOR */ 4742 4743 /* Do queued direct scans first */ 4744 if (priv->status & STATUS_DIRECT_SCAN_PENDING) { 4745 queue_delayed_work(priv->workqueue, 4746 &priv->request_direct_scan, 0); 4747 } 4748 4749 if (!(priv->status & (STATUS_ASSOCIATED | 4750 STATUS_ASSOCIATING | 4751 STATUS_ROAMING | 4752 STATUS_DISASSOCIATING))) 4753 queue_work(priv->workqueue, &priv->associate); 4754 else if (priv->status & STATUS_ROAMING) { 4755 if 
(x->status == SCAN_COMPLETED_STATUS_COMPLETE) 4756 /* If a scan completed and we are in roam mode, then 4757 * the scan that completed was the one requested as a 4758 * result of entering roam... so, schedule the 4759 * roam work */ 4760 queue_work(priv->workqueue, 4761 &priv->roam); 4762 else 4763 /* Don't schedule if we aborted the scan */ 4764 priv->status &= ~STATUS_ROAMING; 4765 } else if (priv->status & STATUS_SCAN_PENDING) 4766 queue_delayed_work(priv->workqueue, 4767 &priv->request_scan, 0); 4768 else if (priv->config & CFG_BACKGROUND_SCAN 4769 && priv->status & STATUS_ASSOCIATED) 4770 queue_delayed_work(priv->workqueue, 4771 &priv->request_scan, 4772 round_jiffies_relative(HZ)); 4773 4774 /* Send an empty event to user space. 4775 * We don't send the received data on the event because 4776 * it would require us to do complex transcoding, and 4777 * we want to minimise the work done in the irq handler 4778 * Use a request to extract the data. 4779 * Also, we generate this even for any scan, regardless 4780 * on how the scan was initiated. User space can just 4781 * sync on periodic scan to get fresh data... 4782 * Jean II */ 4783 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) 4784 handle_scan_event(priv); 4785 break; 4786 } 4787 4788 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{ 4789 struct notif_frag_length *x = ¬if->u.frag_len; 4790 4791 if (size == sizeof(*x)) 4792 IPW_ERROR("Frag length: %d\n", 4793 le16_to_cpu(x->frag_length)); 4794 else 4795 IPW_ERROR("Frag length of wrong size %d " 4796 "(should be %zd)\n", 4797 size, sizeof(*x)); 4798 break; 4799 } 4800 4801 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{ 4802 struct notif_link_deterioration *x = 4803 ¬if->u.link_deterioration; 4804 4805 if (size == sizeof(*x)) { 4806 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4807 "link deterioration: type %d, cnt %d\n", 4808 x->silence_notification_type, 4809 x->silence_count); 4810 memcpy(&priv->last_link_deterioration, x, 4811 sizeof(*x)); 4812 } else { 4813 IPW_ERROR("Link Deterioration of wrong size %d " 4814 "(should be %zd)\n", 4815 size, sizeof(*x)); 4816 } 4817 break; 4818 } 4819 4820 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{ 4821 IPW_ERROR("Dino config\n"); 4822 if (priv->hcmd 4823 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG) 4824 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n"); 4825 4826 break; 4827 } 4828 4829 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{ 4830 struct notif_beacon_state *x = ¬if->u.beacon_state; 4831 if (size != sizeof(*x)) { 4832 IPW_ERROR 4833 ("Beacon state of wrong size %d (should " 4834 "be %zd)\n", size, sizeof(*x)); 4835 break; 4836 } 4837 4838 if (le32_to_cpu(x->state) == 4839 HOST_NOTIFICATION_STATUS_BEACON_MISSING) 4840 ipw_handle_missed_beacon(priv, 4841 le32_to_cpu(x-> 4842 number)); 4843 4844 break; 4845 } 4846 4847 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{ 4848 struct notif_tgi_tx_key *x = ¬if->u.tgi_tx_key; 4849 if (size == sizeof(*x)) { 4850 IPW_ERROR("TGi Tx Key: state 0x%02x sec type " 4851 "0x%02x station %d\n", 4852 x->key_state, x->security_type, 4853 x->station_index); 4854 break; 4855 } 4856 4857 IPW_ERROR 4858 ("TGi Tx Key of wrong size %d (should be %zd)\n", 4859 size, sizeof(*x)); 4860 break; 4861 } 4862 4863 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{ 4864 struct notif_calibration *x = ¬if->u.calibration; 4865 4866 if (size == sizeof(*x)) { 4867 memcpy(&priv->calib, x, sizeof(*x)); 4868 IPW_DEBUG_INFO("TODO: Calibration\n"); 4869 break; 4870 } 4871 4872 IPW_ERROR 4873 ("Calibration of wrong size %d (should be %zd)\n", 4874 size, 
sizeof(*x)); 4875 break; 4876 } 4877 4878 case HOST_NOTIFICATION_NOISE_STATS:{ 4879 if (size == sizeof(u32)) { 4880 priv->exp_avg_noise = 4881 exponential_average(priv->exp_avg_noise, 4882 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff), 4883 DEPTH_NOISE); 4884 break; 4885 } 4886 4887 IPW_ERROR 4888 ("Noise stat is wrong size %d (should be %zd)\n", 4889 size, sizeof(u32)); 4890 break; 4891 } 4892 4893 default: 4894 IPW_DEBUG_NOTIF("Unknown notification: " 4895 "subtype=%d,flags=0x%2x,size=%d\n", 4896 notif->subtype, notif->flags, size); 4897 } 4898} 4899 4900/** 4901 * Destroys all DMA structures and initialise them again 4902 * 4903 * @param priv 4904 * @return error code 4905 */ 4906static int ipw_queue_reset(struct ipw_priv *priv) 4907{ 4908 int rc = 0; 4909 /** @todo customize queue sizes */ 4910 int nTx = 64, nTxCmd = 8; 4911 ipw_tx_queue_free(priv); 4912 /* Tx CMD queue */ 4913 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd, 4914 IPW_TX_CMD_QUEUE_READ_INDEX, 4915 IPW_TX_CMD_QUEUE_WRITE_INDEX, 4916 IPW_TX_CMD_QUEUE_BD_BASE, 4917 IPW_TX_CMD_QUEUE_BD_SIZE); 4918 if (rc) { 4919 IPW_ERROR("Tx Cmd queue init failed\n"); 4920 goto error; 4921 } 4922 /* Tx queue(s) */ 4923 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx, 4924 IPW_TX_QUEUE_0_READ_INDEX, 4925 IPW_TX_QUEUE_0_WRITE_INDEX, 4926 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE); 4927 if (rc) { 4928 IPW_ERROR("Tx 0 queue init failed\n"); 4929 goto error; 4930 } 4931 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx, 4932 IPW_TX_QUEUE_1_READ_INDEX, 4933 IPW_TX_QUEUE_1_WRITE_INDEX, 4934 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE); 4935 if (rc) { 4936 IPW_ERROR("Tx 1 queue init failed\n"); 4937 goto error; 4938 } 4939 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx, 4940 IPW_TX_QUEUE_2_READ_INDEX, 4941 IPW_TX_QUEUE_2_WRITE_INDEX, 4942 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE); 4943 if (rc) { 4944 IPW_ERROR("Tx 2 queue init failed\n"); 4945 goto error; 4946 } 4947 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx, 4948 IPW_TX_QUEUE_3_READ_INDEX, 4949 IPW_TX_QUEUE_3_WRITE_INDEX, 4950 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE); 4951 if (rc) { 4952 IPW_ERROR("Tx 3 queue init failed\n"); 4953 goto error; 4954 } 4955 /* statistics */ 4956 priv->rx_bufs_min = 0; 4957 priv->rx_pend_max = 0; 4958 return rc; 4959 4960 error: 4961 ipw_tx_queue_free(priv); 4962 return rc; 4963} 4964 4965/** 4966 * Reclaim Tx queue entries no more used by NIC. 4967 * 4968 * When FW advances 'R' index, all entries between old and 4969 * new 'R' index need to be reclaimed. As result, some free space 4970 * forms. If there is enough free space (> low mark), wake Tx queue. 
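 * Illustration (made-up values, not from hardware): with n_bd = 64 (the
 * nTx default ipw_queue_reset() uses for the data queues), first_empty
 * = 3 and last_used = 60, the raw difference is -57, so n_bd is added
 * back and 7 entries are reported as still outstanding.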
4971 * 4972 * @note Need to protect against garbage in 'R' index 4973 * @param priv 4974 * @param txq 4975 * @param qindex 4976 * @return Number of used entries remains in the queue 4977 */ 4978static int ipw_queue_tx_reclaim(struct ipw_priv *priv, 4979 struct clx2_tx_queue *txq, int qindex) 4980{ 4981 u32 hw_tail; 4982 int used; 4983 struct clx2_queue *q = &txq->q; 4984 4985 hw_tail = ipw_read32(priv, q->reg_r); 4986 if (hw_tail >= q->n_bd) { 4987 IPW_ERROR 4988 ("Read index for DMA queue (%d) is out of range [0-%d)\n", 4989 hw_tail, q->n_bd); 4990 goto done; 4991 } 4992 for (; q->last_used != hw_tail; 4993 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { 4994 ipw_queue_tx_free_tfd(priv, txq); 4995 priv->tx_packets++; 4996 } 4997 done: 4998 if ((ipw_tx_queue_space(q) > q->low_mark) && 4999 (qindex >= 0)) 5000 netif_wake_queue(priv->net_dev); 5001 used = q->first_empty - q->last_used; 5002 if (used < 0) 5003 used += q->n_bd; 5004 5005 return used; 5006} 5007 5008static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, 5009 int len, int sync) 5010{ 5011 struct clx2_tx_queue *txq = &priv->txq_cmd; 5012 struct clx2_queue *q = &txq->q; 5013 struct tfd_frame *tfd; 5014 5015 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) { 5016 IPW_ERROR("No space for Tx\n"); 5017 return -EBUSY; 5018 } 5019 5020 tfd = &txq->bd[q->first_empty]; 5021 txq->txb[q->first_empty] = NULL; 5022 5023 memset(tfd, 0, sizeof(*tfd)); 5024 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE; 5025 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; 5026 priv->hcmd_seq++; 5027 tfd->u.cmd.index = hcmd; 5028 tfd->u.cmd.length = len; 5029 memcpy(tfd->u.cmd.payload, buf, len); 5030 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); 5031 ipw_write32(priv, q->reg_w, q->first_empty); 5032 _ipw_read32(priv, 0x90); 5033 5034 return 0; 5035} 5036 5037/* 5038 * Rx theory of operation 5039 * 5040 * The host allocates 32 DMA target addresses and passes the host address 5041 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is 5042 * 0 to 31 5043 * 5044 * Rx Queue Indexes 5045 * The host/firmware share two index registers for managing the Rx buffers. 5046 * 5047 * The READ index maps to the first position that the firmware may be writing 5048 * to -- the driver can read up to (but not including) this position and get 5049 * good data. 5050 * The READ index is managed by the firmware once the card is enabled. 5051 * 5052 * The WRITE index maps to the last position the driver has read from -- the 5053 * position preceding WRITE is the last slot the firmware can place a packet. 5054 * 5055 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 5056 * WRITE = READ. 5057 * 5058 * During initialization the host sets up the READ queue position to the first 5059 * INDEX position, and WRITE to the last (READ - 1 wrapped) 5060 * 5061 * When the firmware places a packet in a buffer it will advance the READ index 5062 * and fire the RX interrupt. The driver can then query the READ index and 5063 * process as many packets as possible, moving the WRITE index forward as it 5064 * resets the Rx queue buffers with new memory. 5065 * 5066 * The management in the driver is as follows: 5067 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When 5068 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 5069 * to replensish the ipw->rxq->rx_free. 
5070 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the 5071 * ipw->rxq is replenished and the READ INDEX is updated (updating the 5072 * 'processed' and 'read' driver indexes as well) 5073 * + A received packet is processed and handed to the kernel network stack, 5074 * detached from the ipw->rxq. The driver 'processed' index is updated. 5075 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free 5076 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ 5077 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there 5078 * were enough free buffers and RX_STALLED is set it is cleared. 5079 * 5080 * 5081 * Driver sequence: 5082 * 5083 * ipw_rx_queue_alloc() Allocates rx_free 5084 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls 5085 * ipw_rx_queue_restock 5086 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx 5087 * queue, updates firmware pointers, and updates 5088 * the WRITE index. If insufficient rx_free buffers 5089 * are available, schedules ipw_rx_queue_replenish 5090 * 5091 * -- enable interrupts -- 5092 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the 5093 * READ INDEX, detaching the SKB from the pool. 5094 * Moves the packet buffer from queue to rx_used. 5095 * Calls ipw_rx_queue_restock to refill any empty 5096 * slots. 5097 * ... 5098 * 5099 */ 5100 5101/* 5102 * If there are slots in the RX queue that need to be restocked, 5103 * and we have free pre-allocated buffers, fill the ranks as much 5104 * as we can pulling from rx_free. 5105 * 5106 * This moves the 'write' index forward to catch up with 'processed', and 5107 * also updates the memory address in the firmware to reference the new 5108 * target buffer. 5109 */ 5110static void ipw_rx_queue_restock(struct ipw_priv *priv) 5111{ 5112 struct ipw_rx_queue *rxq = priv->rxq; 5113 struct list_head *element; 5114 struct ipw_rx_mem_buffer *rxb; 5115 unsigned long flags; 5116 int write; 5117 5118 spin_lock_irqsave(&rxq->lock, flags); 5119 write = rxq->write; 5120 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 5121 element = rxq->rx_free.next; 5122 rxb = list_entry(element, struct ipw_rx_mem_buffer, list); 5123 list_del(element); 5124 5125 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE, 5126 rxb->dma_addr); 5127 rxq->queue[rxq->write] = rxb; 5128 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE; 5129 rxq->free_count--; 5130 } 5131 spin_unlock_irqrestore(&rxq->lock, flags); 5132 5133 /* If the pre-allocated buffer pool is dropping low, schedule to 5134 * refill it */ 5135 if (rxq->free_count <= RX_LOW_WATERMARK) 5136 queue_work(priv->workqueue, &priv->rx_replenish); 5137 5138 /* If we've added more space for the firmware to place data, tell it */ 5139 if (write != rxq->write) 5140 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write); 5141} 5142 5143/* 5144 * Move all used packet from rx_used to rx_free, allocating a new SKB for each. 5145 * Also restock the Rx queue via ipw_rx_queue_restock. 
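 * Buffers are allocated with GFP_ATOMIC because the rx queue spinlock
 * is held (with interrupts disabled) while the free list is refilled.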
5146 * 5147 * This is called as a scheduled work item (except for during intialization) 5148 */ 5149static void ipw_rx_queue_replenish(void *data) 5150{ 5151 struct ipw_priv *priv = data; 5152 struct ipw_rx_queue *rxq = priv->rxq; 5153 struct list_head *element; 5154 struct ipw_rx_mem_buffer *rxb; 5155 unsigned long flags; 5156 5157 spin_lock_irqsave(&rxq->lock, flags); 5158 while (!list_empty(&rxq->rx_used)) { 5159 element = rxq->rx_used.next; 5160 rxb = list_entry(element, struct ipw_rx_mem_buffer, list); 5161 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC); 5162 if (!rxb->skb) { 5163 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n", 5164 priv->net_dev->name); 5165 /* We don't reschedule replenish work here -- we will 5166 * call the restock method and if it still needs 5167 * more buffers it will schedule replenish */ 5168 break; 5169 } 5170 list_del(element); 5171 5172 rxb->dma_addr = 5173 pci_map_single(priv->pci_dev, rxb->skb->data, 5174 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 5175 5176 list_add_tail(&rxb->list, &rxq->rx_free); 5177 rxq->free_count++; 5178 } 5179 spin_unlock_irqrestore(&rxq->lock, flags); 5180 5181 ipw_rx_queue_restock(priv); 5182} 5183 5184static void ipw_bg_rx_queue_replenish(struct work_struct *work) 5185{ 5186 struct ipw_priv *priv = 5187 container_of(work, struct ipw_priv, rx_replenish); 5188 mutex_lock(&priv->mutex); 5189 ipw_rx_queue_replenish(priv); 5190 mutex_unlock(&priv->mutex); 5191} 5192 5193/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 5194 * If an SKB has been detached, the POOL needs to have its SKB set to NULL 5195 * This free routine walks the list of POOL entries and if SKB is set to 5196 * non NULL it is unmapped and freed 5197 */ 5198static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq) 5199{ 5200 int i; 5201 5202 if (!rxq) 5203 return; 5204 5205 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 5206 if (rxq->pool[i].skb != NULL) { 5207 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 5208 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 5209 dev_kfree_skb(rxq->pool[i].skb); 5210 } 5211 } 5212 5213 kfree(rxq); 5214} 5215 5216static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv) 5217{ 5218 struct ipw_rx_queue *rxq; 5219 int i; 5220 5221 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL); 5222 if (unlikely(!rxq)) { 5223 IPW_ERROR("memory allocation failed\n"); 5224 return NULL; 5225 } 5226 spin_lock_init(&rxq->lock); 5227 INIT_LIST_HEAD(&rxq->rx_free); 5228 INIT_LIST_HEAD(&rxq->rx_used); 5229 5230 /* Fill the rx_used queue with _all_ of the Rx buffers */ 5231 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 5232 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 5233 5234 /* Set us so that we have processed and used all buffers, but have 5235 * not restocked the Rx queue with fresh buffers */ 5236 rxq->read = rxq->write = 0; 5237 rxq->free_count = 0; 5238 5239 return rxq; 5240} 5241 5242static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate) 5243{ 5244 rate &= ~LIBIPW_BASIC_RATE_MASK; 5245 if (ieee_mode == IEEE_A) { 5246 switch (rate) { 5247 case LIBIPW_OFDM_RATE_6MB: 5248 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 5249 1 : 0; 5250 case LIBIPW_OFDM_RATE_9MB: 5251 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 5252 1 : 0; 5253 case LIBIPW_OFDM_RATE_12MB: 5254 return priv-> 5255 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0; 5256 case LIBIPW_OFDM_RATE_18MB: 5257 return priv-> 5258 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 
1 : 0; 5259 case LIBIPW_OFDM_RATE_24MB: 5260 return priv-> 5261 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0; 5262 case LIBIPW_OFDM_RATE_36MB: 5263 return priv-> 5264 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0; 5265 case LIBIPW_OFDM_RATE_48MB: 5266 return priv-> 5267 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0; 5268 case LIBIPW_OFDM_RATE_54MB: 5269 return priv-> 5270 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0; 5271 default: 5272 return 0; 5273 } 5274 } 5275 5276 /* B and G mixed */ 5277 switch (rate) { 5278 case LIBIPW_CCK_RATE_1MB: 5279 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0; 5280 case LIBIPW_CCK_RATE_2MB: 5281 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0; 5282 case LIBIPW_CCK_RATE_5MB: 5283 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0; 5284 case LIBIPW_CCK_RATE_11MB: 5285 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0; 5286 } 5287 5288 /* If we are limited to B modulations, bail at this point */ 5289 if (ieee_mode == IEEE_B) 5290 return 0; 5291 5292 /* G */ 5293 switch (rate) { 5294 case LIBIPW_OFDM_RATE_6MB: 5295 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0; 5296 case LIBIPW_OFDM_RATE_9MB: 5297 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0; 5298 case LIBIPW_OFDM_RATE_12MB: 5299 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0; 5300 case LIBIPW_OFDM_RATE_18MB: 5301 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0; 5302 case LIBIPW_OFDM_RATE_24MB: 5303 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0; 5304 case LIBIPW_OFDM_RATE_36MB: 5305 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0; 5306 case LIBIPW_OFDM_RATE_48MB: 5307 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0; 5308 case LIBIPW_OFDM_RATE_54MB: 5309 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 
1 : 0; 5310 } 5311 5312 return 0; 5313} 5314 5315static int ipw_compatible_rates(struct ipw_priv *priv, 5316 const struct libipw_network *network, 5317 struct ipw_supported_rates *rates) 5318{ 5319 int num_rates, i; 5320 5321 memset(rates, 0, sizeof(*rates)); 5322 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES); 5323 rates->num_rates = 0; 5324 for (i = 0; i < num_rates; i++) { 5325 if (!ipw_is_rate_in_mask(priv, network->mode, 5326 network->rates[i])) { 5327 5328 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) { 5329 IPW_DEBUG_SCAN("Adding masked mandatory " 5330 "rate %02X\n", 5331 network->rates[i]); 5332 rates->supported_rates[rates->num_rates++] = 5333 network->rates[i]; 5334 continue; 5335 } 5336 5337 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", 5338 network->rates[i], priv->rates_mask); 5339 continue; 5340 } 5341 5342 rates->supported_rates[rates->num_rates++] = network->rates[i]; 5343 } 5344 5345 num_rates = min(network->rates_ex_len, 5346 (u8) (IPW_MAX_RATES - num_rates)); 5347 for (i = 0; i < num_rates; i++) { 5348 if (!ipw_is_rate_in_mask(priv, network->mode, 5349 network->rates_ex[i])) { 5350 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) { 5351 IPW_DEBUG_SCAN("Adding masked mandatory " 5352 "rate %02X\n", 5353 network->rates_ex[i]); 5354 rates->supported_rates[rates->num_rates++] = 5355 network->rates[i]; 5356 continue; 5357 } 5358 5359 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", 5360 network->rates_ex[i], priv->rates_mask); 5361 continue; 5362 } 5363 5364 rates->supported_rates[rates->num_rates++] = 5365 network->rates_ex[i]; 5366 } 5367 5368 return 1; 5369} 5370 5371static void ipw_copy_rates(struct ipw_supported_rates *dest, 5372 const struct ipw_supported_rates *src) 5373{ 5374 u8 i; 5375 for (i = 0; i < src->num_rates; i++) 5376 dest->supported_rates[i] = src->supported_rates[i]; 5377 dest->num_rates = src->num_rates; 5378} 5379 5380/* TODO: Look at sniffed packets in the air to determine if the basic rate 5381 * mask should ever be used -- right now all callers to add the scan rates are 5382 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */ 5383static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates, 5384 u8 modulation, u32 rate_mask) 5385{ 5386 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ? 5387 LIBIPW_BASIC_RATE_MASK : 0; 5388 5389 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK) 5390 rates->supported_rates[rates->num_rates++] = 5391 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB; 5392 5393 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK) 5394 rates->supported_rates[rates->num_rates++] = 5395 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB; 5396 5397 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK) 5398 rates->supported_rates[rates->num_rates++] = basic_mask | 5399 LIBIPW_CCK_RATE_5MB; 5400 5401 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK) 5402 rates->supported_rates[rates->num_rates++] = basic_mask | 5403 LIBIPW_CCK_RATE_11MB; 5404} 5405 5406static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates, 5407 u8 modulation, u32 rate_mask) 5408{ 5409 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ? 
5410 LIBIPW_BASIC_RATE_MASK : 0; 5411 5412 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK) 5413 rates->supported_rates[rates->num_rates++] = basic_mask | 5414 LIBIPW_OFDM_RATE_6MB; 5415 5416 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK) 5417 rates->supported_rates[rates->num_rates++] = 5418 LIBIPW_OFDM_RATE_9MB; 5419 5420 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK) 5421 rates->supported_rates[rates->num_rates++] = basic_mask | 5422 LIBIPW_OFDM_RATE_12MB; 5423 5424 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK) 5425 rates->supported_rates[rates->num_rates++] = 5426 LIBIPW_OFDM_RATE_18MB; 5427 5428 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK) 5429 rates->supported_rates[rates->num_rates++] = basic_mask | 5430 LIBIPW_OFDM_RATE_24MB; 5431 5432 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK) 5433 rates->supported_rates[rates->num_rates++] = 5434 LIBIPW_OFDM_RATE_36MB; 5435 5436 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK) 5437 rates->supported_rates[rates->num_rates++] = 5438 LIBIPW_OFDM_RATE_48MB; 5439 5440 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK) 5441 rates->supported_rates[rates->num_rates++] = 5442 LIBIPW_OFDM_RATE_54MB; 5443} 5444 5445struct ipw_network_match { 5446 struct libipw_network *network; 5447 struct ipw_supported_rates rates; 5448}; 5449 5450static int ipw_find_adhoc_network(struct ipw_priv *priv, 5451 struct ipw_network_match *match, 5452 struct libipw_network *network, 5453 int roaming) 5454{ 5455 struct ipw_supported_rates rates; 5456 DECLARE_SSID_BUF(ssid); 5457 5458 /* Verify that this network's capability is compatible with the 5459 * current mode (AdHoc or Infrastructure) */ 5460 if ((priv->ieee->iw_mode == IW_MODE_ADHOC && 5461 !(network->capability & WLAN_CAPABILITY_IBSS))) { 5462 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to " 5463 "capability mismatch.\n", 5464 print_ssid(ssid, network->ssid, 5465 network->ssid_len), 5466 network->bssid); 5467 return 0; 5468 } 5469 5470 if (unlikely(roaming)) { 5471 /* If we are roaming, then ensure check if this is a valid 5472 * network to try and roam to */ 5473 if ((network->ssid_len != match->network->ssid_len) || 5474 memcmp(network->ssid, match->network->ssid, 5475 network->ssid_len)) { 5476 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5477 "because of non-network ESSID.\n", 5478 print_ssid(ssid, network->ssid, 5479 network->ssid_len), 5480 network->bssid); 5481 return 0; 5482 } 5483 } else { 5484 /* If an ESSID has been configured then compare the broadcast 5485 * ESSID to ours */ 5486 if ((priv->config & CFG_STATIC_ESSID) && 5487 ((network->ssid_len != priv->essid_len) || 5488 memcmp(network->ssid, priv->essid, 5489 min(network->ssid_len, priv->essid_len)))) { 5490 char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 5491 5492 strncpy(escaped, 5493 print_ssid(ssid, network->ssid, 5494 network->ssid_len), 5495 sizeof(escaped)); 5496 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5497 "because of ESSID mismatch: '%s'.\n", 5498 escaped, network->bssid, 5499 print_ssid(ssid, priv->essid, 5500 priv->essid_len)); 5501 return 0; 5502 } 5503 } 5504 5505 /* If the old network rate is better than this one, don't bother 5506 * testing everything else. 
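 * (Despite the wording, the check below compares the two halves of the
 * beacon TSF timestamp rather than a rate: a candidate whose timestamp
 * is lower than the current network's is treated as a newer IBSS and
 * rejected, so a merge only ever moves toward the longer-running
 * network.)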
*/ 5507 5508 if (network->time_stamp[0] < match->network->time_stamp[0]) { 5509 IPW_DEBUG_MERGE("Network '%s excluded because newer than " 5510 "current network.\n", 5511 print_ssid(ssid, match->network->ssid, 5512 match->network->ssid_len)); 5513 return 0; 5514 } else if (network->time_stamp[1] < match->network->time_stamp[1]) { 5515 IPW_DEBUG_MERGE("Network '%s excluded because newer than " 5516 "current network.\n", 5517 print_ssid(ssid, match->network->ssid, 5518 match->network->ssid_len)); 5519 return 0; 5520 } 5521 5522 /* Now go through and see if the requested network is valid... */ 5523 if (priv->ieee->scan_age != 0 && 5524 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5525 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5526 "because of age: %ums.\n", 5527 print_ssid(ssid, network->ssid, 5528 network->ssid_len), 5529 network->bssid, 5530 jiffies_to_msecs(jiffies - 5531 network->last_scanned)); 5532 return 0; 5533 } 5534 5535 if ((priv->config & CFG_STATIC_CHANNEL) && 5536 (network->channel != priv->channel)) { 5537 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5538 "because of channel mismatch: %d != %d.\n", 5539 print_ssid(ssid, network->ssid, 5540 network->ssid_len), 5541 network->bssid, 5542 network->channel, priv->channel); 5543 return 0; 5544 } 5545 5546 /* Verify privacy compatability */ 5547 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != 5548 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { 5549 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5550 "because of privacy mismatch: %s != %s.\n", 5551 print_ssid(ssid, network->ssid, 5552 network->ssid_len), 5553 network->bssid, 5554 priv-> 5555 capability & CAP_PRIVACY_ON ? "on" : "off", 5556 network-> 5557 capability & WLAN_CAPABILITY_PRIVACY ? "on" : 5558 "off"); 5559 return 0; 5560 } 5561 5562 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) { 5563 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5564 "because of the same BSSID match: %pM" 5565 ".\n", print_ssid(ssid, network->ssid, 5566 network->ssid_len), 5567 network->bssid, 5568 priv->bssid); 5569 return 0; 5570 } 5571 5572 /* Filter out any incompatible freq / mode combinations */ 5573 if (!libipw_is_valid_mode(priv->ieee, network->mode)) { 5574 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5575 "because of invalid frequency/mode " 5576 "combination.\n", 5577 print_ssid(ssid, network->ssid, 5578 network->ssid_len), 5579 network->bssid); 5580 return 0; 5581 } 5582 5583 /* Ensure that the rates supported by the driver are compatible with 5584 * this AP, including verification of basic rates (mandatory) */ 5585 if (!ipw_compatible_rates(priv, network, &rates)) { 5586 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5587 "because configured rate mask excludes " 5588 "AP mandatory rate.\n", 5589 print_ssid(ssid, network->ssid, 5590 network->ssid_len), 5591 network->bssid); 5592 return 0; 5593 } 5594 5595 if (rates.num_rates == 0) { 5596 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5597 "because of no compatible rates.\n", 5598 print_ssid(ssid, network->ssid, 5599 network->ssid_len), 5600 network->bssid); 5601 return 0; 5602 } 5603 5604 /* TODO: Perform any further minimal comparititive tests. We do not 5605 * want to put too much policy logic here; intelligent scan selection 5606 * should occur within a generic IEEE 802.11 user space tool. 
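 *
 * When a candidate survives every check above, the code below records
 * it as the new best match (copying its filtered rate set) and returns
 * 1; ipw_merge_adhoc_network() then disassociates and adopts that
 * network if it differs from the one we are currently joined to.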
*/ 5607 5608 /* Set up 'new' AP to this network */ 5609 ipw_copy_rates(&match->rates, &rates); 5610 match->network = network; 5611 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n", 5612 print_ssid(ssid, network->ssid, network->ssid_len), 5613 network->bssid); 5614 5615 return 1; 5616} 5617 5618static void ipw_merge_adhoc_network(struct work_struct *work) 5619{ 5620 DECLARE_SSID_BUF(ssid); 5621 struct ipw_priv *priv = 5622 container_of(work, struct ipw_priv, merge_networks); 5623 struct libipw_network *network = NULL; 5624 struct ipw_network_match match = { 5625 .network = priv->assoc_network 5626 }; 5627 5628 if ((priv->status & STATUS_ASSOCIATED) && 5629 (priv->ieee->iw_mode == IW_MODE_ADHOC)) { 5630 /* First pass through ROAM process -- look for a better 5631 * network */ 5632 unsigned long flags; 5633 5634 spin_lock_irqsave(&priv->ieee->lock, flags); 5635 list_for_each_entry(network, &priv->ieee->network_list, list) { 5636 if (network != priv->assoc_network) 5637 ipw_find_adhoc_network(priv, &match, network, 5638 1); 5639 } 5640 spin_unlock_irqrestore(&priv->ieee->lock, flags); 5641 5642 if (match.network == priv->assoc_network) { 5643 IPW_DEBUG_MERGE("No better ADHOC in this network to " 5644 "merge to.\n"); 5645 return; 5646 } 5647 5648 mutex_lock(&priv->mutex); 5649 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) { 5650 IPW_DEBUG_MERGE("remove network %s\n", 5651 print_ssid(ssid, priv->essid, 5652 priv->essid_len)); 5653 ipw_remove_current_network(priv); 5654 } 5655 5656 ipw_disassociate(priv); 5657 priv->assoc_network = match.network; 5658 mutex_unlock(&priv->mutex); 5659 return; 5660 } 5661} 5662 5663static int ipw_best_network(struct ipw_priv *priv, 5664 struct ipw_network_match *match, 5665 struct libipw_network *network, int roaming) 5666{ 5667 struct ipw_supported_rates rates; 5668 DECLARE_SSID_BUF(ssid); 5669 5670 /* Verify that this network's capability is compatible with the 5671 * current mode (AdHoc or Infrastructure) */ 5672 if ((priv->ieee->iw_mode == IW_MODE_INFRA && 5673 !(network->capability & WLAN_CAPABILITY_ESS)) || 5674 (priv->ieee->iw_mode == IW_MODE_ADHOC && 5675 !(network->capability & WLAN_CAPABILITY_IBSS))) { 5676 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to " 5677 "capability mismatch.\n", 5678 print_ssid(ssid, network->ssid, 5679 network->ssid_len), 5680 network->bssid); 5681 return 0; 5682 } 5683 5684 if (unlikely(roaming)) { 5685 /* If we are roaming, then ensure check if this is a valid 5686 * network to try and roam to */ 5687 if ((network->ssid_len != match->network->ssid_len) || 5688 memcmp(network->ssid, match->network->ssid, 5689 network->ssid_len)) { 5690 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5691 "because of non-network ESSID.\n", 5692 print_ssid(ssid, network->ssid, 5693 network->ssid_len), 5694 network->bssid); 5695 return 0; 5696 } 5697 } else { 5698 /* If an ESSID has been configured then compare the broadcast 5699 * ESSID to ours */ 5700 if ((priv->config & CFG_STATIC_ESSID) && 5701 ((network->ssid_len != priv->essid_len) || 5702 memcmp(network->ssid, priv->essid, 5703 min(network->ssid_len, priv->essid_len)))) { 5704 char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 5705 strncpy(escaped, 5706 print_ssid(ssid, network->ssid, 5707 network->ssid_len), 5708 sizeof(escaped)); 5709 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5710 "because of ESSID mismatch: '%s'.\n", 5711 escaped, network->bssid, 5712 print_ssid(ssid, priv->essid, 5713 priv->essid_len)); 5714 return 0; 5715 } 5716 } 5717 5718 /* If the old network rate is better than 
this one, don't bother 5719 * testing everything else. */ 5720 if (match->network && match->network->stats.rssi > network->stats.rssi) { 5721 char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 5722 strncpy(escaped, 5723 print_ssid(ssid, network->ssid, network->ssid_len), 5724 sizeof(escaped)); 5725 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because " 5726 "'%s (%pM)' has a stronger signal.\n", 5727 escaped, network->bssid, 5728 print_ssid(ssid, match->network->ssid, 5729 match->network->ssid_len), 5730 match->network->bssid); 5731 return 0; 5732 } 5733 5734 /* If this network has already had an association attempt within the 5735 * last 3 seconds, do not try and associate again... */ 5736 if (network->last_associate && 5737 time_after(network->last_associate + (HZ * 3UL), jiffies)) { 5738 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5739 "because of storming (%ums since last " 5740 "assoc attempt).\n", 5741 print_ssid(ssid, network->ssid, 5742 network->ssid_len), 5743 network->bssid, 5744 jiffies_to_msecs(jiffies - 5745 network->last_associate)); 5746 return 0; 5747 } 5748 5749 /* Now go through and see if the requested network is valid... */ 5750 if (priv->ieee->scan_age != 0 && 5751 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5752 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5753 "because of age: %ums.\n", 5754 print_ssid(ssid, network->ssid, 5755 network->ssid_len), 5756 network->bssid, 5757 jiffies_to_msecs(jiffies - 5758 network->last_scanned)); 5759 return 0; 5760 } 5761 5762 if ((priv->config & CFG_STATIC_CHANNEL) && 5763 (network->channel != priv->channel)) { 5764 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5765 "because of channel mismatch: %d != %d.\n", 5766 print_ssid(ssid, network->ssid, 5767 network->ssid_len), 5768 network->bssid, 5769 network->channel, priv->channel); 5770 return 0; 5771 } 5772 5773 /* Verify privacy compatability */ 5774 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != 5775 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { 5776 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5777 "because of privacy mismatch: %s != %s.\n", 5778 print_ssid(ssid, network->ssid, 5779 network->ssid_len), 5780 network->bssid, 5781 priv->capability & CAP_PRIVACY_ON ? "on" : 5782 "off", 5783 network->capability & 5784 WLAN_CAPABILITY_PRIVACY ? 
"on" : "off"); 5785 return 0; 5786 } 5787 5788 if ((priv->config & CFG_STATIC_BSSID) && 5789 memcmp(network->bssid, priv->bssid, ETH_ALEN)) { 5790 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5791 "because of BSSID mismatch: %pM.\n", 5792 print_ssid(ssid, network->ssid, 5793 network->ssid_len), 5794 network->bssid, priv->bssid); 5795 return 0; 5796 } 5797 5798 /* Filter out any incompatible freq / mode combinations */ 5799 if (!libipw_is_valid_mode(priv->ieee, network->mode)) { 5800 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5801 "because of invalid frequency/mode " 5802 "combination.\n", 5803 print_ssid(ssid, network->ssid, 5804 network->ssid_len), 5805 network->bssid); 5806 return 0; 5807 } 5808 5809 /* Filter out invalid channel in current GEO */ 5810 if (!libipw_is_valid_channel(priv->ieee, network->channel)) { 5811 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5812 "because of invalid channel in current GEO\n", 5813 print_ssid(ssid, network->ssid, 5814 network->ssid_len), 5815 network->bssid); 5816 return 0; 5817 } 5818 5819 /* Ensure that the rates supported by the driver are compatible with 5820 * this AP, including verification of basic rates (mandatory) */ 5821 if (!ipw_compatible_rates(priv, network, &rates)) { 5822 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5823 "because configured rate mask excludes " 5824 "AP mandatory rate.\n", 5825 print_ssid(ssid, network->ssid, 5826 network->ssid_len), 5827 network->bssid); 5828 return 0; 5829 } 5830 5831 if (rates.num_rates == 0) { 5832 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5833 "because of no compatible rates.\n", 5834 print_ssid(ssid, network->ssid, 5835 network->ssid_len), 5836 network->bssid); 5837 return 0; 5838 } 5839 5840 /* TODO: Perform any further minimal comparititive tests. We do not 5841 * want to put too much policy logic here; intelligent scan selection 5842 * should occur within a generic IEEE 802.11 user space tool. */ 5843 5844 /* Set up 'new' AP to this network */ 5845 ipw_copy_rates(&match->rates, &rates); 5846 match->network = network; 5847 5848 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n", 5849 print_ssid(ssid, network->ssid, network->ssid_len), 5850 network->bssid); 5851 5852 return 1; 5853} 5854 5855static void ipw_adhoc_create(struct ipw_priv *priv, 5856 struct libipw_network *network) 5857{ 5858 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 5859 int i; 5860 5861 /* 5862 * For the purposes of scanning, we can set our wireless mode 5863 * to trigger scans across combinations of bands, but when it 5864 * comes to creating a new ad-hoc network, we have tell the FW 5865 * exactly which band to use. 5866 * 5867 * We also have the possibility of an invalid channel for the 5868 * chossen band. Attempting to create a new ad-hoc network 5869 * with an invalid channel for wireless mode will trigger a 5870 * FW fatal error. 
5871 * 5872 */ 5873 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 5874 case LIBIPW_52GHZ_BAND: 5875 network->mode = IEEE_A; 5876 i = libipw_channel_to_index(priv->ieee, priv->channel); 5877 BUG_ON(i == -1); 5878 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) { 5879 IPW_WARNING("Overriding invalid channel\n"); 5880 priv->channel = geo->a[0].channel; 5881 } 5882 break; 5883 5884 case LIBIPW_24GHZ_BAND: 5885 if (priv->ieee->mode & IEEE_G) 5886 network->mode = IEEE_G; 5887 else 5888 network->mode = IEEE_B; 5889 i = libipw_channel_to_index(priv->ieee, priv->channel); 5890 BUG_ON(i == -1); 5891 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) { 5892 IPW_WARNING("Overriding invalid channel\n"); 5893 priv->channel = geo->bg[0].channel; 5894 } 5895 break; 5896 5897 default: 5898 IPW_WARNING("Overriding invalid channel\n"); 5899 if (priv->ieee->mode & IEEE_A) { 5900 network->mode = IEEE_A; 5901 priv->channel = geo->a[0].channel; 5902 } else if (priv->ieee->mode & IEEE_G) { 5903 network->mode = IEEE_G; 5904 priv->channel = geo->bg[0].channel; 5905 } else { 5906 network->mode = IEEE_B; 5907 priv->channel = geo->bg[0].channel; 5908 } 5909 break; 5910 } 5911 5912 network->channel = priv->channel; 5913 priv->config |= CFG_ADHOC_PERSIST; 5914 ipw_create_bssid(priv, network->bssid); 5915 network->ssid_len = priv->essid_len; 5916 memcpy(network->ssid, priv->essid, priv->essid_len); 5917 memset(&network->stats, 0, sizeof(network->stats)); 5918 network->capability = WLAN_CAPABILITY_IBSS; 5919 if (!(priv->config & CFG_PREAMBLE_LONG)) 5920 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE; 5921 if (priv->capability & CAP_PRIVACY_ON) 5922 network->capability |= WLAN_CAPABILITY_PRIVACY; 5923 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH); 5924 memcpy(network->rates, priv->rates.supported_rates, network->rates_len); 5925 network->rates_ex_len = priv->rates.num_rates - network->rates_len; 5926 memcpy(network->rates_ex, 5927 &priv->rates.supported_rates[network->rates_len], 5928 network->rates_ex_len); 5929 network->last_scanned = 0; 5930 network->flags = 0; 5931 network->last_associate = 0; 5932 network->time_stamp[0] = 0; 5933 network->time_stamp[1] = 0; 5934 network->beacon_interval = 100; /* Default */ 5935 network->listen_interval = 10; /* Default */ 5936 network->atim_window = 0; /* Default */ 5937 network->wpa_ie_len = 0; 5938 network->rsn_ie_len = 0; 5939} 5940 5941static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index) 5942{ 5943 struct ipw_tgi_tx_key key; 5944 5945 if (!(priv->ieee->sec.flags & (1 << index))) 5946 return; 5947 5948 key.key_id = index; 5949 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH); 5950 key.security_type = type; 5951 key.station_index = 0; /* always 0 for BSS */ 5952 key.flags = 0; 5953 /* 0 for new key; previous value of counter (after fatal error) */ 5954 key.tx_counter[0] = cpu_to_le32(0); 5955 key.tx_counter[1] = cpu_to_le32(0); 5956 5957 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key); 5958} 5959 5960static void ipw_send_wep_keys(struct ipw_priv *priv, int type) 5961{ 5962 struct ipw_wep_key key; 5963 int i; 5964 5965 key.cmd_id = DINO_CMD_WEP_KEY; 5966 key.seq_num = 0; 5967 5968 /* Note: AES keys cannot be set for multiple times. 5969 * Only set it at the first time. 
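 *
 * As an illustration (the key slots here are hypothetical): with
 * type == DCW_WEP_KEY_SEC_TYPE_WEP and only slots 0 and 2 configured in
 * sec.flags, the loop below issues two IPW_CMD_WEP_KEY PDUs,
 *	key_index = 0 | DCW_WEP_KEY_SEC_TYPE_WEP, key_size = sec.key_sizes[0]
 *	key_index = 2 | DCW_WEP_KEY_SEC_TYPE_WEP, key_size = sec.key_sizes[2]
 * while slots 1 and 3 are skipped without sending a command.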
*/ 5970 for (i = 0; i < 4; i++) { 5971 key.key_index = i | type; 5972 if (!(priv->ieee->sec.flags & (1 << i))) { 5973 key.key_size = 0; 5974 continue; 5975 } 5976 5977 key.key_size = priv->ieee->sec.key_sizes[i]; 5978 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size); 5979 5980 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key); 5981 } 5982} 5983 5984static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level) 5985{ 5986 if (priv->ieee->host_encrypt) 5987 return; 5988 5989 switch (level) { 5990 case SEC_LEVEL_3: 5991 priv->sys_config.disable_unicast_decryption = 0; 5992 priv->ieee->host_decrypt = 0; 5993 break; 5994 case SEC_LEVEL_2: 5995 priv->sys_config.disable_unicast_decryption = 1; 5996 priv->ieee->host_decrypt = 1; 5997 break; 5998 case SEC_LEVEL_1: 5999 priv->sys_config.disable_unicast_decryption = 0; 6000 priv->ieee->host_decrypt = 0; 6001 break; 6002 case SEC_LEVEL_0: 6003 priv->sys_config.disable_unicast_decryption = 1; 6004 break; 6005 default: 6006 break; 6007 } 6008} 6009 6010static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level) 6011{ 6012 if (priv->ieee->host_encrypt) 6013 return; 6014 6015 switch (level) { 6016 case SEC_LEVEL_3: 6017 priv->sys_config.disable_multicast_decryption = 0; 6018 break; 6019 case SEC_LEVEL_2: 6020 priv->sys_config.disable_multicast_decryption = 1; 6021 break; 6022 case SEC_LEVEL_1: 6023 priv->sys_config.disable_multicast_decryption = 0; 6024 break; 6025 case SEC_LEVEL_0: 6026 priv->sys_config.disable_multicast_decryption = 1; 6027 break; 6028 default: 6029 break; 6030 } 6031} 6032 6033static void ipw_set_hwcrypto_keys(struct ipw_priv *priv) 6034{ 6035 switch (priv->ieee->sec.level) { 6036 case SEC_LEVEL_3: 6037 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY) 6038 ipw_send_tgi_tx_key(priv, 6039 DCT_FLAG_EXT_SECURITY_CCM, 6040 priv->ieee->sec.active_key); 6041 6042 if (!priv->ieee->host_mc_decrypt) 6043 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM); 6044 break; 6045 case SEC_LEVEL_2: 6046 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY) 6047 ipw_send_tgi_tx_key(priv, 6048 DCT_FLAG_EXT_SECURITY_TKIP, 6049 priv->ieee->sec.active_key); 6050 break; 6051 case SEC_LEVEL_1: 6052 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); 6053 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level); 6054 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level); 6055 break; 6056 case SEC_LEVEL_0: 6057 default: 6058 break; 6059 } 6060} 6061 6062static void ipw_adhoc_check(void *data) 6063{ 6064 struct ipw_priv *priv = data; 6065 6066 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold && 6067 !(priv->config & CFG_ADHOC_PERSIST)) { 6068 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 6069 IPW_DL_STATE | IPW_DL_ASSOC, 6070 "Missed beacon: %d - disassociate\n", 6071 priv->missed_adhoc_beacons); 6072 ipw_remove_current_network(priv); 6073 ipw_disassociate(priv); 6074 return; 6075 } 6076 6077 queue_delayed_work(priv->workqueue, &priv->adhoc_check, 6078 le16_to_cpu(priv->assoc_request.beacon_interval)); 6079} 6080 6081static void ipw_bg_adhoc_check(struct work_struct *work) 6082{ 6083 struct ipw_priv *priv = 6084 container_of(work, struct ipw_priv, adhoc_check.work); 6085 mutex_lock(&priv->mutex); 6086 ipw_adhoc_check(priv); 6087 mutex_unlock(&priv->mutex); 6088} 6089 6090static void ipw_debug_config(struct ipw_priv *priv) 6091{ 6092 DECLARE_SSID_BUF(ssid); 6093 IPW_DEBUG_INFO("Scan completed, no valid APs matched " 6094 "[CFG 0x%08X]\n", priv->config); 6095 if (priv->config & CFG_STATIC_CHANNEL) 6096 IPW_DEBUG_INFO("Channel locked 
to %d\n", priv->channel); 6097 else 6098 IPW_DEBUG_INFO("Channel unlocked.\n"); 6099 if (priv->config & CFG_STATIC_ESSID) 6100 IPW_DEBUG_INFO("ESSID locked to '%s'\n", 6101 print_ssid(ssid, priv->essid, priv->essid_len)); 6102 else 6103 IPW_DEBUG_INFO("ESSID unlocked.\n"); 6104 if (priv->config & CFG_STATIC_BSSID) 6105 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid); 6106 else 6107 IPW_DEBUG_INFO("BSSID unlocked.\n"); 6108 if (priv->capability & CAP_PRIVACY_ON) 6109 IPW_DEBUG_INFO("PRIVACY on\n"); 6110 else 6111 IPW_DEBUG_INFO("PRIVACY off\n"); 6112 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask); 6113} 6114 6115static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode) 6116{ 6117 /* TODO: Verify that this works... */ 6118 struct ipw_fixed_rate fr; 6119 u32 reg; 6120 u16 mask = 0; 6121 u16 new_tx_rates = priv->rates_mask; 6122 6123 /* Identify 'current FW band' and match it with the fixed 6124 * Tx rates */ 6125 6126 switch (priv->ieee->freq_band) { 6127 case LIBIPW_52GHZ_BAND: /* A only */ 6128 /* IEEE_A */ 6129 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) { 6130 /* Invalid fixed rate mask */ 6131 IPW_DEBUG_WX 6132 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6133 new_tx_rates = 0; 6134 break; 6135 } 6136 6137 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A; 6138 break; 6139 6140 default: /* 2.4Ghz or Mixed */ 6141 /* IEEE_B */ 6142 if (mode == IEEE_B) { 6143 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) { 6144 /* Invalid fixed rate mask */ 6145 IPW_DEBUG_WX 6146 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6147 new_tx_rates = 0; 6148 } 6149 break; 6150 } 6151 6152 /* IEEE_G */ 6153 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK | 6154 LIBIPW_OFDM_RATES_MASK)) { 6155 /* Invalid fixed rate mask */ 6156 IPW_DEBUG_WX 6157 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6158 new_tx_rates = 0; 6159 break; 6160 } 6161 6162 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) { 6163 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1); 6164 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK; 6165 } 6166 6167 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) { 6168 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1); 6169 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK; 6170 } 6171 6172 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) { 6173 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1); 6174 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK; 6175 } 6176 6177 new_tx_rates |= mask; 6178 break; 6179 } 6180 6181 fr.tx_rates = cpu_to_le16(new_tx_rates); 6182 6183 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE); 6184 ipw_write_reg32(priv, reg, *(u32 *) & fr); 6185} 6186 6187static void ipw_abort_scan(struct ipw_priv *priv) 6188{ 6189 int err; 6190 6191 if (priv->status & STATUS_SCAN_ABORTING) { 6192 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n"); 6193 return; 6194 } 6195 priv->status |= STATUS_SCAN_ABORTING; 6196 6197 err = ipw_send_scan_abort(priv); 6198 if (err) 6199 IPW_DEBUG_HC("Request to abort scan failed.\n"); 6200} 6201 6202static void ipw_add_scan_channels(struct ipw_priv *priv, 6203 struct ipw_scan_request_ext *scan, 6204 int scan_type) 6205{ 6206 int channel_index = 0; 6207 const struct libipw_geo *geo; 6208 int i; 6209 6210 geo = libipw_get_geo(priv->ieee); 6211 6212 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) { 6213 int start = channel_index; 6214 for (i = 0; i < geo->a_channels; i++) { 6215 if ((priv->status & STATUS_ASSOCIATED) && 6216 geo->a[i].channel == priv->channel) 6217 continue; 6218 channel_index++; 6219 scan->channels_list[channel_index] = geo->a[i].channel; 6220 ipw_set_scan_type(scan, 
channel_index, 6221 geo->a[i]. 6222 flags & LIBIPW_CH_PASSIVE_ONLY ? 6223 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN : 6224 scan_type); 6225 } 6226 6227 if (start != channel_index) { 6228 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) | 6229 (channel_index - start); 6230 channel_index++; 6231 } 6232 } 6233 6234 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) { 6235 int start = channel_index; 6236 if (priv->config & CFG_SPEED_SCAN) { 6237 int index; 6238 u8 channels[LIBIPW_24GHZ_CHANNELS] = { 6239 /* nop out the list */ 6240 [0] = 0 6241 }; 6242 6243 u8 channel; 6244 while (channel_index < IPW_SCAN_CHANNELS - 1) { 6245 channel = 6246 priv->speed_scan[priv->speed_scan_pos]; 6247 if (channel == 0) { 6248 priv->speed_scan_pos = 0; 6249 channel = priv->speed_scan[0]; 6250 } 6251 if ((priv->status & STATUS_ASSOCIATED) && 6252 channel == priv->channel) { 6253 priv->speed_scan_pos++; 6254 continue; 6255 } 6256 6257 /* If this channel has already been 6258 * added in scan, break from loop 6259 * and this will be the first channel 6260 * in the next scan. 6261 */ 6262 if (channels[channel - 1] != 0) 6263 break; 6264 6265 channels[channel - 1] = 1; 6266 priv->speed_scan_pos++; 6267 channel_index++; 6268 scan->channels_list[channel_index] = channel; 6269 index = 6270 libipw_channel_to_index(priv->ieee, channel); 6271 ipw_set_scan_type(scan, channel_index, 6272 geo->bg[index]. 6273 flags & 6274 LIBIPW_CH_PASSIVE_ONLY ? 6275 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN 6276 : scan_type); 6277 } 6278 } else { 6279 for (i = 0; i < geo->bg_channels; i++) { 6280 if ((priv->status & STATUS_ASSOCIATED) && 6281 geo->bg[i].channel == priv->channel) 6282 continue; 6283 channel_index++; 6284 scan->channels_list[channel_index] = 6285 geo->bg[i].channel; 6286 ipw_set_scan_type(scan, channel_index, 6287 geo->bg[i]. 6288 flags & 6289 LIBIPW_CH_PASSIVE_ONLY ? 6290 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN 6291 : scan_type); 6292 } 6293 } 6294 6295 if (start != channel_index) { 6296 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) | 6297 (channel_index - start); 6298 } 6299 } 6300} 6301 6302static int ipw_passive_dwell_time(struct ipw_priv *priv) 6303{ 6304 /* staying on passive channels longer than the DTIM interval during a 6305 * scan, while associated, causes the firmware to cancel the scan 6306 * without notification. Hence, don't stay on passive channels longer 6307 * than the beacon interval. 6308 */ 6309 if (priv->status & STATUS_ASSOCIATED 6310 && priv->assoc_network->beacon_interval > 10) 6311 return priv->assoc_network->beacon_interval - 10; 6312 else 6313 return 120; 6314} 6315 6316static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct) 6317{ 6318 struct ipw_scan_request_ext scan; 6319 int err = 0, scan_type; 6320 6321 if (!(priv->status & STATUS_INIT) || 6322 (priv->status & STATUS_EXIT_PENDING)) 6323 return 0; 6324 6325 mutex_lock(&priv->mutex); 6326 6327 if (direct && (priv->direct_scan_ssid_len == 0)) { 6328 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n"); 6329 priv->status &= ~STATUS_DIRECT_SCAN_PENDING; 6330 goto done; 6331 } 6332 6333 if (priv->status & STATUS_SCANNING) { 6334 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n"); 6335 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : 6336 STATUS_SCAN_PENDING; 6337 goto done; 6338 } 6339 6340 if (!(priv->status & STATUS_SCAN_FORCED) && 6341 priv->status & STATUS_SCAN_ABORTING) { 6342 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n"); 6343 priv->status |= direct ? 
STATUS_DIRECT_SCAN_PENDING : 6344 STATUS_SCAN_PENDING; 6345 goto done; 6346 } 6347 6348 if (priv->status & STATUS_RF_KILL_MASK) { 6349 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n"); 6350 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : 6351 STATUS_SCAN_PENDING; 6352 goto done; 6353 } 6354 6355 memset(&scan, 0, sizeof(scan)); 6356 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee)); 6357 6358 if (type == IW_SCAN_TYPE_PASSIVE) { 6359 IPW_DEBUG_WX("use passive scanning\n"); 6360 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN; 6361 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6362 cpu_to_le16(ipw_passive_dwell_time(priv)); 6363 ipw_add_scan_channels(priv, &scan, scan_type); 6364 goto send_request; 6365 } 6366 6367 /* Use active scan by default. */ 6368 if (priv->config & CFG_SPEED_SCAN) 6369 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 6370 cpu_to_le16(30); 6371 else 6372 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 6373 cpu_to_le16(20); 6374 6375 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 6376 cpu_to_le16(20); 6377 6378 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6379 cpu_to_le16(ipw_passive_dwell_time(priv)); 6380 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); 6381 6382#ifdef CONFIG_IPW2200_MONITOR 6383 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 6384 u8 channel; 6385 u8 band = 0; 6386 6387 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 6388 case LIBIPW_52GHZ_BAND: 6389 band = (u8) (IPW_A_MODE << 6) | 1; 6390 channel = priv->channel; 6391 break; 6392 6393 case LIBIPW_24GHZ_BAND: 6394 band = (u8) (IPW_B_MODE << 6) | 1; 6395 channel = priv->channel; 6396 break; 6397 6398 default: 6399 band = (u8) (IPW_B_MODE << 6) | 1; 6400 channel = 9; 6401 break; 6402 } 6403 6404 scan.channels_list[0] = band; 6405 scan.channels_list[1] = channel; 6406 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN); 6407 6408 /* NOTE: The card will sit on this channel for this time 6409 * period. Scan aborts are timing sensitive and frequently 6410 * result in firmware restarts. As such, it is best to 6411 * set a small dwell_time here and just keep re-issuing 6412 * scans. Otherwise fast channel hopping will not actually 6413 * hop channels. 6414 * 6415 * TODO: Move SPEED SCAN support to all modes and bands */ 6416 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6417 cpu_to_le16(2000); 6418 } else { 6419#endif /* CONFIG_IPW2200_MONITOR */ 6420 /* Honor direct scans first, otherwise if we are roaming make 6421 * this a direct scan for the current network. 
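 * (A direct scan first pushes the requested SSID down with ipw_send_ssid(),
 * so networks that only answer directed probe requests, e.g. hidden SSIDs,
 * can still be found.)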
Finally, 6422 * ensure that every other scan is a fast channel hop scan */ 6423 if (direct) { 6424 err = ipw_send_ssid(priv, priv->direct_scan_ssid, 6425 priv->direct_scan_ssid_len); 6426 if (err) { 6427 IPW_DEBUG_HC("Attempt to send SSID command " 6428 "failed\n"); 6429 goto done; 6430 } 6431 6432 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; 6433 } else if ((priv->status & STATUS_ROAMING) 6434 || (!(priv->status & STATUS_ASSOCIATED) 6435 && (priv->config & CFG_STATIC_ESSID) 6436 && (le32_to_cpu(scan.full_scan_index) % 2))) { 6437 err = ipw_send_ssid(priv, priv->essid, priv->essid_len); 6438 if (err) { 6439 IPW_DEBUG_HC("Attempt to send SSID command " 6440 "failed.\n"); 6441 goto done; 6442 } 6443 6444 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; 6445 } else 6446 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN; 6447 6448 ipw_add_scan_channels(priv, &scan, scan_type); 6449#ifdef CONFIG_IPW2200_MONITOR 6450 } 6451#endif 6452 6453send_request: 6454 err = ipw_send_scan_request_ext(priv, &scan); 6455 if (err) { 6456 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err); 6457 goto done; 6458 } 6459 6460 priv->status |= STATUS_SCANNING; 6461 if (direct) { 6462 priv->status &= ~STATUS_DIRECT_SCAN_PENDING; 6463 priv->direct_scan_ssid_len = 0; 6464 } else 6465 priv->status &= ~STATUS_SCAN_PENDING; 6466 6467 queue_delayed_work(priv->workqueue, &priv->scan_check, 6468 IPW_SCAN_CHECK_WATCHDOG); 6469done: 6470 mutex_unlock(&priv->mutex); 6471 return err; 6472} 6473 6474static void ipw_request_passive_scan(struct work_struct *work) 6475{ 6476 struct ipw_priv *priv = 6477 container_of(work, struct ipw_priv, request_passive_scan.work); 6478 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0); 6479} 6480 6481static void ipw_request_scan(struct work_struct *work) 6482{ 6483 struct ipw_priv *priv = 6484 container_of(work, struct ipw_priv, request_scan.work); 6485 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0); 6486} 6487 6488static void ipw_request_direct_scan(struct work_struct *work) 6489{ 6490 struct ipw_priv *priv = 6491 container_of(work, struct ipw_priv, request_direct_scan.work); 6492 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1); 6493} 6494 6495static void ipw_bg_abort_scan(struct work_struct *work) 6496{ 6497 struct ipw_priv *priv = 6498 container_of(work, struct ipw_priv, abort_scan); 6499 mutex_lock(&priv->mutex); 6500 ipw_abort_scan(priv); 6501 mutex_unlock(&priv->mutex); 6502} 6503 6504static int ipw_wpa_enable(struct ipw_priv *priv, int value) 6505{ 6506 /* This is called when wpa_supplicant loads and closes the driver 6507 * interface. 
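 * (The value arrives through the IW_AUTH_WPA_ENABLED case of
 * ipw_wx_set_auth() below; e.g. wpa_supplicant passes 1 when it takes over
 * the interface and 0 again when it detaches.)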
*/ 6508 priv->ieee->wpa_enabled = value; 6509 return 0; 6510} 6511 6512static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value) 6513{ 6514 struct libipw_device *ieee = priv->ieee; 6515 struct libipw_security sec = { 6516 .flags = SEC_AUTH_MODE, 6517 }; 6518 int ret = 0; 6519 6520 if (value & IW_AUTH_ALG_SHARED_KEY) { 6521 sec.auth_mode = WLAN_AUTH_SHARED_KEY; 6522 ieee->open_wep = 0; 6523 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { 6524 sec.auth_mode = WLAN_AUTH_OPEN; 6525 ieee->open_wep = 1; 6526 } else if (value & IW_AUTH_ALG_LEAP) { 6527 sec.auth_mode = WLAN_AUTH_LEAP; 6528 ieee->open_wep = 1; 6529 } else 6530 return -EINVAL; 6531 6532 if (ieee->set_security) 6533 ieee->set_security(ieee->dev, &sec); 6534 else 6535 ret = -EOPNOTSUPP; 6536 6537 return ret; 6538} 6539 6540static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, 6541 int wpa_ie_len) 6542{ 6543 /* make sure WPA is enabled */ 6544 ipw_wpa_enable(priv, 1); 6545} 6546 6547static int ipw_set_rsn_capa(struct ipw_priv *priv, 6548 char *capabilities, int length) 6549{ 6550 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n"); 6551 6552 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length, 6553 capabilities); 6554} 6555 6556/* 6557 * WE-18 support 6558 */ 6559 6560/* SIOCSIWGENIE */ 6561static int ipw_wx_set_genie(struct net_device *dev, 6562 struct iw_request_info *info, 6563 union iwreq_data *wrqu, char *extra) 6564{ 6565 struct ipw_priv *priv = libipw_priv(dev); 6566 struct libipw_device *ieee = priv->ieee; 6567 u8 *buf; 6568 int err = 0; 6569 6570 if (wrqu->data.length > MAX_WPA_IE_LEN || 6571 (wrqu->data.length && extra == NULL)) 6572 return -EINVAL; 6573 6574 if (wrqu->data.length) { 6575 buf = kmalloc(wrqu->data.length, GFP_KERNEL); 6576 if (buf == NULL) { 6577 err = -ENOMEM; 6578 goto out; 6579 } 6580 6581 memcpy(buf, extra, wrqu->data.length); 6582 kfree(ieee->wpa_ie); 6583 ieee->wpa_ie = buf; 6584 ieee->wpa_ie_len = wrqu->data.length; 6585 } else { 6586 kfree(ieee->wpa_ie); 6587 ieee->wpa_ie = NULL; 6588 ieee->wpa_ie_len = 0; 6589 } 6590 6591 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); 6592 out: 6593 return err; 6594} 6595 6596/* SIOCGIWGENIE */ 6597static int ipw_wx_get_genie(struct net_device *dev, 6598 struct iw_request_info *info, 6599 union iwreq_data *wrqu, char *extra) 6600{ 6601 struct ipw_priv *priv = libipw_priv(dev); 6602 struct libipw_device *ieee = priv->ieee; 6603 int err = 0; 6604 6605 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) { 6606 wrqu->data.length = 0; 6607 goto out; 6608 } 6609 6610 if (wrqu->data.length < ieee->wpa_ie_len) { 6611 err = -E2BIG; 6612 goto out; 6613 } 6614 6615 wrqu->data.length = ieee->wpa_ie_len; 6616 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); 6617 6618 out: 6619 return err; 6620} 6621 6622static int wext_cipher2level(int cipher) 6623{ 6624 switch (cipher) { 6625 case IW_AUTH_CIPHER_NONE: 6626 return SEC_LEVEL_0; 6627 case IW_AUTH_CIPHER_WEP40: 6628 case IW_AUTH_CIPHER_WEP104: 6629 return SEC_LEVEL_1; 6630 case IW_AUTH_CIPHER_TKIP: 6631 return SEC_LEVEL_2; 6632 case IW_AUTH_CIPHER_CCMP: 6633 return SEC_LEVEL_3; 6634 default: 6635 return -1; 6636 } 6637} 6638 6639/* SIOCSIWAUTH */ 6640static int ipw_wx_set_auth(struct net_device *dev, 6641 struct iw_request_info *info, 6642 union iwreq_data *wrqu, char *extra) 6643{ 6644 struct ipw_priv *priv = libipw_priv(dev); 6645 struct libipw_device *ieee = priv->ieee; 6646 struct iw_param *param = &wrqu->param; 6647 struct lib80211_crypt_data *crypt; 6648 unsigned long flags; 6649 int ret = 0; 
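	/*
	 * A minimal user-space sketch (not part of this driver) of how a
	 * supplicant reaches this handler through the SIOCSIWAUTH ioctl;
	 * the interface name and the chosen values are illustrative only:
	 *
	 *	struct iwreq iwr;
	 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	 *
	 *	memset(&iwr, 0, sizeof(iwr));
	 *	strncpy(iwr.ifr_name, "eth1", IFNAMSIZ);
	 *	iwr.u.param.flags = IW_AUTH_80211_AUTH_ALG & IW_AUTH_INDEX;
	 *	iwr.u.param.value = IW_AUTH_ALG_OPEN_SYSTEM;
	 *	ioctl(fd, SIOCSIWAUTH, &iwr);
	 *
	 * The flags field selects which IW_AUTH_* parameter the switch below
	 * handles and value carries its setting.
	 */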
6650 6651 switch (param->flags & IW_AUTH_INDEX) { 6652 case IW_AUTH_WPA_VERSION: 6653 break; 6654 case IW_AUTH_CIPHER_PAIRWISE: 6655 ipw_set_hw_decrypt_unicast(priv, 6656 wext_cipher2level(param->value)); 6657 break; 6658 case IW_AUTH_CIPHER_GROUP: 6659 ipw_set_hw_decrypt_multicast(priv, 6660 wext_cipher2level(param->value)); 6661 break; 6662 case IW_AUTH_KEY_MGMT: 6663 /* 6664 * ipw2200 does not use these parameters 6665 */ 6666 break; 6667 6668 case IW_AUTH_TKIP_COUNTERMEASURES: 6669 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; 6670 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags) 6671 break; 6672 6673 flags = crypt->ops->get_flags(crypt->priv); 6674 6675 if (param->value) 6676 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; 6677 else 6678 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; 6679 6680 crypt->ops->set_flags(flags, crypt->priv); 6681 6682 break; 6683 6684 case IW_AUTH_DROP_UNENCRYPTED:{ 6685 /* HACK: 6686 * 6687 * wpa_supplicant calls set_wpa_enabled when the driver 6688 * is loaded and unloaded, regardless of if WPA is being 6689 * used. No other calls are made which can be used to 6690 * determine if encryption will be used or not prior to 6691 * association being expected. If encryption is not being 6692 * used, drop_unencrypted is set to false, else true -- we 6693 * can use this to determine if the CAP_PRIVACY_ON bit should 6694 * be set. 6695 */ 6696 struct libipw_security sec = { 6697 .flags = SEC_ENABLED, 6698 .enabled = param->value, 6699 }; 6700 priv->ieee->drop_unencrypted = param->value; 6701 /* We only change SEC_LEVEL for open mode. Others 6702 * are set by ipw_wpa_set_encryption. 6703 */ 6704 if (!param->value) { 6705 sec.flags |= SEC_LEVEL; 6706 sec.level = SEC_LEVEL_0; 6707 } else { 6708 sec.flags |= SEC_LEVEL; 6709 sec.level = SEC_LEVEL_1; 6710 } 6711 if (priv->ieee->set_security) 6712 priv->ieee->set_security(priv->ieee->dev, &sec); 6713 break; 6714 } 6715 6716 case IW_AUTH_80211_AUTH_ALG: 6717 ret = ipw_wpa_set_auth_algs(priv, param->value); 6718 break; 6719 6720 case IW_AUTH_WPA_ENABLED: 6721 ret = ipw_wpa_enable(priv, param->value); 6722 ipw_disassociate(priv); 6723 break; 6724 6725 case IW_AUTH_RX_UNENCRYPTED_EAPOL: 6726 ieee->ieee802_1x = param->value; 6727 break; 6728 6729 case IW_AUTH_PRIVACY_INVOKED: 6730 ieee->privacy_invoked = param->value; 6731 break; 6732 6733 default: 6734 return -EOPNOTSUPP; 6735 } 6736 return ret; 6737} 6738 6739/* SIOCGIWAUTH */ 6740static int ipw_wx_get_auth(struct net_device *dev, 6741 struct iw_request_info *info, 6742 union iwreq_data *wrqu, char *extra) 6743{ 6744 struct ipw_priv *priv = libipw_priv(dev); 6745 struct libipw_device *ieee = priv->ieee; 6746 struct lib80211_crypt_data *crypt; 6747 struct iw_param *param = &wrqu->param; 6748 int ret = 0; 6749 6750 switch (param->flags & IW_AUTH_INDEX) { 6751 case IW_AUTH_WPA_VERSION: 6752 case IW_AUTH_CIPHER_PAIRWISE: 6753 case IW_AUTH_CIPHER_GROUP: 6754 case IW_AUTH_KEY_MGMT: 6755 /* 6756 * wpa_supplicant will control these internally 6757 */ 6758 ret = -EOPNOTSUPP; 6759 break; 6760 6761 case IW_AUTH_TKIP_COUNTERMEASURES: 6762 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; 6763 if (!crypt || !crypt->ops->get_flags) 6764 break; 6765 6766 param->value = (crypt->ops->get_flags(crypt->priv) & 6767 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 
1 : 0; 6768 6769 break; 6770 6771 case IW_AUTH_DROP_UNENCRYPTED: 6772 param->value = ieee->drop_unencrypted; 6773 break; 6774 6775 case IW_AUTH_80211_AUTH_ALG: 6776 param->value = ieee->sec.auth_mode; 6777 break; 6778 6779 case IW_AUTH_WPA_ENABLED: 6780 param->value = ieee->wpa_enabled; 6781 break; 6782 6783 case IW_AUTH_RX_UNENCRYPTED_EAPOL: 6784 param->value = ieee->ieee802_1x; 6785 break; 6786 6787 case IW_AUTH_ROAMING_CONTROL: 6788 case IW_AUTH_PRIVACY_INVOKED: 6789 param->value = ieee->privacy_invoked; 6790 break; 6791 6792 default: 6793 return -EOPNOTSUPP; 6794 } 6795 return 0; 6796} 6797 6798/* SIOCSIWENCODEEXT */ 6799static int ipw_wx_set_encodeext(struct net_device *dev, 6800 struct iw_request_info *info, 6801 union iwreq_data *wrqu, char *extra) 6802{ 6803 struct ipw_priv *priv = libipw_priv(dev); 6804 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 6805 6806 if (hwcrypto) { 6807 if (ext->alg == IW_ENCODE_ALG_TKIP) { 6808 /* IPW HW can't build TKIP MIC, 6809 host decryption still needed */ 6810 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) 6811 priv->ieee->host_mc_decrypt = 1; 6812 else { 6813 priv->ieee->host_encrypt = 0; 6814 priv->ieee->host_encrypt_msdu = 1; 6815 priv->ieee->host_decrypt = 1; 6816 } 6817 } else { 6818 priv->ieee->host_encrypt = 0; 6819 priv->ieee->host_encrypt_msdu = 0; 6820 priv->ieee->host_decrypt = 0; 6821 priv->ieee->host_mc_decrypt = 0; 6822 } 6823 } 6824 6825 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra); 6826} 6827 6828/* SIOCGIWENCODEEXT */ 6829static int ipw_wx_get_encodeext(struct net_device *dev, 6830 struct iw_request_info *info, 6831 union iwreq_data *wrqu, char *extra) 6832{ 6833 struct ipw_priv *priv = libipw_priv(dev); 6834 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra); 6835} 6836 6837/* SIOCSIWMLME */ 6838static int ipw_wx_set_mlme(struct net_device *dev, 6839 struct iw_request_info *info, 6840 union iwreq_data *wrqu, char *extra) 6841{ 6842 struct ipw_priv *priv = libipw_priv(dev); 6843 struct iw_mlme *mlme = (struct iw_mlme *)extra; 6844 __le16 reason; 6845 6846 reason = cpu_to_le16(mlme->reason_code); 6847 6848 switch (mlme->cmd) { 6849 case IW_MLME_DEAUTH: 6850 /* silently ignore */ 6851 break; 6852 6853 case IW_MLME_DISASSOC: 6854 ipw_disassociate(priv); 6855 break; 6856 6857 default: 6858 return -EOPNOTSUPP; 6859 } 6860 return 0; 6861} 6862 6863#ifdef CONFIG_IPW2200_QOS 6864 6865/* QoS */ 6866/* 6867* get the modulation type of the current network or 6868* the card current mode 6869*/ 6870static u8 ipw_qos_current_mode(struct ipw_priv * priv) 6871{ 6872 u8 mode = 0; 6873 6874 if (priv->status & STATUS_ASSOCIATED) { 6875 unsigned long flags; 6876 6877 spin_lock_irqsave(&priv->ieee->lock, flags); 6878 mode = priv->assoc_network->mode; 6879 spin_unlock_irqrestore(&priv->ieee->lock, flags); 6880 } else { 6881 mode = priv->ieee->mode; 6882 } 6883 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode); 6884 return mode; 6885} 6886 6887/* 6888* Handle management frame beacon and probe response 6889*/ 6890static int ipw_qos_handle_probe_response(struct ipw_priv *priv, 6891 int active_network, 6892 struct libipw_network *network) 6893{ 6894 u32 size = sizeof(struct libipw_qos_parameters); 6895 6896 if (network->capability & WLAN_CAPABILITY_IBSS) 6897 network->qos_data.active = network->qos_data.supported; 6898 6899 if (network->flags & NETWORK_HAS_QOS_MASK) { 6900 if (active_network && 6901 (network->flags & NETWORK_HAS_QOS_PARAMETERS)) 6902 network->qos_data.active = network->qos_data.supported; 6903 6904 
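		/*
		 * The AP bumps the parameter set count in its WMM IE whenever
		 * it changes the EDCA parameters; the comparison below
		 * (illustrative values: old_param_count == 3, a beacon now
		 * carrying param_count == 4) is what triggers a fresh
		 * qos_activate run for the associated network.
		 */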
if ((network->qos_data.active == 1) && (active_network == 1) && 6905 (network->flags & NETWORK_HAS_QOS_PARAMETERS) && 6906 (network->qos_data.old_param_count != 6907 network->qos_data.param_count)) { 6908 network->qos_data.old_param_count = 6909 network->qos_data.param_count; 6910 schedule_work(&priv->qos_activate); 6911 IPW_DEBUG_QOS("QoS parameters change call " 6912 "qos_activate\n"); 6913 } 6914 } else { 6915 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B)) 6916 memcpy(&network->qos_data.parameters, 6917 &def_parameters_CCK, size); 6918 else 6919 memcpy(&network->qos_data.parameters, 6920 &def_parameters_OFDM, size); 6921 6922 if ((network->qos_data.active == 1) && (active_network == 1)) { 6923 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n"); 6924 schedule_work(&priv->qos_activate); 6925 } 6926 6927 network->qos_data.active = 0; 6928 network->qos_data.supported = 0; 6929 } 6930 if ((priv->status & STATUS_ASSOCIATED) && 6931 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) { 6932 if (memcmp(network->bssid, priv->bssid, ETH_ALEN)) 6933 if (network->capability & WLAN_CAPABILITY_IBSS) 6934 if ((network->ssid_len == 6935 priv->assoc_network->ssid_len) && 6936 !memcmp(network->ssid, 6937 priv->assoc_network->ssid, 6938 network->ssid_len)) { 6939 queue_work(priv->workqueue, 6940 &priv->merge_networks); 6941 } 6942 } 6943 6944 return 0; 6945} 6946 6947/* 6948* This function set up the firmware to support QoS. It sends 6949* IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO 6950*/ 6951static int ipw_qos_activate(struct ipw_priv *priv, 6952 struct libipw_qos_data *qos_network_data) 6953{ 6954 int err; 6955 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS]; 6956 struct libipw_qos_parameters *active_one = NULL; 6957 u32 size = sizeof(struct libipw_qos_parameters); 6958 u32 burst_duration; 6959 int i; 6960 u8 type; 6961 6962 type = ipw_qos_current_mode(priv); 6963 6964 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]); 6965 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size); 6966 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]); 6967 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size); 6968 6969 if (qos_network_data == NULL) { 6970 if (type == IEEE_B) { 6971 IPW_DEBUG_QOS("QoS activate network mode %d\n", type); 6972 active_one = &def_parameters_CCK; 6973 } else 6974 active_one = &def_parameters_OFDM; 6975 6976 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 6977 burst_duration = ipw_qos_get_burst_duration(priv); 6978 for (i = 0; i < QOS_QUEUE_NUM; i++) 6979 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] = 6980 cpu_to_le16(burst_duration); 6981 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 6982 if (type == IEEE_B) { 6983 IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n", 6984 type); 6985 if (priv->qos_data.qos_enable == 0) 6986 active_one = &def_parameters_CCK; 6987 else 6988 active_one = priv->qos_data.def_qos_parm_CCK; 6989 } else { 6990 if (priv->qos_data.qos_enable == 0) 6991 active_one = &def_parameters_OFDM; 6992 else 6993 active_one = priv->qos_data.def_qos_parm_OFDM; 6994 } 6995 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 6996 } else { 6997 unsigned long flags; 6998 int active; 6999 7000 spin_lock_irqsave(&priv->ieee->lock, flags); 7001 active_one = &(qos_network_data->parameters); 7002 qos_network_data->old_param_count = 7003 qos_network_data->param_count; 7004 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 7005 active = qos_network_data->supported; 7006 
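		/*
		 * Everything that needs ieee->lock has been snapshotted above
		 * (the parameter copy and the supported flag in active), so
		 * the lock can be dropped before the burst-duration fix-up
		 * below.
		 */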
spin_unlock_irqrestore(&priv->ieee->lock, flags); 7007 7008 if (active == 0) { 7009 burst_duration = ipw_qos_get_burst_duration(priv); 7010 for (i = 0; i < QOS_QUEUE_NUM; i++) 7011 qos_parameters[QOS_PARAM_SET_ACTIVE]. 7012 tx_op_limit[i] = cpu_to_le16(burst_duration); 7013 } 7014 } 7015 7016 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n"); 7017 err = ipw_send_qos_params_command(priv, 7018 (struct libipw_qos_parameters *) 7019 &(qos_parameters[0])); 7020 if (err) 7021 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n"); 7022 7023 return err; 7024} 7025 7026/* 7027* send IPW_CMD_WME_INFO to the firmware 7028*/ 7029static int ipw_qos_set_info_element(struct ipw_priv *priv) 7030{ 7031 int ret = 0; 7032 struct libipw_qos_information_element qos_info; 7033 7034 if (priv == NULL) 7035 return -1; 7036 7037 qos_info.elementID = QOS_ELEMENT_ID; 7038 qos_info.length = sizeof(struct libipw_qos_information_element) - 2; 7039 7040 qos_info.version = QOS_VERSION_1; 7041 qos_info.ac_info = 0; 7042 7043 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN); 7044 qos_info.qui_type = QOS_OUI_TYPE; 7045 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE; 7046 7047 ret = ipw_send_qos_info_command(priv, &qos_info); 7048 if (ret != 0) { 7049 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n"); 7050 } 7051 return ret; 7052} 7053 7054/* 7055* Set the QoS parameter with the association request structure 7056*/ 7057static int ipw_qos_association(struct ipw_priv *priv, 7058 struct libipw_network *network) 7059{ 7060 int err = 0; 7061 struct libipw_qos_data *qos_data = NULL; 7062 struct libipw_qos_data ibss_data = { 7063 .supported = 1, 7064 .active = 1, 7065 }; 7066 7067 switch (priv->ieee->iw_mode) { 7068 case IW_MODE_ADHOC: 7069 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS)); 7070 7071 qos_data = &ibss_data; 7072 break; 7073 7074 case IW_MODE_INFRA: 7075 qos_data = &network->qos_data; 7076 break; 7077 7078 default: 7079 BUG(); 7080 break; 7081 } 7082 7083 err = ipw_qos_activate(priv, qos_data); 7084 if (err) { 7085 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC; 7086 return err; 7087 } 7088 7089 if (priv->qos_data.qos_enable && qos_data->supported) { 7090 IPW_DEBUG_QOS("QoS will be enabled for this association\n"); 7091 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC; 7092 return ipw_qos_set_info_element(priv); 7093 } 7094 7095 return 0; 7096} 7097 7098/* 7099* handling the beaconing responses. 
if we get different QoS setting 7100* off the network from the associated setting, adjust the QoS 7101* setting 7102*/ 7103static int ipw_qos_association_resp(struct ipw_priv *priv, 7104 struct libipw_network *network) 7105{ 7106 int ret = 0; 7107 unsigned long flags; 7108 u32 size = sizeof(struct libipw_qos_parameters); 7109 int set_qos_param = 0; 7110 7111 if ((priv == NULL) || (network == NULL) || 7112 (priv->assoc_network == NULL)) 7113 return ret; 7114 7115 if (!(priv->status & STATUS_ASSOCIATED)) 7116 return ret; 7117 7118 if ((priv->ieee->iw_mode != IW_MODE_INFRA)) 7119 return ret; 7120 7121 spin_lock_irqsave(&priv->ieee->lock, flags); 7122 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) { 7123 memcpy(&priv->assoc_network->qos_data, &network->qos_data, 7124 sizeof(struct libipw_qos_data)); 7125 priv->assoc_network->qos_data.active = 1; 7126 if ((network->qos_data.old_param_count != 7127 network->qos_data.param_count)) { 7128 set_qos_param = 1; 7129 network->qos_data.old_param_count = 7130 network->qos_data.param_count; 7131 } 7132 7133 } else { 7134 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B)) 7135 memcpy(&priv->assoc_network->qos_data.parameters, 7136 &def_parameters_CCK, size); 7137 else 7138 memcpy(&priv->assoc_network->qos_data.parameters, 7139 &def_parameters_OFDM, size); 7140 priv->assoc_network->qos_data.active = 0; 7141 priv->assoc_network->qos_data.supported = 0; 7142 set_qos_param = 1; 7143 } 7144 7145 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7146 7147 if (set_qos_param == 1) 7148 schedule_work(&priv->qos_activate); 7149 7150 return ret; 7151} 7152 7153static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv) 7154{ 7155 u32 ret = 0; 7156 7157 if ((priv == NULL)) 7158 return 0; 7159 7160 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION)) 7161 ret = priv->qos_data.burst_duration_CCK; 7162 else 7163 ret = priv->qos_data.burst_duration_OFDM; 7164 7165 return ret; 7166} 7167 7168/* 7169* Initialize the setting of QoS global 7170*/ 7171static void ipw_qos_init(struct ipw_priv *priv, int enable, 7172 int burst_enable, u32 burst_duration_CCK, 7173 u32 burst_duration_OFDM) 7174{ 7175 priv->qos_data.qos_enable = enable; 7176 7177 if (priv->qos_data.qos_enable) { 7178 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK; 7179 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM; 7180 IPW_DEBUG_QOS("QoS is enabled\n"); 7181 } else { 7182 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK; 7183 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM; 7184 IPW_DEBUG_QOS("QoS is not enabled\n"); 7185 } 7186 7187 priv->qos_data.burst_enable = burst_enable; 7188 7189 if (burst_enable) { 7190 priv->qos_data.burst_duration_CCK = burst_duration_CCK; 7191 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM; 7192 } else { 7193 priv->qos_data.burst_duration_CCK = 0; 7194 priv->qos_data.burst_duration_OFDM = 0; 7195 } 7196} 7197 7198/* 7199* map the packet priority to the right TX Queue 7200*/ 7201static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority) 7202{ 7203 if (priority > 7 || !priv->qos_data.qos_enable) 7204 priority = 0; 7205 7206 return from_priority_to_tx_queue[priority] - 1; 7207} 7208 7209static int ipw_is_qos_active(struct net_device *dev, 7210 struct sk_buff *skb) 7211{ 7212 struct ipw_priv *priv = libipw_priv(dev); 7213 struct libipw_qos_data *qos_data = NULL; 7214 int active, supported; 7215 u8 *daddr = skb->data + ETH_ALEN; 7216 int unicast = !is_multicast_ether_addr(daddr); 7217 7218 if (!(priv->status & 
STATUS_ASSOCIATED)) 7219 return 0; 7220 7221 qos_data = &priv->assoc_network->qos_data; 7222 7223 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7224 if (unicast == 0) 7225 qos_data->active = 0; 7226 else 7227 qos_data->active = qos_data->supported; 7228 } 7229 active = qos_data->active; 7230 supported = qos_data->supported; 7231 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d " 7232 "unicast %d\n", 7233 priv->qos_data.qos_enable, active, supported, unicast); 7234 if (active && priv->qos_data.qos_enable) 7235 return 1; 7236 7237 return 0; 7238 7239} 7240/* 7241* add QoS parameter to the TX command 7242*/ 7243static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv, 7244 u16 priority, 7245 struct tfd_data *tfd) 7246{ 7247 int tx_queue_id = 0; 7248 7249 7250 tx_queue_id = from_priority_to_tx_queue[priority] - 1; 7251 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED; 7252 7253 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) { 7254 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD; 7255 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK); 7256 } 7257 return 0; 7258} 7259 7260/* 7261* background support to run QoS activate functionality 7262*/ 7263static void ipw_bg_qos_activate(struct work_struct *work) 7264{ 7265 struct ipw_priv *priv = 7266 container_of(work, struct ipw_priv, qos_activate); 7267 7268 mutex_lock(&priv->mutex); 7269 7270 if (priv->status & STATUS_ASSOCIATED) 7271 ipw_qos_activate(priv, &(priv->assoc_network->qos_data)); 7272 7273 mutex_unlock(&priv->mutex); 7274} 7275 7276static int ipw_handle_probe_response(struct net_device *dev, 7277 struct libipw_probe_response *resp, 7278 struct libipw_network *network) 7279{ 7280 struct ipw_priv *priv = libipw_priv(dev); 7281 int active_network = ((priv->status & STATUS_ASSOCIATED) && 7282 (network == priv->assoc_network)); 7283 7284 ipw_qos_handle_probe_response(priv, active_network, network); 7285 7286 return 0; 7287} 7288 7289static int ipw_handle_beacon(struct net_device *dev, 7290 struct libipw_beacon *resp, 7291 struct libipw_network *network) 7292{ 7293 struct ipw_priv *priv = libipw_priv(dev); 7294 int active_network = ((priv->status & STATUS_ASSOCIATED) && 7295 (network == priv->assoc_network)); 7296 7297 ipw_qos_handle_probe_response(priv, active_network, network); 7298 7299 return 0; 7300} 7301 7302static int ipw_handle_assoc_response(struct net_device *dev, 7303 struct libipw_assoc_response *resp, 7304 struct libipw_network *network) 7305{ 7306 struct ipw_priv *priv = libipw_priv(dev); 7307 ipw_qos_association_resp(priv, network); 7308 return 0; 7309} 7310 7311static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters 7312 *qos_param) 7313{ 7314 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS, 7315 sizeof(*qos_param) * 3, qos_param); 7316} 7317 7318static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element 7319 *qos_param) 7320{ 7321 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param), 7322 qos_param); 7323} 7324 7325#endif /* CONFIG_IPW2200_QOS */ 7326 7327static int ipw_associate_network(struct ipw_priv *priv, 7328 struct libipw_network *network, 7329 struct ipw_supported_rates *rates, int roaming) 7330{ 7331 int err; 7332 DECLARE_SSID_BUF(ssid); 7333 7334 if (priv->config & CFG_FIXED_RATE) 7335 ipw_set_fixed_rate(priv, network->mode); 7336 7337 if (!(priv->config & CFG_STATIC_ESSID)) { 7338 priv->essid_len = min(network->ssid_len, 7339 (u8) IW_ESSID_MAX_SIZE); 7340 memcpy(priv->essid, network->ssid, 
priv->essid_len);
7341 	}
7342
7343 	network->last_associate = jiffies;
7344
7345 	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7346 	priv->assoc_request.channel = network->channel;
7347 	priv->assoc_request.auth_key = 0;
7348
7349 	if ((priv->capability & CAP_PRIVACY_ON) &&
7350 	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7351 		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7352 		priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7353
7354 		if (priv->ieee->sec.level == SEC_LEVEL_1)
7355 			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7356
7357 	} else if ((priv->capability & CAP_PRIVACY_ON) &&
7358 		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7359 		priv->assoc_request.auth_type = AUTH_LEAP;
7360 	else
7361 		priv->assoc_request.auth_type = AUTH_OPEN;
7362
7363 	if (priv->ieee->wpa_ie_len) {
7364 		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
7365 		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7366 				 priv->ieee->wpa_ie_len);
7367 	}
7368
7369 	/*
7370 	 * It is valid for our ieee device to support multiple modes, but
7371 	 * when it comes to associating to a given network we have to choose
7372 	 * just one mode.
7373 	 */
7374 	if (network->mode & priv->ieee->mode & IEEE_A)
7375 		priv->assoc_request.ieee_mode = IPW_A_MODE;
7376 	else if (network->mode & priv->ieee->mode & IEEE_G)
7377 		priv->assoc_request.ieee_mode = IPW_G_MODE;
7378 	else if (network->mode & priv->ieee->mode & IEEE_B)
7379 		priv->assoc_request.ieee_mode = IPW_B_MODE;
7380
7381 	priv->assoc_request.capability = cpu_to_le16(network->capability);
7382 	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7383 	    && !(priv->config & CFG_PREAMBLE_LONG)) {
7384 		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7385 	} else {
7386 		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7387
7388 		/* Clear the short preamble if we won't be supporting it */
7389 		priv->assoc_request.capability &=
7390 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7391 	}
7392
7393 	/* Clear capability bits that aren't used in Ad Hoc */
7394 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7395 		priv->assoc_request.capability &=
7396 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7397
7398 	IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7399 			"802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7400 			roaming ? "Rea" : "A",
7401 			print_ssid(ssid, priv->essid, priv->essid_len),
7402 			network->channel,
7403 			ipw_modes[priv->assoc_request.ieee_mode],
7404 			rates->num_rates,
7405 			(priv->assoc_request.preamble_length ==
7406 			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7407 			network->capability &
7408 			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7409 			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7410 			priv->capability & CAP_PRIVACY_ON ?
7411 			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
7412 			 "(open)") : "",
7413 			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7414 			priv->capability & CAP_PRIVACY_ON ?
7415 			'1' + priv->ieee->sec.active_key : '.',
7416 			priv->capability & CAP_PRIVACY_ON ? '.'
: ' '); 7417 7418 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval); 7419 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 7420 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) { 7421 priv->assoc_request.assoc_type = HC_IBSS_START; 7422 priv->assoc_request.assoc_tsf_msw = 0; 7423 priv->assoc_request.assoc_tsf_lsw = 0; 7424 } else { 7425 if (unlikely(roaming)) 7426 priv->assoc_request.assoc_type = HC_REASSOCIATE; 7427 else 7428 priv->assoc_request.assoc_type = HC_ASSOCIATE; 7429 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]); 7430 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]); 7431 } 7432 7433 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN); 7434 7435 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7436 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN); 7437 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window); 7438 } else { 7439 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN); 7440 priv->assoc_request.atim_window = 0; 7441 } 7442 7443 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval); 7444 7445 err = ipw_send_ssid(priv, priv->essid, priv->essid_len); 7446 if (err) { 7447 IPW_DEBUG_HC("Attempt to send SSID command failed.\n"); 7448 return err; 7449 } 7450 7451 rates->ieee_mode = priv->assoc_request.ieee_mode; 7452 rates->purpose = IPW_RATE_CONNECT; 7453 ipw_send_supported_rates(priv, rates); 7454 7455 if (priv->assoc_request.ieee_mode == IPW_G_MODE) 7456 priv->sys_config.dot11g_auto_detection = 1; 7457 else 7458 priv->sys_config.dot11g_auto_detection = 0; 7459 7460 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 7461 priv->sys_config.answer_broadcast_ssid_probe = 1; 7462 else 7463 priv->sys_config.answer_broadcast_ssid_probe = 0; 7464 7465 err = ipw_send_system_config(priv); 7466 if (err) { 7467 IPW_DEBUG_HC("Attempt to send sys config command failed.\n"); 7468 return err; 7469 } 7470 7471 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi); 7472 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM); 7473 if (err) { 7474 IPW_DEBUG_HC("Attempt to send associate command failed.\n"); 7475 return err; 7476 } 7477 7478 /* 7479 * If preemption is enabled, it is possible for the association 7480 * to complete before we return from ipw_send_associate. Therefore 7481 * we have to be sure and update our priviate data first. 7482 */ 7483 priv->channel = network->channel; 7484 memcpy(priv->bssid, network->bssid, ETH_ALEN); 7485 priv->status |= STATUS_ASSOCIATING; 7486 priv->status &= ~STATUS_SECURITY_UPDATED; 7487 7488 priv->assoc_network = network; 7489 7490#ifdef CONFIG_IPW2200_QOS 7491 ipw_qos_association(priv, network); 7492#endif 7493 7494 err = ipw_send_associate(priv, &priv->assoc_request); 7495 if (err) { 7496 IPW_DEBUG_HC("Attempt to send associate command failed.\n"); 7497 return err; 7498 } 7499 7500 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n", 7501 print_ssid(ssid, priv->essid, priv->essid_len), 7502 priv->bssid); 7503 7504 return 0; 7505} 7506 7507static void ipw_roam(void *data) 7508{ 7509 struct ipw_priv *priv = data; 7510 struct libipw_network *network = NULL; 7511 struct ipw_network_match match = { 7512 .network = priv->assoc_network 7513 }; 7514 7515 /* The roaming process is as follows: 7516 * 7517 * 1. Missed beacon threshold triggers the roaming process by 7518 * setting the status ROAM bit and requesting a scan. 7519 * 2. When the scan completes, it schedules the ROAM work 7520 * 3. 
The ROAM work looks at all of the known networks for one that 7521 * is a better network than the currently associated. If none 7522 * found, the ROAM process is over (ROAM bit cleared) 7523 * 4. If a better network is found, a disassociation request is 7524 * sent. 7525 * 5. When the disassociation completes, the roam work is again 7526 * scheduled. The second time through, the driver is no longer 7527 * associated, and the newly selected network is sent an 7528 * association request. 7529 * 6. At this point ,the roaming process is complete and the ROAM 7530 * status bit is cleared. 7531 */ 7532 7533 /* If we are no longer associated, and the roaming bit is no longer 7534 * set, then we are not actively roaming, so just return */ 7535 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING))) 7536 return; 7537 7538 if (priv->status & STATUS_ASSOCIATED) { 7539 /* First pass through ROAM process -- look for a better 7540 * network */ 7541 unsigned long flags; 7542 u8 rssi = priv->assoc_network->stats.rssi; 7543 priv->assoc_network->stats.rssi = -128; 7544 spin_lock_irqsave(&priv->ieee->lock, flags); 7545 list_for_each_entry(network, &priv->ieee->network_list, list) { 7546 if (network != priv->assoc_network) 7547 ipw_best_network(priv, &match, network, 1); 7548 } 7549 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7550 priv->assoc_network->stats.rssi = rssi; 7551 7552 if (match.network == priv->assoc_network) { 7553 IPW_DEBUG_ASSOC("No better APs in this network to " 7554 "roam to.\n"); 7555 priv->status &= ~STATUS_ROAMING; 7556 ipw_debug_config(priv); 7557 return; 7558 } 7559 7560 ipw_send_disassociate(priv, 1); 7561 priv->assoc_network = match.network; 7562 7563 return; 7564 } 7565 7566 /* Second pass through ROAM process -- request association */ 7567 ipw_compatible_rates(priv, priv->assoc_network, &match.rates); 7568 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1); 7569 priv->status &= ~STATUS_ROAMING; 7570} 7571 7572static void ipw_bg_roam(struct work_struct *work) 7573{ 7574 struct ipw_priv *priv = 7575 container_of(work, struct ipw_priv, roam); 7576 mutex_lock(&priv->mutex); 7577 ipw_roam(priv); 7578 mutex_unlock(&priv->mutex); 7579} 7580 7581static int ipw_associate(void *data) 7582{ 7583 struct ipw_priv *priv = data; 7584 7585 struct libipw_network *network = NULL; 7586 struct ipw_network_match match = { 7587 .network = NULL 7588 }; 7589 struct ipw_supported_rates *rates; 7590 struct list_head *element; 7591 unsigned long flags; 7592 DECLARE_SSID_BUF(ssid); 7593 7594 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 7595 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n"); 7596 return 0; 7597 } 7598 7599 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 7600 IPW_DEBUG_ASSOC("Not attempting association (already in " 7601 "progress)\n"); 7602 return 0; 7603 } 7604 7605 if (priv->status & STATUS_DISASSOCIATING) { 7606 IPW_DEBUG_ASSOC("Not attempting association (in " 7607 "disassociating)\n "); 7608 queue_work(priv->workqueue, &priv->associate); 7609 return 0; 7610 } 7611 7612 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) { 7613 IPW_DEBUG_ASSOC("Not attempting association (scanning or not " 7614 "initialized)\n"); 7615 return 0; 7616 } 7617 7618 if (!(priv->config & CFG_ASSOCIATE) && 7619 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) { 7620 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n"); 7621 return 0; 7622 } 7623 7624 /* Protect our use of the network_list */ 7625 spin_lock_irqsave(&priv->ieee->lock, 
flags); 7626 list_for_each_entry(network, &priv->ieee->network_list, list) 7627 ipw_best_network(priv, &match, network, 0); 7628 7629 network = match.network; 7630 rates = &match.rates; 7631 7632 if (network == NULL && 7633 priv->ieee->iw_mode == IW_MODE_ADHOC && 7634 priv->config & CFG_ADHOC_CREATE && 7635 priv->config & CFG_STATIC_ESSID && 7636 priv->config & CFG_STATIC_CHANNEL) { 7637 /* Use oldest network if the free list is empty */ 7638 if (list_empty(&priv->ieee->network_free_list)) { 7639 struct libipw_network *oldest = NULL; 7640 struct libipw_network *target; 7641 7642 list_for_each_entry(target, &priv->ieee->network_list, list) { 7643 if ((oldest == NULL) || 7644 (target->last_scanned < oldest->last_scanned)) 7645 oldest = target; 7646 } 7647 7648 /* If there are no more slots, expire the oldest */ 7649 list_del(&oldest->list); 7650 target = oldest; 7651 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from " 7652 "network list.\n", 7653 print_ssid(ssid, target->ssid, 7654 target->ssid_len), 7655 target->bssid); 7656 list_add_tail(&target->list, 7657 &priv->ieee->network_free_list); 7658 } 7659 7660 element = priv->ieee->network_free_list.next; 7661 network = list_entry(element, struct libipw_network, list); 7662 ipw_adhoc_create(priv, network); 7663 rates = &priv->rates; 7664 list_del(element); 7665 list_add_tail(&network->list, &priv->ieee->network_list); 7666 } 7667 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7668 7669 /* If we reached the end of the list, then we don't have any valid 7670 * matching APs */ 7671 if (!network) { 7672 ipw_debug_config(priv); 7673 7674 if (!(priv->status & STATUS_SCANNING)) { 7675 if (!(priv->config & CFG_SPEED_SCAN)) 7676 queue_delayed_work(priv->workqueue, 7677 &priv->request_scan, 7678 SCAN_INTERVAL); 7679 else 7680 queue_delayed_work(priv->workqueue, 7681 &priv->request_scan, 0); 7682 } 7683 7684 return 0; 7685 } 7686 7687 ipw_associate_network(priv, network, rates, 0); 7688 7689 return 1; 7690} 7691 7692static void ipw_bg_associate(struct work_struct *work) 7693{ 7694 struct ipw_priv *priv = 7695 container_of(work, struct ipw_priv, associate); 7696 mutex_lock(&priv->mutex); 7697 ipw_associate(priv); 7698 mutex_unlock(&priv->mutex); 7699} 7700 7701static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv, 7702 struct sk_buff *skb) 7703{ 7704 struct ieee80211_hdr *hdr; 7705 u16 fc; 7706 7707 hdr = (struct ieee80211_hdr *)skb->data; 7708 fc = le16_to_cpu(hdr->frame_control); 7709 if (!(fc & IEEE80211_FCTL_PROTECTED)) 7710 return; 7711 7712 fc &= ~IEEE80211_FCTL_PROTECTED; 7713 hdr->frame_control = cpu_to_le16(fc); 7714 switch (priv->ieee->sec.level) { 7715 case SEC_LEVEL_3: 7716 /* Remove CCMP HDR */ 7717 memmove(skb->data + LIBIPW_3ADDR_LEN, 7718 skb->data + LIBIPW_3ADDR_LEN + 8, 7719 skb->len - LIBIPW_3ADDR_LEN - 8); 7720 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */ 7721 break; 7722 case SEC_LEVEL_2: 7723 break; 7724 case SEC_LEVEL_1: 7725 /* Remove IV */ 7726 memmove(skb->data + LIBIPW_3ADDR_LEN, 7727 skb->data + LIBIPW_3ADDR_LEN + 4, 7728 skb->len - LIBIPW_3ADDR_LEN - 4); 7729 skb_trim(skb, skb->len - 8); /* IV + ICV */ 7730 break; 7731 case SEC_LEVEL_0: 7732 break; 7733 default: 7734 printk(KERN_ERR "Unknow security level %d\n", 7735 priv->ieee->sec.level); 7736 break; 7737 } 7738} 7739 7740static void ipw_handle_data_packet(struct ipw_priv *priv, 7741 struct ipw_rx_mem_buffer *rxb, 7742 struct libipw_rx_stats *stats) 7743{ 7744 struct net_device *dev = priv->net_dev; 7745 struct libipw_hdr_4addr *hdr; 7746 struct 
ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 7747 7748 /* We received data from the HW, so stop the watchdog */ 7749 dev->trans_start = jiffies; 7750 7751 /* We only process data packets if the 7752 * interface is open */ 7753 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) > 7754 skb_tailroom(rxb->skb))) { 7755 dev->stats.rx_errors++; 7756 priv->wstats.discard.misc++; 7757 IPW_DEBUG_DROP("Corruption detected! Oh no!\n"); 7758 return; 7759 } else if (unlikely(!netif_running(priv->net_dev))) { 7760 dev->stats.rx_dropped++; 7761 priv->wstats.discard.misc++; 7762 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 7763 return; 7764 } 7765 7766 /* Advance skb->data to the start of the actual payload */ 7767 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data)); 7768 7769 /* Set the size of the skb to the size of the frame */ 7770 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length)); 7771 7772 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); 7773 7774 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */ 7775 hdr = (struct libipw_hdr_4addr *)rxb->skb->data; 7776 if (priv->ieee->iw_mode != IW_MODE_MONITOR && 7777 (is_multicast_ether_addr(hdr->addr1) ? 7778 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt)) 7779 ipw_rebuild_decrypted_skb(priv, rxb->skb); 7780 7781 if (!libipw_rx(priv->ieee, rxb->skb, stats)) 7782 dev->stats.rx_errors++; 7783 else { /* libipw_rx succeeded, so it now owns the SKB */ 7784 rxb->skb = NULL; 7785 __ipw_led_activity_on(priv); 7786 } 7787} 7788 7789#ifdef CONFIG_IPW2200_RADIOTAP 7790static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, 7791 struct ipw_rx_mem_buffer *rxb, 7792 struct libipw_rx_stats *stats) 7793{ 7794 struct net_device *dev = priv->net_dev; 7795 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 7796 struct ipw_rx_frame *frame = &pkt->u.frame; 7797 7798 /* initial pull of some data */ 7799 u16 received_channel = frame->received_channel; 7800 u8 antennaAndPhy = frame->antennaAndPhy; 7801 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */ 7802 u16 pktrate = frame->rate; 7803 7804 /* Magic struct that slots into the radiotap header -- no reason 7805 * to build this manually element by element, we can write it much 7806 * more efficiently than we can parse it. ORDER MATTERS HERE */ 7807 struct ipw_rt_hdr *ipw_rt; 7808 7809 short len = le16_to_cpu(pkt->u.frame.length); 7810 7811 /* We received data from the HW, so stop the watchdog */ 7812 dev->trans_start = jiffies; 7813 7814 /* We only process data packets if the 7815 * interface is open */ 7816 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) > 7817 skb_tailroom(rxb->skb))) { 7818 dev->stats.rx_errors++; 7819 priv->wstats.discard.misc++; 7820 IPW_DEBUG_DROP("Corruption detected! 
Oh no!\n"); 7821 return; 7822 } else if (unlikely(!netif_running(priv->net_dev))) { 7823 dev->stats.rx_dropped++; 7824 priv->wstats.discard.misc++; 7825 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 7826 return; 7827 } 7828 7829 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use 7830 * that now */ 7831 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) { 7832 /* FIXME: Should alloc bigger skb instead */ 7833 dev->stats.rx_dropped++; 7834 priv->wstats.discard.misc++; 7835 IPW_DEBUG_DROP("Dropping too large packet in monitor\n"); 7836 return; 7837 } 7838 7839 /* copy the frame itself */ 7840 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr), 7841 rxb->skb->data + IPW_RX_FRAME_SIZE, len); 7842 7843 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data; 7844 7845 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; 7846 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ 7847 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */ 7848 7849 /* Big bitfield of all the fields we provide in radiotap */ 7850 ipw_rt->rt_hdr.it_present = cpu_to_le32( 7851 (1 << IEEE80211_RADIOTAP_TSFT) | 7852 (1 << IEEE80211_RADIOTAP_FLAGS) | 7853 (1 << IEEE80211_RADIOTAP_RATE) | 7854 (1 << IEEE80211_RADIOTAP_CHANNEL) | 7855 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | 7856 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | 7857 (1 << IEEE80211_RADIOTAP_ANTENNA)); 7858 7859 /* Zero the flags, we'll add to them as we go */ 7860 ipw_rt->rt_flags = 0; 7861 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 | 7862 frame->parent_tsf[2] << 16 | 7863 frame->parent_tsf[1] << 8 | 7864 frame->parent_tsf[0]); 7865 7866 /* Convert signal to DBM */ 7867 ipw_rt->rt_dbmsignal = antsignal; 7868 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise); 7869 7870 /* Convert the channel data and set the flags */ 7871 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel)); 7872 if (received_channel > 14) { /* 802.11a */ 7873 ipw_rt->rt_chbitmask = 7874 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); 7875 } else if (antennaAndPhy & 32) { /* 802.11b */ 7876 ipw_rt->rt_chbitmask = 7877 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); 7878 } else { /* 802.11g */ 7879 ipw_rt->rt_chbitmask = 7880 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); 7881 } 7882 7883 /* set the rate in multiples of 500k/s */ 7884 switch (pktrate) { 7885 case IPW_TX_RATE_1MB: 7886 ipw_rt->rt_rate = 2; 7887 break; 7888 case IPW_TX_RATE_2MB: 7889 ipw_rt->rt_rate = 4; 7890 break; 7891 case IPW_TX_RATE_5MB: 7892 ipw_rt->rt_rate = 10; 7893 break; 7894 case IPW_TX_RATE_6MB: 7895 ipw_rt->rt_rate = 12; 7896 break; 7897 case IPW_TX_RATE_9MB: 7898 ipw_rt->rt_rate = 18; 7899 break; 7900 case IPW_TX_RATE_11MB: 7901 ipw_rt->rt_rate = 22; 7902 break; 7903 case IPW_TX_RATE_12MB: 7904 ipw_rt->rt_rate = 24; 7905 break; 7906 case IPW_TX_RATE_18MB: 7907 ipw_rt->rt_rate = 36; 7908 break; 7909 case IPW_TX_RATE_24MB: 7910 ipw_rt->rt_rate = 48; 7911 break; 7912 case IPW_TX_RATE_36MB: 7913 ipw_rt->rt_rate = 72; 7914 break; 7915 case IPW_TX_RATE_48MB: 7916 ipw_rt->rt_rate = 96; 7917 break; 7918 case IPW_TX_RATE_54MB: 7919 ipw_rt->rt_rate = 108; 7920 break; 7921 default: 7922 ipw_rt->rt_rate = 0; 7923 break; 7924 } 7925 7926 /* antenna number */ 7927 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? 
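   The low two bits of antennaAndPhy are taken as the antenna index here; bit 5 marked the frame as CCK/802.11b above and bit 6 is checked just below for the short-preamble flag.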
*/ 7928 7929 /* set the preamble flag if we have it */ 7930 if ((antennaAndPhy & 64)) 7931 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 7932 7933 /* Set the size of the skb to the size of the frame */ 7934 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr)); 7935 7936 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); 7937 7938 if (!libipw_rx(priv->ieee, rxb->skb, stats)) 7939 dev->stats.rx_errors++; 7940 else { /* libipw_rx succeeded, so it now owns the SKB */ 7941 rxb->skb = NULL; 7942 /* no LED during capture */ 7943 } 7944} 7945#endif 7946 7947#ifdef CONFIG_IPW2200_PROMISCUOUS 7948#define libipw_is_probe_response(fc) \ 7949 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \ 7950 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP ) 7951 7952#define libipw_is_management(fc) \ 7953 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) 7954 7955#define libipw_is_control(fc) \ 7956 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) 7957 7958#define libipw_is_data(fc) \ 7959 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) 7960 7961#define libipw_is_assoc_request(fc) \ 7962 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ) 7963 7964#define libipw_is_reassoc_request(fc) \ 7965 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) 7966 7967static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, 7968 struct ipw_rx_mem_buffer *rxb, 7969 struct libipw_rx_stats *stats) 7970{ 7971 struct net_device *dev = priv->prom_net_dev; 7972 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 7973 struct ipw_rx_frame *frame = &pkt->u.frame; 7974 struct ipw_rt_hdr *ipw_rt; 7975 7976 /* First cache any information we need before we overwrite 7977 * the information provided in the skb from the hardware */ 7978 struct ieee80211_hdr *hdr; 7979 u16 channel = frame->received_channel; 7980 u8 phy_flags = frame->antennaAndPhy; 7981 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM; 7982 s8 noise = (s8) le16_to_cpu(frame->noise); 7983 u8 rate = frame->rate; 7984 short len = le16_to_cpu(pkt->u.frame.length); 7985 struct sk_buff *skb; 7986 int hdr_only = 0; 7987 u16 filter = priv->prom_priv->filter; 7988 7989 /* If the filter is set to not include Rx frames then return */ 7990 if (filter & IPW_PROM_NO_RX) 7991 return; 7992 7993 /* We received data from the HW, so stop the watchdog */ 7994 dev->trans_start = jiffies; 7995 7996 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { 7997 dev->stats.rx_errors++; 7998 IPW_DEBUG_DROP("Corruption detected! 
Oh no!\n"); 7999 return; 8000 } 8001 8002 /* We only process data packets if the interface is open */ 8003 if (unlikely(!netif_running(dev))) { 8004 dev->stats.rx_dropped++; 8005 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 8006 return; 8007 } 8008 8009 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use 8010 * that now */ 8011 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) { 8012 /* FIXME: Should alloc bigger skb instead */ 8013 dev->stats.rx_dropped++; 8014 IPW_DEBUG_DROP("Dropping too large packet in monitor\n"); 8015 return; 8016 } 8017 8018 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE; 8019 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) { 8020 if (filter & IPW_PROM_NO_MGMT) 8021 return; 8022 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 8023 hdr_only = 1; 8024 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) { 8025 if (filter & IPW_PROM_NO_CTL) 8026 return; 8027 if (filter & IPW_PROM_CTL_HEADER_ONLY) 8028 hdr_only = 1; 8029 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) { 8030 if (filter & IPW_PROM_NO_DATA) 8031 return; 8032 if (filter & IPW_PROM_DATA_HEADER_ONLY) 8033 hdr_only = 1; 8034 } 8035 8036 /* Copy the SKB since this is for the promiscuous side */ 8037 skb = skb_copy(rxb->skb, GFP_ATOMIC); 8038 if (skb == NULL) { 8039 IPW_ERROR("skb_clone failed for promiscuous copy.\n"); 8040 return; 8041 } 8042 8043 /* copy the frame data to write after where the radiotap header goes */ 8044 ipw_rt = (void *)skb->data; 8045 8046 if (hdr_only) 8047 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control)); 8048 8049 memcpy(ipw_rt->payload, hdr, len); 8050 8051 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; 8052 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ 8053 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */ 8054 8055 /* Set the size of the skb to the size of the frame */ 8056 skb_put(skb, sizeof(*ipw_rt) + len); 8057 8058 /* Big bitfield of all the fields we provide in radiotap */ 8059 ipw_rt->rt_hdr.it_present = cpu_to_le32( 8060 (1 << IEEE80211_RADIOTAP_TSFT) | 8061 (1 << IEEE80211_RADIOTAP_FLAGS) | 8062 (1 << IEEE80211_RADIOTAP_RATE) | 8063 (1 << IEEE80211_RADIOTAP_CHANNEL) | 8064 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | 8065 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | 8066 (1 << IEEE80211_RADIOTAP_ANTENNA)); 8067 8068 /* Zero the flags, we'll add to them as we go */ 8069 ipw_rt->rt_flags = 0; 8070 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 | 8071 frame->parent_tsf[2] << 16 | 8072 frame->parent_tsf[1] << 8 | 8073 frame->parent_tsf[0]); 8074 8075 /* Convert to DBM */ 8076 ipw_rt->rt_dbmsignal = signal; 8077 ipw_rt->rt_dbmnoise = noise; 8078 8079 /* Convert the channel data and set the flags */ 8080 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel)); 8081 if (channel > 14) { /* 802.11a */ 8082 ipw_rt->rt_chbitmask = 8083 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); 8084 } else if (phy_flags & (1 << 5)) { /* 802.11b */ 8085 ipw_rt->rt_chbitmask = 8086 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); 8087 } else { /* 802.11g */ 8088 ipw_rt->rt_chbitmask = 8089 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); 8090 } 8091 8092 /* set the rate in multiples of 500k/s */ 8093 switch (rate) { 8094 case IPW_TX_RATE_1MB: 8095 ipw_rt->rt_rate = 2; 8096 break; 8097 case IPW_TX_RATE_2MB: 8098 ipw_rt->rt_rate = 4; 8099 break; 8100 case IPW_TX_RATE_5MB: 8101 ipw_rt->rt_rate = 10; 8102 break; 8103 case IPW_TX_RATE_6MB: 8104 ipw_rt->rt_rate = 
12; 8105 break; 8106 case IPW_TX_RATE_9MB: 8107 ipw_rt->rt_rate = 18; 8108 break; 8109 case IPW_TX_RATE_11MB: 8110 ipw_rt->rt_rate = 22; 8111 break; 8112 case IPW_TX_RATE_12MB: 8113 ipw_rt->rt_rate = 24; 8114 break; 8115 case IPW_TX_RATE_18MB: 8116 ipw_rt->rt_rate = 36; 8117 break; 8118 case IPW_TX_RATE_24MB: 8119 ipw_rt->rt_rate = 48; 8120 break; 8121 case IPW_TX_RATE_36MB: 8122 ipw_rt->rt_rate = 72; 8123 break; 8124 case IPW_TX_RATE_48MB: 8125 ipw_rt->rt_rate = 96; 8126 break; 8127 case IPW_TX_RATE_54MB: 8128 ipw_rt->rt_rate = 108; 8129 break; 8130 default: 8131 ipw_rt->rt_rate = 0; 8132 break; 8133 } 8134 8135 /* antenna number */ 8136 ipw_rt->rt_antenna = (phy_flags & 3); 8137 8138 /* set the preamble flag if we have it */ 8139 if (phy_flags & (1 << 6)) 8140 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 8141 8142 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len); 8143 8144 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) { 8145 dev->stats.rx_errors++; 8146 dev_kfree_skb_any(skb); 8147 } 8148} 8149#endif 8150 8151static int is_network_packet(struct ipw_priv *priv, 8152 struct libipw_hdr_4addr *header) 8153{ 8154 /* Filter incoming packets to determine if they are targeted toward 8155 * this network, discarding packets coming from ourselves */ 8156 switch (priv->ieee->iw_mode) { 8157 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */ 8158 /* packets from our adapter are dropped (echo) */ 8159 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN)) 8160 return 0; 8161 8162 /* {broad,multi}cast packets to our BSSID go through */ 8163 if (is_multicast_ether_addr(header->addr1)) 8164 return !memcmp(header->addr3, priv->bssid, ETH_ALEN); 8165 8166 /* packets to our adapter go through */ 8167 return !memcmp(header->addr1, priv->net_dev->dev_addr, 8168 ETH_ALEN); 8169 8170 case IW_MODE_INFRA: /* Header: Dest.
| BSSID | Source */ 8171 /* packets from our adapter are dropped (echo) */ 8172 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN)) 8173 return 0; 8174 8175 /* {broad,multi}cast packets to our BSS go through */ 8176 if (is_multicast_ether_addr(header->addr1)) 8177 return !memcmp(header->addr2, priv->bssid, ETH_ALEN); 8178 8179 /* packets to our adapter go through */ 8180 return !memcmp(header->addr1, priv->net_dev->dev_addr, 8181 ETH_ALEN); 8182 } 8183 8184 return 1; 8185} 8186 8187#define IPW_PACKET_RETRY_TIME HZ 8188 8189static int is_duplicate_packet(struct ipw_priv *priv, 8190 struct libipw_hdr_4addr *header) 8191{ 8192 u16 sc = le16_to_cpu(header->seq_ctl); 8193 u16 seq = WLAN_GET_SEQ_SEQ(sc); 8194 u16 frag = WLAN_GET_SEQ_FRAG(sc); 8195 u16 *last_seq, *last_frag; 8196 unsigned long *last_time; 8197 8198 switch (priv->ieee->iw_mode) { 8199 case IW_MODE_ADHOC: 8200 { 8201 struct list_head *p; 8202 struct ipw_ibss_seq *entry = NULL; 8203 u8 *mac = header->addr2; 8204 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE; 8205 8206 __list_for_each(p, &priv->ibss_mac_hash[index]) { 8207 entry = 8208 list_entry(p, struct ipw_ibss_seq, list); 8209 if (!memcmp(entry->mac, mac, ETH_ALEN)) 8210 break; 8211 } 8212 if (p == &priv->ibss_mac_hash[index]) { 8213 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 8214 if (!entry) { 8215 IPW_ERROR 8216 ("Cannot malloc new mac entry\n"); 8217 return 0; 8218 } 8219 memcpy(entry->mac, mac, ETH_ALEN); 8220 entry->seq_num = seq; 8221 entry->frag_num = frag; 8222 entry->packet_time = jiffies; 8223 list_add(&entry->list, 8224 &priv->ibss_mac_hash[index]); 8225 return 0; 8226 } 8227 last_seq = &entry->seq_num; 8228 last_frag = &entry->frag_num; 8229 last_time = &entry->packet_time; 8230 break; 8231 } 8232 case IW_MODE_INFRA: 8233 last_seq = &priv->last_seq_num; 8234 last_frag = &priv->last_frag_num; 8235 last_time = &priv->last_packet_time; 8236 break; 8237 default: 8238 return 0; 8239 } 8240 if ((*last_seq == seq) && 8241 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) { 8242 if (*last_frag == frag) 8243 goto drop; 8244 if (*last_frag + 1 != frag) 8245 /* out-of-order fragment */ 8246 goto drop; 8247 } else 8248 *last_seq = seq; 8249 8250 *last_frag = frag; 8251 *last_time = jiffies; 8252 return 0; 8253 8254 drop: 8255 /* Comment this line now since we observed the card receives 8256 * duplicate packets but the FCTL_RETRY bit is not set in the 8257 * IBSS mode with fragmentation enabled. 
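	 * Such duplicates are still caught by the sequence/fragment/timestamp comparison above, so they are simply dropped here without asserting.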
8258 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */ 8259 return 1; 8260} 8261 8262static void ipw_handle_mgmt_packet(struct ipw_priv *priv, 8263 struct ipw_rx_mem_buffer *rxb, 8264 struct libipw_rx_stats *stats) 8265{ 8266 struct sk_buff *skb = rxb->skb; 8267 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data; 8268 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *) 8269 (skb->data + IPW_RX_FRAME_SIZE); 8270 8271 libipw_rx_mgt(priv->ieee, header, stats); 8272 8273 if (priv->ieee->iw_mode == IW_MODE_ADHOC && 8274 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) == 8275 IEEE80211_STYPE_PROBE_RESP) || 8276 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) == 8277 IEEE80211_STYPE_BEACON))) { 8278 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN)) 8279 ipw_add_station(priv, header->addr2); 8280 } 8281 8282 if (priv->config & CFG_NET_STATS) { 8283 IPW_DEBUG_HC("sending stat packet\n"); 8284 8285 /* Set the size of the skb to the size of the full 8286 * ipw header and 802.11 frame */ 8287 skb_put(skb, le16_to_cpu(pkt->u.frame.length) + 8288 IPW_RX_FRAME_SIZE); 8289 8290 /* Advance past the ipw packet header to the 802.11 frame */ 8291 skb_pull(skb, IPW_RX_FRAME_SIZE); 8292 8293 /* Push the libipw_rx_stats before the 802.11 frame */ 8294 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats)); 8295 8296 skb->dev = priv->ieee->dev; 8297 8298 /* Point raw at the libipw_stats */ 8299 skb_reset_mac_header(skb); 8300 8301 skb->pkt_type = PACKET_OTHERHOST; 8302 skb->protocol = cpu_to_be16(ETH_P_80211_STATS); 8303 memset(skb->cb, 0, sizeof(rxb->skb->cb)); 8304 netif_rx(skb); 8305 rxb->skb = NULL; 8306 } 8307} 8308 8309/* 8310 * Main entry function for receiving a packet with 802.11 headers. This 8311 * should be called whenever the FW has notified us that there is a new 8312 * skb in the receive queue. 8313 */ 8314static void ipw_rx(struct ipw_priv *priv) 8315{ 8316 struct ipw_rx_mem_buffer *rxb; 8317 struct ipw_rx_packet *pkt; 8318 struct libipw_hdr_4addr *header; 8319 u32 r, w, i; 8320 u8 network_packet; 8321 u8 fill_rx = 0; 8322 8323 r = ipw_read32(priv, IPW_RX_READ_INDEX); 8324 w = ipw_read32(priv, IPW_RX_WRITE_INDEX); 8325 i = priv->rxq->read; 8326 8327 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2)) 8328 fill_rx = 1; 8329 8330 while (i != r) { 8331 rxb = priv->rxq->queue[i]; 8332 if (unlikely(rxb == NULL)) { 8333 printk(KERN_CRIT "Queue not allocated!\n"); 8334 break; 8335 } 8336 priv->rxq->queue[i] = NULL; 8337 8338 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 8339 IPW_RX_BUF_SIZE, 8340 PCI_DMA_FROMDEVICE); 8341 8342 pkt = (struct ipw_rx_packet *)rxb->skb->data; 8343 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n", 8344 pkt->header.message_type, 8345 pkt->header.rx_seq_num, pkt->header.control_bits); 8346 8347 switch (pkt->header.message_type) { 8348 case RX_FRAME_TYPE: /* 802.11 frame */ { 8349 struct libipw_rx_stats stats = { 8350 .rssi = pkt->u.frame.rssi_dbm - 8351 IPW_RSSI_TO_DBM, 8352 .signal = 8353 pkt->u.frame.rssi_dbm - 8354 IPW_RSSI_TO_DBM + 0x100, 8355 .noise = 8356 le16_to_cpu(pkt->u.frame.noise), 8357 .rate = pkt->u.frame.rate, 8358 .mac_time = jiffies, 8359 .received_channel = 8360 pkt->u.frame.received_channel, 8361 .freq = 8362 (pkt->u.frame. 8363 control & (1 << 0)) ?
8364 LIBIPW_24GHZ_BAND : 8365 LIBIPW_52GHZ_BAND, 8366 .len = le16_to_cpu(pkt->u.frame.length), 8367 }; 8368 8369 if (stats.rssi != 0) 8370 stats.mask |= LIBIPW_STATMASK_RSSI; 8371 if (stats.signal != 0) 8372 stats.mask |= LIBIPW_STATMASK_SIGNAL; 8373 if (stats.noise != 0) 8374 stats.mask |= LIBIPW_STATMASK_NOISE; 8375 if (stats.rate != 0) 8376 stats.mask |= LIBIPW_STATMASK_RATE; 8377 8378 priv->rx_packets++; 8379 8380#ifdef CONFIG_IPW2200_PROMISCUOUS 8381 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) 8382 ipw_handle_promiscuous_rx(priv, rxb, &stats); 8383#endif 8384 8385#ifdef CONFIG_IPW2200_MONITOR 8386 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 8387#ifdef CONFIG_IPW2200_RADIOTAP 8388 8389 ipw_handle_data_packet_monitor(priv, 8390 rxb, 8391 &stats); 8392#else 8393 ipw_handle_data_packet(priv, rxb, 8394 &stats); 8395#endif 8396 break; 8397 } 8398#endif 8399 8400 header = 8401 (struct libipw_hdr_4addr *)(rxb->skb-> 8402 data + 8403 IPW_RX_FRAME_SIZE); 8404 /* TODO: Check Ad-Hoc dest/source and make sure 8405 * that we are actually parsing these packets 8406 * correctly -- we should probably use the 8407 * frame control of the packet and disregard 8408 * the current iw_mode */ 8409 8410 network_packet = 8411 is_network_packet(priv, header); 8412 if (network_packet && priv->assoc_network) { 8413 priv->assoc_network->stats.rssi = 8414 stats.rssi; 8415 priv->exp_avg_rssi = 8416 exponential_average(priv->exp_avg_rssi, 8417 stats.rssi, DEPTH_RSSI); 8418 } 8419 8420 IPW_DEBUG_RX("Frame: len=%u\n", 8421 le16_to_cpu(pkt->u.frame.length)); 8422 8423 if (le16_to_cpu(pkt->u.frame.length) < 8424 libipw_get_hdrlen(le16_to_cpu( 8425 header->frame_ctl))) { 8426 IPW_DEBUG_DROP 8427 ("Received packet is too small. " 8428 "Dropping.\n"); 8429 priv->net_dev->stats.rx_errors++; 8430 priv->wstats.discard.misc++; 8431 break; 8432 } 8433 8434 switch (WLAN_FC_GET_TYPE 8435 (le16_to_cpu(header->frame_ctl))) { 8436 8437 case IEEE80211_FTYPE_MGMT: 8438 ipw_handle_mgmt_packet(priv, rxb, 8439 &stats); 8440 break; 8441 8442 case IEEE80211_FTYPE_CTL: 8443 break; 8444 8445 case IEEE80211_FTYPE_DATA: 8446 if (unlikely(!network_packet || 8447 is_duplicate_packet(priv, 8448 header))) 8449 { 8450 IPW_DEBUG_DROP("Dropping: " 8451 "%pM, " 8452 "%pM, " 8453 "%pM\n", 8454 header->addr1, 8455 header->addr2, 8456 header->addr3); 8457 break; 8458 } 8459 8460 ipw_handle_data_packet(priv, rxb, 8461 &stats); 8462 8463 break; 8464 } 8465 break; 8466 } 8467 8468 case RX_HOST_NOTIFICATION_TYPE:{ 8469 IPW_DEBUG_RX 8470 ("Notification: subtype=%02X flags=%02X size=%d\n", 8471 pkt->u.notification.subtype, 8472 pkt->u.notification.flags, 8473 le16_to_cpu(pkt->u.notification.size)); 8474 ipw_rx_notification(priv, &pkt->u.notification); 8475 break; 8476 } 8477 8478 default: 8479 IPW_DEBUG_RX("Bad Rx packet of type %d\n", 8480 pkt->header.message_type); 8481 break; 8482 } 8483 8484 /* For now we just don't re-use anything. 
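   (each rxb->skb is simply freed, the DMA mapping torn down and the rx_mem_buffer returned to the rx_used list for later replenishing).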
We can tweak this 8485 * later to try and re-use notification packets and SKBs that 8486 * fail to Rx correctly */ 8487 if (rxb->skb != NULL) { 8488 dev_kfree_skb_any(rxb->skb); 8489 rxb->skb = NULL; 8490 } 8491 8492 pci_unmap_single(priv->pci_dev, rxb->dma_addr, 8493 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 8494 list_add_tail(&rxb->list, &priv->rxq->rx_used); 8495 8496 i = (i + 1) % RX_QUEUE_SIZE; 8497 8498 /* If there are a lot of unused frames, restock the Rx queue 8499 * so the ucode won't assert */ 8500 if (fill_rx) { 8501 priv->rxq->read = i; 8502 ipw_rx_queue_replenish(priv); 8503 } 8504 } 8505 8506 /* Backtrack one entry */ 8507 priv->rxq->read = i; 8508 ipw_rx_queue_restock(priv); 8509} 8510 8511#define DEFAULT_RTS_THRESHOLD 2304U 8512#define MIN_RTS_THRESHOLD 1U 8513#define MAX_RTS_THRESHOLD 2304U 8514#define DEFAULT_BEACON_INTERVAL 100U 8515#define DEFAULT_SHORT_RETRY_LIMIT 7U 8516#define DEFAULT_LONG_RETRY_LIMIT 4U 8517 8518/** 8519 * ipw_sw_reset 8520 * @option: options to control different reset behaviour 8521 * 0 = reset everything except the 'disable' module_param 8522 * 1 = reset everything and print out driver info (for probe only) 8523 * 2 = reset everything 8524 */ 8525static int ipw_sw_reset(struct ipw_priv *priv, int option) 8526{ 8527 int band, modulation; 8528 int old_mode = priv->ieee->iw_mode; 8529 8530 /* Initialize module parameter values here */ 8531 priv->config = 0; 8532 8533 /* We default to disabling the LED code as right now it causes 8534 * too many systems to lock up... */ 8535 if (!led_support) 8536 priv->config |= CFG_NO_LED; 8537 8538 if (associate) 8539 priv->config |= CFG_ASSOCIATE; 8540 else 8541 IPW_DEBUG_INFO("Auto associate disabled.\n"); 8542 8543 if (auto_create) 8544 priv->config |= CFG_ADHOC_CREATE; 8545 else 8546 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n"); 8547 8548 priv->config &= ~CFG_STATIC_ESSID; 8549 priv->essid_len = 0; 8550 memset(priv->essid, 0, IW_ESSID_MAX_SIZE); 8551 8552 if (disable && option) { 8553 priv->status |= STATUS_RF_KILL_SW; 8554 IPW_DEBUG_INFO("Radio disabled.\n"); 8555 } 8556 8557 if (default_channel != 0) { 8558 priv->config |= CFG_STATIC_CHANNEL; 8559 priv->channel = default_channel; 8560 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel); 8561 /* TODO: Validate that provided channel is in range */ 8562 } 8563#ifdef CONFIG_IPW2200_QOS 8564 ipw_qos_init(priv, qos_enable, qos_burst_enable, 8565 burst_duration_CCK, burst_duration_OFDM); 8566#endif /* CONFIG_IPW2200_QOS */ 8567 8568 switch (network_mode) { 8569 case 1: 8570 priv->ieee->iw_mode = IW_MODE_ADHOC; 8571 priv->net_dev->type = ARPHRD_ETHER; 8572 8573 break; 8574#ifdef CONFIG_IPW2200_MONITOR 8575 case 2: 8576 priv->ieee->iw_mode = IW_MODE_MONITOR; 8577#ifdef CONFIG_IPW2200_RADIOTAP 8578 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 8579#else 8580 priv->net_dev->type = ARPHRD_IEEE80211; 8581#endif 8582 break; 8583#endif 8584 default: 8585 case 0: 8586 priv->net_dev->type = ARPHRD_ETHER; 8587 priv->ieee->iw_mode = IW_MODE_INFRA; 8588 break; 8589 } 8590 8591 if (hwcrypto) { 8592 priv->ieee->host_encrypt = 0; 8593 priv->ieee->host_encrypt_msdu = 0; 8594 priv->ieee->host_decrypt = 0; 8595 priv->ieee->host_mc_decrypt = 0; 8596 } 8597 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off"); 8598 8599 /* IPW2200/2915 is able to do hardware fragmentation.
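   Host-side fragmentation is therefore left disabled (host_open_frag is cleared just below), leaving fragmentation to the device rather than the 802.11 stack.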
 */ 8600 priv->ieee->host_open_frag = 0; 8601 8602 if ((priv->pci_dev->device == 0x4223) || 8603 (priv->pci_dev->device == 0x4224)) { 8604 if (option == 1) 8605 printk(KERN_INFO DRV_NAME 8606 ": Detected Intel PRO/Wireless 2915ABG Network " 8607 "Connection\n"); 8608 priv->ieee->abg_true = 1; 8609 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND; 8610 modulation = LIBIPW_OFDM_MODULATION | 8611 LIBIPW_CCK_MODULATION; 8612 priv->adapter = IPW_2915ABG; 8613 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B; 8614 } else { 8615 if (option == 1) 8616 printk(KERN_INFO DRV_NAME 8617 ": Detected Intel PRO/Wireless 2200BG Network " 8618 "Connection\n"); 8619 8620 priv->ieee->abg_true = 0; 8621 band = LIBIPW_24GHZ_BAND; 8622 modulation = LIBIPW_OFDM_MODULATION | 8623 LIBIPW_CCK_MODULATION; 8624 priv->adapter = IPW_2200BG; 8625 priv->ieee->mode = IEEE_G | IEEE_B; 8626 } 8627 8628 priv->ieee->freq_band = band; 8629 priv->ieee->modulation = modulation; 8630 8631 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK; 8632 8633 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; 8634 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; 8635 8636 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 8637 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT; 8638 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT; 8639 8640 /* If power management is turned on, default to AC mode */ 8641 priv->power_mode = IPW_POWER_AC; 8642 priv->tx_power = IPW_TX_POWER_DEFAULT; 8643 8644 return old_mode == priv->ieee->iw_mode; 8645} 8646 8647/* 8648 * This file defines the Wireless Extension handlers. It does not 8649 * define any methods of hardware manipulation and relies on the 8650 * functions defined in ipw_main to provide the HW interaction. 8651 * 8652 * The exception to this is the use of the ipw_get_ordinal() 8653 * function used to poll the hardware vs. making unnecessary calls.
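 * The handlers below therefore mostly just take priv->mutex, update the cached configuration in struct ipw_priv, and force a [re]association or send a host command when a setting actually changes.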
8654 * 8655 */ 8656 8657static int ipw_wx_get_name(struct net_device *dev, 8658 struct iw_request_info *info, 8659 union iwreq_data *wrqu, char *extra) 8660{ 8661 struct ipw_priv *priv = libipw_priv(dev); 8662 mutex_lock(&priv->mutex); 8663 if (priv->status & STATUS_RF_KILL_MASK) 8664 strcpy(wrqu->name, "radio off"); 8665 else if (!(priv->status & STATUS_ASSOCIATED)) 8666 strcpy(wrqu->name, "unassociated"); 8667 else 8668 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c", 8669 ipw_modes[priv->assoc_request.ieee_mode]); 8670 IPW_DEBUG_WX("Name: %s\n", wrqu->name); 8671 mutex_unlock(&priv->mutex); 8672 return 0; 8673} 8674 8675static int ipw_set_channel(struct ipw_priv *priv, u8 channel) 8676{ 8677 if (channel == 0) { 8678 IPW_DEBUG_INFO("Setting channel to ANY (0)\n"); 8679 priv->config &= ~CFG_STATIC_CHANNEL; 8680 IPW_DEBUG_ASSOC("Attempting to associate with new " 8681 "parameters.\n"); 8682 ipw_associate(priv); 8683 return 0; 8684 } 8685 8686 priv->config |= CFG_STATIC_CHANNEL; 8687 8688 if (priv->channel == channel) { 8689 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n", 8690 channel); 8691 return 0; 8692 } 8693 8694 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel); 8695 priv->channel = channel; 8696 8697#ifdef CONFIG_IPW2200_MONITOR 8698 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 8699 int i; 8700 if (priv->status & STATUS_SCANNING) { 8701 IPW_DEBUG_SCAN("Scan abort triggered due to " 8702 "channel change.\n"); 8703 ipw_abort_scan(priv); 8704 } 8705 8706 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--) 8707 udelay(10); 8708 8709 if (priv->status & STATUS_SCANNING) 8710 IPW_DEBUG_SCAN("Still scanning...\n"); 8711 else 8712 IPW_DEBUG_SCAN("Took %dms to abort current scan\n", 8713 1000 - i); 8714 8715 return 0; 8716 } 8717#endif /* CONFIG_IPW2200_MONITOR */ 8718 8719 /* Network configuration changed -- force [re]association */ 8720 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n"); 8721 if (!ipw_disassociate(priv)) 8722 ipw_associate(priv); 8723 8724 return 0; 8725} 8726 8727static int ipw_wx_set_freq(struct net_device *dev, 8728 struct iw_request_info *info, 8729 union iwreq_data *wrqu, char *extra) 8730{ 8731 struct ipw_priv *priv = libipw_priv(dev); 8732 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 8733 struct iw_freq *fwrq = &wrqu->freq; 8734 int ret = 0, i; 8735 u8 channel, flags; 8736 int band; 8737 8738 if (fwrq->m == 0) { 8739 IPW_DEBUG_WX("SET Freq/Channel -> any\n"); 8740 mutex_lock(&priv->mutex); 8741 ret = ipw_set_channel(priv, 0); 8742 mutex_unlock(&priv->mutex); 8743 return ret; 8744 } 8745 /* if setting by freq convert to channel */ 8746 if (fwrq->e == 1) { 8747 channel = libipw_freq_to_channel(priv->ieee, fwrq->m); 8748 if (channel == 0) 8749 return -EINVAL; 8750 } else 8751 channel = fwrq->m; 8752 8753 if (!(band = libipw_is_valid_channel(priv->ieee, channel))) 8754 return -EINVAL; 8755 8756 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 8757 i = libipw_channel_to_index(priv->ieee, channel); 8758 if (i == -1) 8759 return -EINVAL; 8760 8761 flags = (band == LIBIPW_24GHZ_BAND) ? 
8762 geo->bg[i].flags : geo->a[i].flags; 8763 if (flags & LIBIPW_CH_PASSIVE_ONLY) { 8764 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n"); 8765 return -EINVAL; 8766 } 8767 } 8768 8769 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); 8770 mutex_lock(&priv->mutex); 8771 ret = ipw_set_channel(priv, channel); 8772 mutex_unlock(&priv->mutex); 8773 return ret; 8774} 8775 8776static int ipw_wx_get_freq(struct net_device *dev, 8777 struct iw_request_info *info, 8778 union iwreq_data *wrqu, char *extra) 8779{ 8780 struct ipw_priv *priv = libipw_priv(dev); 8781 8782 wrqu->freq.e = 0; 8783 8784 /* If we are associated, trying to associate, or have a statically 8785 * configured CHANNEL then return that; otherwise return ANY */ 8786 mutex_lock(&priv->mutex); 8787 if (priv->config & CFG_STATIC_CHANNEL || 8788 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) { 8789 int i; 8790 8791 i = libipw_channel_to_index(priv->ieee, priv->channel); 8792 BUG_ON(i == -1); 8793 wrqu->freq.e = 1; 8794 8795 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 8796 case LIBIPW_52GHZ_BAND: 8797 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000; 8798 break; 8799 8800 case LIBIPW_24GHZ_BAND: 8801 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000; 8802 break; 8803 8804 default: 8805 BUG(); 8806 } 8807 } else 8808 wrqu->freq.m = 0; 8809 8810 mutex_unlock(&priv->mutex); 8811 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); 8812 return 0; 8813} 8814 8815static int ipw_wx_set_mode(struct net_device *dev, 8816 struct iw_request_info *info, 8817 union iwreq_data *wrqu, char *extra) 8818{ 8819 struct ipw_priv *priv = libipw_priv(dev); 8820 int err = 0; 8821 8822 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode); 8823 8824 switch (wrqu->mode) { 8825#ifdef CONFIG_IPW2200_MONITOR 8826 case IW_MODE_MONITOR: 8827#endif 8828 case IW_MODE_ADHOC: 8829 case IW_MODE_INFRA: 8830 break; 8831 case IW_MODE_AUTO: 8832 wrqu->mode = IW_MODE_INFRA; 8833 break; 8834 default: 8835 return -EINVAL; 8836 } 8837 if (wrqu->mode == priv->ieee->iw_mode) 8838 return 0; 8839 8840 mutex_lock(&priv->mutex); 8841 8842 ipw_sw_reset(priv, 0); 8843 8844#ifdef CONFIG_IPW2200_MONITOR 8845 if (priv->ieee->iw_mode == IW_MODE_MONITOR) 8846 priv->net_dev->type = ARPHRD_ETHER; 8847 8848 if (wrqu->mode == IW_MODE_MONITOR) 8849#ifdef CONFIG_IPW2200_RADIOTAP 8850 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 8851#else 8852 priv->net_dev->type = ARPHRD_IEEE80211; 8853#endif 8854#endif /* CONFIG_IPW2200_MONITOR */ 8855 8856 /* Free the existing firmware and reset the fw_loaded 8857 * flag so ipw_load() will bring in the new firmware */ 8858 free_firmware(); 8859 8860 priv->ieee->iw_mode = wrqu->mode; 8861 8862 queue_work(priv->workqueue, &priv->adapter_restart); 8863 mutex_unlock(&priv->mutex); 8864 return err; 8865} 8866 8867static int ipw_wx_get_mode(struct net_device *dev, 8868 struct iw_request_info *info, 8869 union iwreq_data *wrqu, char *extra) 8870{ 8871 struct ipw_priv *priv = libipw_priv(dev); 8872 mutex_lock(&priv->mutex); 8873 wrqu->mode = priv->ieee->iw_mode; 8874 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode); 8875 mutex_unlock(&priv->mutex); 8876 return 0; 8877} 8878 8879/* Values are in microsecond */ 8880static const s32 timeout_duration[] = { 8881 350000, 8882 250000, 8883 75000, 8884 37000, 8885 25000, 8886}; 8887 8888static const s32 period_duration[] = { 8889 400000, 8890 700000, 8891 1000000, 8892 1000000, 8893 1000000 8894}; 8895 8896static int ipw_wx_get_range(struct net_device *dev, 8897 struct iw_request_info *info, 8898 
union iwreq_data *wrqu, char *extra) 8899{ 8900 struct ipw_priv *priv = libipw_priv(dev); 8901 struct iw_range *range = (struct iw_range *)extra; 8902 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 8903 int i = 0, j; 8904 8905 wrqu->data.length = sizeof(*range); 8906 memset(range, 0, sizeof(*range)); 8907 8908 /* 54Mbs == ~27 Mb/s real (802.11g) */ 8909 range->throughput = 27 * 1000 * 1000; 8910 8911 range->max_qual.qual = 100; 8912 /* TODO: Find real max RSSI and stick here */ 8913 range->max_qual.level = 0; 8914 range->max_qual.noise = 0; 8915 range->max_qual.updated = 7; /* Updated all three */ 8916 8917 range->avg_qual.qual = 70; 8918 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ 8919 range->avg_qual.level = 0; /* FIXME to real average level */ 8920 range->avg_qual.noise = 0; 8921 range->avg_qual.updated = 7; /* Updated all three */ 8922 mutex_lock(&priv->mutex); 8923 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES); 8924 8925 for (i = 0; i < range->num_bitrates; i++) 8926 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) * 8927 500000; 8928 8929 range->max_rts = DEFAULT_RTS_THRESHOLD; 8930 range->min_frag = MIN_FRAG_THRESHOLD; 8931 range->max_frag = MAX_FRAG_THRESHOLD; 8932 8933 range->encoding_size[0] = 5; 8934 range->encoding_size[1] = 13; 8935 range->num_encoding_sizes = 2; 8936 range->max_encoding_tokens = WEP_KEYS; 8937 8938 /* Set the Wireless Extension versions */ 8939 range->we_version_compiled = WIRELESS_EXT; 8940 range->we_version_source = 18; 8941 8942 i = 0; 8943 if (priv->ieee->mode & (IEEE_B | IEEE_G)) { 8944 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) { 8945 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 8946 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY)) 8947 continue; 8948 8949 range->freq[i].i = geo->bg[j].channel; 8950 range->freq[i].m = geo->bg[j].freq * 100000; 8951 range->freq[i].e = 1; 8952 i++; 8953 } 8954 } 8955 8956 if (priv->ieee->mode & IEEE_A) { 8957 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) { 8958 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 8959 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY)) 8960 continue; 8961 8962 range->freq[i].i = geo->a[j].channel; 8963 range->freq[i].m = geo->a[j].freq * 100000; 8964 range->freq[i].e = 1; 8965 i++; 8966 } 8967 } 8968 8969 range->num_channels = i; 8970 range->num_frequency = i; 8971 8972 mutex_unlock(&priv->mutex); 8973 8974 /* Event capability (kernel + driver) */ 8975 range->event_capa[0] = (IW_EVENT_CAPA_K_0 | 8976 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | 8977 IW_EVENT_CAPA_MASK(SIOCGIWAP) | 8978 IW_EVENT_CAPA_MASK(SIOCGIWSCAN)); 8979 range->event_capa[1] = IW_EVENT_CAPA_K_1; 8980 8981 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | 8982 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; 8983 8984 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE; 8985 8986 IPW_DEBUG_WX("GET Range\n"); 8987 return 0; 8988} 8989 8990static int ipw_wx_set_wap(struct net_device *dev, 8991 struct iw_request_info *info, 8992 union iwreq_data *wrqu, char *extra) 8993{ 8994 struct ipw_priv *priv = libipw_priv(dev); 8995 8996 static const unsigned char any[] = { 8997 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 8998 }; 8999 static const unsigned char off[] = { 9000 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 9001 }; 9002 9003 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 9004 return -EINVAL; 9005 mutex_lock(&priv->mutex); 9006 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || 9007 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { 9008 /* we disable
mandatory BSSID association */ 9009 IPW_DEBUG_WX("Setting AP BSSID to ANY\n"); 9010 priv->config &= ~CFG_STATIC_BSSID; 9011 IPW_DEBUG_ASSOC("Attempting to associate with new " 9012 "parameters.\n"); 9013 ipw_associate(priv); 9014 mutex_unlock(&priv->mutex); 9015 return 0; 9016 } 9017 9018 priv->config |= CFG_STATIC_BSSID; 9019 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) { 9020 IPW_DEBUG_WX("BSSID set to current BSSID.\n"); 9021 mutex_unlock(&priv->mutex); 9022 return 0; 9023 } 9024 9025 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n", 9026 wrqu->ap_addr.sa_data); 9027 9028 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); 9029 9030 /* Network configuration changed -- force [re]association */ 9031 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n"); 9032 if (!ipw_disassociate(priv)) 9033 ipw_associate(priv); 9034 9035 mutex_unlock(&priv->mutex); 9036 return 0; 9037} 9038 9039static int ipw_wx_get_wap(struct net_device *dev, 9040 struct iw_request_info *info, 9041 union iwreq_data *wrqu, char *extra) 9042{ 9043 struct ipw_priv *priv = libipw_priv(dev); 9044 9045 /* If we are associated, trying to associate, or have a statically 9046 * configured BSSID then return that; otherwise return ANY */ 9047 mutex_lock(&priv->mutex); 9048 if (priv->config & CFG_STATIC_BSSID || 9049 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 9050 wrqu->ap_addr.sa_family = ARPHRD_ETHER; 9051 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); 9052 } else 9053 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); 9054 9055 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", 9056 wrqu->ap_addr.sa_data); 9057 mutex_unlock(&priv->mutex); 9058 return 0; 9059} 9060 9061static int ipw_wx_set_essid(struct net_device *dev, 9062 struct iw_request_info *info, 9063 union iwreq_data *wrqu, char *extra) 9064{ 9065 struct ipw_priv *priv = libipw_priv(dev); 9066 int length; 9067 DECLARE_SSID_BUF(ssid); 9068 9069 mutex_lock(&priv->mutex); 9070 9071 if (!wrqu->essid.flags) 9072 { 9073 IPW_DEBUG_WX("Setting ESSID to ANY\n"); 9074 ipw_disassociate(priv); 9075 priv->config &= ~CFG_STATIC_ESSID; 9076 ipw_associate(priv); 9077 mutex_unlock(&priv->mutex); 9078 return 0; 9079 } 9080 9081 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE); 9082 9083 priv->config |= CFG_STATIC_ESSID; 9084 9085 if (priv->essid_len == length && !memcmp(priv->essid, extra, length) 9086 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) { 9087 IPW_DEBUG_WX("ESSID set to current ESSID.\n"); 9088 mutex_unlock(&priv->mutex); 9089 return 0; 9090 } 9091 9092 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", 9093 print_ssid(ssid, extra, length), length); 9094 9095 priv->essid_len = length; 9096 memcpy(priv->essid, extra, priv->essid_len); 9097 9098 /* Network configuration changed -- force [re]association */ 9099 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n"); 9100 if (!ipw_disassociate(priv)) 9101 ipw_associate(priv); 9102 9103 mutex_unlock(&priv->mutex); 9104 return 0; 9105} 9106 9107static int ipw_wx_get_essid(struct net_device *dev, 9108 struct iw_request_info *info, 9109 union iwreq_data *wrqu, char *extra) 9110{ 9111 struct ipw_priv *priv = libipw_priv(dev); 9112 DECLARE_SSID_BUF(ssid); 9113 9114 /* If we are associated, trying to associate, or have a statically 9115 * configured ESSID then return that; otherwise return ANY */ 9116 mutex_lock(&priv->mutex); 9117 if (priv->config & CFG_STATIC_ESSID || 9118 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 9119 IPW_DEBUG_WX("Getting essid: '%s'\n", 
9120 print_ssid(ssid, priv->essid, priv->essid_len)); 9121 memcpy(extra, priv->essid, priv->essid_len); 9122 wrqu->essid.length = priv->essid_len; 9123 wrqu->essid.flags = 1; /* active */ 9124 } else { 9125 IPW_DEBUG_WX("Getting essid: ANY\n"); 9126 wrqu->essid.length = 0; 9127 wrqu->essid.flags = 0; /* active */ 9128 } 9129 mutex_unlock(&priv->mutex); 9130 return 0; 9131} 9132 9133static int ipw_wx_set_nick(struct net_device *dev, 9134 struct iw_request_info *info, 9135 union iwreq_data *wrqu, char *extra) 9136{ 9137 struct ipw_priv *priv = libipw_priv(dev); 9138 9139 IPW_DEBUG_WX("Setting nick to '%s'\n", extra); 9140 if (wrqu->data.length > IW_ESSID_MAX_SIZE) 9141 return -E2BIG; 9142 mutex_lock(&priv->mutex); 9143 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); 9144 memset(priv->nick, 0, sizeof(priv->nick)); 9145 memcpy(priv->nick, extra, wrqu->data.length); 9146 IPW_DEBUG_TRACE("<<\n"); 9147 mutex_unlock(&priv->mutex); 9148 return 0; 9149 9150} 9151 9152static int ipw_wx_get_nick(struct net_device *dev, 9153 struct iw_request_info *info, 9154 union iwreq_data *wrqu, char *extra) 9155{ 9156 struct ipw_priv *priv = libipw_priv(dev); 9157 IPW_DEBUG_WX("Getting nick\n"); 9158 mutex_lock(&priv->mutex); 9159 wrqu->data.length = strlen(priv->nick); 9160 memcpy(extra, priv->nick, wrqu->data.length); 9161 wrqu->data.flags = 1; /* active */ 9162 mutex_unlock(&priv->mutex); 9163 return 0; 9164} 9165 9166static int ipw_wx_set_sens(struct net_device *dev, 9167 struct iw_request_info *info, 9168 union iwreq_data *wrqu, char *extra) 9169{ 9170 struct ipw_priv *priv = libipw_priv(dev); 9171 int err = 0; 9172 9173 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value); 9174 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value); 9175 mutex_lock(&priv->mutex); 9176 9177 if (wrqu->sens.fixed == 0) 9178 { 9179 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; 9180 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; 9181 goto out; 9182 } 9183 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) || 9184 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) { 9185 err = -EINVAL; 9186 goto out; 9187 } 9188 9189 priv->roaming_threshold = wrqu->sens.value; 9190 priv->disassociate_threshold = 3*wrqu->sens.value; 9191 out: 9192 mutex_unlock(&priv->mutex); 9193 return err; 9194} 9195 9196static int ipw_wx_get_sens(struct net_device *dev, 9197 struct iw_request_info *info, 9198 union iwreq_data *wrqu, char *extra) 9199{ 9200 struct ipw_priv *priv = libipw_priv(dev); 9201 mutex_lock(&priv->mutex); 9202 wrqu->sens.fixed = 1; 9203 wrqu->sens.value = priv->roaming_threshold; 9204 mutex_unlock(&priv->mutex); 9205 9206 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n", 9207 wrqu->power.disabled ? 
"OFF" : "ON", wrqu->power.value); 9208 9209 return 0; 9210} 9211 9212static int ipw_wx_set_rate(struct net_device *dev, 9213 struct iw_request_info *info, 9214 union iwreq_data *wrqu, char *extra) 9215{ 9216 /* TODO: We should use semaphores or locks for access to priv */ 9217 struct ipw_priv *priv = libipw_priv(dev); 9218 u32 target_rate = wrqu->bitrate.value; 9219 u32 fixed, mask; 9220 9221 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */ 9222 /* value = X, fixed = 1 means only rate X */ 9223 /* value = X, fixed = 0 means all rates lower equal X */ 9224 9225 if (target_rate == -1) { 9226 fixed = 0; 9227 mask = LIBIPW_DEFAULT_RATES_MASK; 9228 /* Now we should reassociate */ 9229 goto apply; 9230 } 9231 9232 mask = 0; 9233 fixed = wrqu->bitrate.fixed; 9234 9235 if (target_rate == 1000000 || !fixed) 9236 mask |= LIBIPW_CCK_RATE_1MB_MASK; 9237 if (target_rate == 1000000) 9238 goto apply; 9239 9240 if (target_rate == 2000000 || !fixed) 9241 mask |= LIBIPW_CCK_RATE_2MB_MASK; 9242 if (target_rate == 2000000) 9243 goto apply; 9244 9245 if (target_rate == 5500000 || !fixed) 9246 mask |= LIBIPW_CCK_RATE_5MB_MASK; 9247 if (target_rate == 5500000) 9248 goto apply; 9249 9250 if (target_rate == 6000000 || !fixed) 9251 mask |= LIBIPW_OFDM_RATE_6MB_MASK; 9252 if (target_rate == 6000000) 9253 goto apply; 9254 9255 if (target_rate == 9000000 || !fixed) 9256 mask |= LIBIPW_OFDM_RATE_9MB_MASK; 9257 if (target_rate == 9000000) 9258 goto apply; 9259 9260 if (target_rate == 11000000 || !fixed) 9261 mask |= LIBIPW_CCK_RATE_11MB_MASK; 9262 if (target_rate == 11000000) 9263 goto apply; 9264 9265 if (target_rate == 12000000 || !fixed) 9266 mask |= LIBIPW_OFDM_RATE_12MB_MASK; 9267 if (target_rate == 12000000) 9268 goto apply; 9269 9270 if (target_rate == 18000000 || !fixed) 9271 mask |= LIBIPW_OFDM_RATE_18MB_MASK; 9272 if (target_rate == 18000000) 9273 goto apply; 9274 9275 if (target_rate == 24000000 || !fixed) 9276 mask |= LIBIPW_OFDM_RATE_24MB_MASK; 9277 if (target_rate == 24000000) 9278 goto apply; 9279 9280 if (target_rate == 36000000 || !fixed) 9281 mask |= LIBIPW_OFDM_RATE_36MB_MASK; 9282 if (target_rate == 36000000) 9283 goto apply; 9284 9285 if (target_rate == 48000000 || !fixed) 9286 mask |= LIBIPW_OFDM_RATE_48MB_MASK; 9287 if (target_rate == 48000000) 9288 goto apply; 9289 9290 if (target_rate == 54000000 || !fixed) 9291 mask |= LIBIPW_OFDM_RATE_54MB_MASK; 9292 if (target_rate == 54000000) 9293 goto apply; 9294 9295 IPW_DEBUG_WX("invalid rate specified, returning error\n"); 9296 return -EINVAL; 9297 9298 apply: 9299 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n", 9300 mask, fixed ? 
"fixed" : "sub-rates"); 9301 mutex_lock(&priv->mutex); 9302 if (mask == LIBIPW_DEFAULT_RATES_MASK) { 9303 priv->config &= ~CFG_FIXED_RATE; 9304 ipw_set_fixed_rate(priv, priv->ieee->mode); 9305 } else 9306 priv->config |= CFG_FIXED_RATE; 9307 9308 if (priv->rates_mask == mask) { 9309 IPW_DEBUG_WX("Mask set to current mask.\n"); 9310 mutex_unlock(&priv->mutex); 9311 return 0; 9312 } 9313 9314 priv->rates_mask = mask; 9315 9316 /* Network configuration changed -- force [re]association */ 9317 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n"); 9318 if (!ipw_disassociate(priv)) 9319 ipw_associate(priv); 9320 9321 mutex_unlock(&priv->mutex); 9322 return 0; 9323} 9324 9325static int ipw_wx_get_rate(struct net_device *dev, 9326 struct iw_request_info *info, 9327 union iwreq_data *wrqu, char *extra) 9328{ 9329 struct ipw_priv *priv = libipw_priv(dev); 9330 mutex_lock(&priv->mutex); 9331 wrqu->bitrate.value = priv->last_rate; 9332 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0; 9333 mutex_unlock(&priv->mutex); 9334 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); 9335 return 0; 9336} 9337 9338static int ipw_wx_set_rts(struct net_device *dev, 9339 struct iw_request_info *info, 9340 union iwreq_data *wrqu, char *extra) 9341{ 9342 struct ipw_priv *priv = libipw_priv(dev); 9343 mutex_lock(&priv->mutex); 9344 if (wrqu->rts.disabled || !wrqu->rts.fixed) 9345 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 9346 else { 9347 if (wrqu->rts.value < MIN_RTS_THRESHOLD || 9348 wrqu->rts.value > MAX_RTS_THRESHOLD) { 9349 mutex_unlock(&priv->mutex); 9350 return -EINVAL; 9351 } 9352 priv->rts_threshold = wrqu->rts.value; 9353 } 9354 9355 ipw_send_rts_threshold(priv, priv->rts_threshold); 9356 mutex_unlock(&priv->mutex); 9357 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold); 9358 return 0; 9359} 9360 9361static int ipw_wx_get_rts(struct net_device *dev, 9362 struct iw_request_info *info, 9363 union iwreq_data *wrqu, char *extra) 9364{ 9365 struct ipw_priv *priv = libipw_priv(dev); 9366 mutex_lock(&priv->mutex); 9367 wrqu->rts.value = priv->rts_threshold; 9368 wrqu->rts.fixed = 0; /* no auto select */ 9369 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); 9370 mutex_unlock(&priv->mutex); 9371 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); 9372 return 0; 9373} 9374 9375static int ipw_wx_set_txpow(struct net_device *dev, 9376 struct iw_request_info *info, 9377 union iwreq_data *wrqu, char *extra) 9378{ 9379 struct ipw_priv *priv = libipw_priv(dev); 9380 int err = 0; 9381 9382 mutex_lock(&priv->mutex); 9383 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) { 9384 err = -EINPROGRESS; 9385 goto out; 9386 } 9387 9388 if (!wrqu->power.fixed) 9389 wrqu->power.value = IPW_TX_POWER_DEFAULT; 9390 9391 if (wrqu->power.flags != IW_TXPOW_DBM) { 9392 err = -EINVAL; 9393 goto out; 9394 } 9395 9396 if ((wrqu->power.value > IPW_TX_POWER_MAX) || 9397 (wrqu->power.value < IPW_TX_POWER_MIN)) { 9398 err = -EINVAL; 9399 goto out; 9400 } 9401 9402 priv->tx_power = wrqu->power.value; 9403 err = ipw_set_tx_power(priv); 9404 out: 9405 mutex_unlock(&priv->mutex); 9406 return err; 9407} 9408 9409static int ipw_wx_get_txpow(struct net_device *dev, 9410 struct iw_request_info *info, 9411 union iwreq_data *wrqu, char *extra) 9412{ 9413 struct ipw_priv *priv = libipw_priv(dev); 9414 mutex_lock(&priv->mutex); 9415 wrqu->power.value = priv->tx_power; 9416 wrqu->power.fixed = 1; 9417 wrqu->power.flags = IW_TXPOW_DBM; 9418 wrqu->power.disabled = (priv->status & 
STATUS_RF_KILL_MASK) ? 1 : 0; 9419 mutex_unlock(&priv->mutex); 9420 9421 IPW_DEBUG_WX("GET TX Power -> %s %d \n", 9422 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); 9423 9424 return 0; 9425} 9426 9427static int ipw_wx_set_frag(struct net_device *dev, 9428 struct iw_request_info *info, 9429 union iwreq_data *wrqu, char *extra) 9430{ 9431 struct ipw_priv *priv = libipw_priv(dev); 9432 mutex_lock(&priv->mutex); 9433 if (wrqu->frag.disabled || !wrqu->frag.fixed) 9434 priv->ieee->fts = DEFAULT_FTS; 9435 else { 9436 if (wrqu->frag.value < MIN_FRAG_THRESHOLD || 9437 wrqu->frag.value > MAX_FRAG_THRESHOLD) { 9438 mutex_unlock(&priv->mutex); 9439 return -EINVAL; 9440 } 9441 9442 priv->ieee->fts = wrqu->frag.value & ~0x1; 9443 } 9444 9445 ipw_send_frag_threshold(priv, wrqu->frag.value); 9446 mutex_unlock(&priv->mutex); 9447 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value); 9448 return 0; 9449} 9450 9451static int ipw_wx_get_frag(struct net_device *dev, 9452 struct iw_request_info *info, 9453 union iwreq_data *wrqu, char *extra) 9454{ 9455 struct ipw_priv *priv = libipw_priv(dev); 9456 mutex_lock(&priv->mutex); 9457 wrqu->frag.value = priv->ieee->fts; 9458 wrqu->frag.fixed = 0; /* no auto select */ 9459 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); 9460 mutex_unlock(&priv->mutex); 9461 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); 9462 9463 return 0; 9464} 9465 9466static int ipw_wx_set_retry(struct net_device *dev, 9467 struct iw_request_info *info, 9468 union iwreq_data *wrqu, char *extra) 9469{ 9470 struct ipw_priv *priv = libipw_priv(dev); 9471 9472 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled) 9473 return -EINVAL; 9474 9475 if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) 9476 return 0; 9477 9478 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255) 9479 return -EINVAL; 9480 9481 mutex_lock(&priv->mutex); 9482 if (wrqu->retry.flags & IW_RETRY_SHORT) 9483 priv->short_retry_limit = (u8) wrqu->retry.value; 9484 else if (wrqu->retry.flags & IW_RETRY_LONG) 9485 priv->long_retry_limit = (u8) wrqu->retry.value; 9486 else { 9487 priv->short_retry_limit = (u8) wrqu->retry.value; 9488 priv->long_retry_limit = (u8) wrqu->retry.value; 9489 } 9490 9491 ipw_send_retry_limit(priv, priv->short_retry_limit, 9492 priv->long_retry_limit); 9493 mutex_unlock(&priv->mutex); 9494 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n", 9495 priv->short_retry_limit, priv->long_retry_limit); 9496 return 0; 9497} 9498 9499static int ipw_wx_get_retry(struct net_device *dev, 9500 struct iw_request_info *info, 9501 union iwreq_data *wrqu, char *extra) 9502{ 9503 struct ipw_priv *priv = libipw_priv(dev); 9504 9505 mutex_lock(&priv->mutex); 9506 wrqu->retry.disabled = 0; 9507 9508 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { 9509 mutex_unlock(&priv->mutex); 9510 return -EINVAL; 9511 } 9512 9513 if (wrqu->retry.flags & IW_RETRY_LONG) { 9514 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG; 9515 wrqu->retry.value = priv->long_retry_limit; 9516 } else if (wrqu->retry.flags & IW_RETRY_SHORT) { 9517 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT; 9518 wrqu->retry.value = priv->short_retry_limit; 9519 } else { 9520 wrqu->retry.flags = IW_RETRY_LIMIT; 9521 wrqu->retry.value = priv->short_retry_limit; 9522 } 9523 mutex_unlock(&priv->mutex); 9524 9525 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value); 9526 9527 return 0; 9528} 9529 9530static int ipw_wx_set_scan(struct net_device *dev, 9531 struct iw_request_info *info, 9532 union iwreq_data 
*wrqu, char *extra) 9533{ 9534 struct ipw_priv *priv = libipw_priv(dev); 9535 struct iw_scan_req *req = (struct iw_scan_req *)extra; 9536 struct delayed_work *work = NULL; 9537 9538 mutex_lock(&priv->mutex); 9539 9540 priv->user_requested_scan = 1; 9541 9542 if (wrqu->data.length == sizeof(struct iw_scan_req)) { 9543 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { 9544 int len = min((int)req->essid_len, 9545 (int)sizeof(priv->direct_scan_ssid)); 9546 memcpy(priv->direct_scan_ssid, req->essid, len); 9547 priv->direct_scan_ssid_len = len; 9548 work = &priv->request_direct_scan; 9549 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { 9550 work = &priv->request_passive_scan; 9551 } 9552 } else { 9553 /* Normal active broadcast scan */ 9554 work = &priv->request_scan; 9555 } 9556 9557 mutex_unlock(&priv->mutex); 9558 9559 IPW_DEBUG_WX("Start scan\n"); 9560 9561 queue_delayed_work(priv->workqueue, work, 0); 9562 9563 return 0; 9564} 9565 9566static int ipw_wx_get_scan(struct net_device *dev, 9567 struct iw_request_info *info, 9568 union iwreq_data *wrqu, char *extra) 9569{ 9570 struct ipw_priv *priv = libipw_priv(dev); 9571 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra); 9572} 9573 9574static int ipw_wx_set_encode(struct net_device *dev, 9575 struct iw_request_info *info, 9576 union iwreq_data *wrqu, char *key) 9577{ 9578 struct ipw_priv *priv = libipw_priv(dev); 9579 int ret; 9580 u32 cap = priv->capability; 9581 9582 mutex_lock(&priv->mutex); 9583 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key); 9584 9585 /* In IBSS mode, we need to notify the firmware to update 9586 * the beacon info after we changed the capability. */ 9587 if (cap != priv->capability && 9588 priv->ieee->iw_mode == IW_MODE_ADHOC && 9589 priv->status & STATUS_ASSOCIATED) 9590 ipw_disassociate(priv); 9591 9592 mutex_unlock(&priv->mutex); 9593 return ret; 9594} 9595 9596static int ipw_wx_get_encode(struct net_device *dev, 9597 struct iw_request_info *info, 9598 union iwreq_data *wrqu, char *key) 9599{ 9600 struct ipw_priv *priv = libipw_priv(dev); 9601 return libipw_wx_get_encode(priv->ieee, info, wrqu, key); 9602} 9603 9604static int ipw_wx_set_power(struct net_device *dev, 9605 struct iw_request_info *info, 9606 union iwreq_data *wrqu, char *extra) 9607{ 9608 struct ipw_priv *priv = libipw_priv(dev); 9609 int err; 9610 mutex_lock(&priv->mutex); 9611 if (wrqu->power.disabled) { 9612 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); 9613 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM); 9614 if (err) { 9615 IPW_DEBUG_WX("failed setting power mode.\n"); 9616 mutex_unlock(&priv->mutex); 9617 return err; 9618 } 9619 IPW_DEBUG_WX("SET Power Management Mode -> off\n"); 9620 mutex_unlock(&priv->mutex); 9621 return 0; 9622 } 9623 9624 switch (wrqu->power.flags & IW_POWER_MODE) { 9625 case IW_POWER_ON: /* If not specified */ 9626 case IW_POWER_MODE: /* If set all mask */ 9627 case IW_POWER_ALL_R: /* If explicitly state all */ 9628 break; 9629 default: /* Otherwise we don't support it */ 9630 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", 9631 wrqu->power.flags); 9632 mutex_unlock(&priv->mutex); 9633 return -EOPNOTSUPP; 9634 } 9635 9636 /* If the user hasn't specified a power management mode yet, default 9637 * to BATTERY */ 9638 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC) 9639 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY; 9640 else 9641 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode; 9642 9643 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); 9644 if (err) { 
9645 IPW_DEBUG_WX("failed setting power mode.\n"); 9646 mutex_unlock(&priv->mutex); 9647 return err; 9648 } 9649 9650 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); 9651 mutex_unlock(&priv->mutex); 9652 return 0; 9653} 9654 9655static int ipw_wx_get_power(struct net_device *dev, 9656 struct iw_request_info *info, 9657 union iwreq_data *wrqu, char *extra) 9658{ 9659 struct ipw_priv *priv = libipw_priv(dev); 9660 mutex_lock(&priv->mutex); 9661 if (!(priv->power_mode & IPW_POWER_ENABLED)) 9662 wrqu->power.disabled = 1; 9663 else 9664 wrqu->power.disabled = 0; 9665 9666 mutex_unlock(&priv->mutex); 9667 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); 9668 9669 return 0; 9670} 9671 9672static int ipw_wx_set_powermode(struct net_device *dev, 9673 struct iw_request_info *info, 9674 union iwreq_data *wrqu, char *extra) 9675{ 9676 struct ipw_priv *priv = libipw_priv(dev); 9677 int mode = *(int *)extra; 9678 int err; 9679 9680 mutex_lock(&priv->mutex); 9681 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) 9682 mode = IPW_POWER_AC; 9683 9684 if (IPW_POWER_LEVEL(priv->power_mode) != mode) { 9685 err = ipw_send_power_mode(priv, mode); 9686 if (err) { 9687 IPW_DEBUG_WX("failed setting power mode.\n"); 9688 mutex_unlock(&priv->mutex); 9689 return err; 9690 } 9691 priv->power_mode = IPW_POWER_ENABLED | mode; 9692 } 9693 mutex_unlock(&priv->mutex); 9694 return 0; 9695} 9696 9697#define MAX_WX_STRING 80 9698static int ipw_wx_get_powermode(struct net_device *dev, 9699 struct iw_request_info *info, 9700 union iwreq_data *wrqu, char *extra) 9701{ 9702 struct ipw_priv *priv = libipw_priv(dev); 9703 int level = IPW_POWER_LEVEL(priv->power_mode); 9704 char *p = extra; 9705 9706 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level); 9707 9708 switch (level) { 9709 case IPW_POWER_AC: 9710 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)"); 9711 break; 9712 case IPW_POWER_BATTERY: 9713 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)"); 9714 break; 9715 default: 9716 p += snprintf(p, MAX_WX_STRING - (p - extra), 9717 "(Timeout %dms, Period %dms)", 9718 timeout_duration[level - 1] / 1000, 9719 period_duration[level - 1] / 1000); 9720 } 9721 9722 if (!(priv->power_mode & IPW_POWER_ENABLED)) 9723 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF"); 9724 9725 wrqu->data.length = p - extra + 1; 9726 9727 return 0; 9728} 9729 9730static int ipw_wx_set_wireless_mode(struct net_device *dev, 9731 struct iw_request_info *info, 9732 union iwreq_data *wrqu, char *extra) 9733{ 9734 struct ipw_priv *priv = libipw_priv(dev); 9735 int mode = *(int *)extra; 9736 u8 band = 0, modulation = 0; 9737 9738 if (mode == 0 || mode & ~IEEE_MODE_MASK) { 9739 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode); 9740 return -EINVAL; 9741 } 9742 mutex_lock(&priv->mutex); 9743 if (priv->adapter == IPW_2915ABG) { 9744 priv->ieee->abg_true = 1; 9745 if (mode & IEEE_A) { 9746 band |= LIBIPW_52GHZ_BAND; 9747 modulation |= LIBIPW_OFDM_MODULATION; 9748 } else 9749 priv->ieee->abg_true = 0; 9750 } else { 9751 if (mode & IEEE_A) { 9752 IPW_WARNING("Attempt to set 2200BG into " 9753 "802.11a mode\n"); 9754 mutex_unlock(&priv->mutex); 9755 return -EINVAL; 9756 } 9757 9758 priv->ieee->abg_true = 0; 9759 } 9760 9761 if (mode & IEEE_B) { 9762 band |= LIBIPW_24GHZ_BAND; 9763 modulation |= LIBIPW_CCK_MODULATION; 9764 } else 9765 priv->ieee->abg_true = 0; 9766 9767 if (mode & IEEE_G) { 9768 band |= LIBIPW_24GHZ_BAND; 9769 modulation |= LIBIPW_OFDM_MODULATION; 9770 } else 9771 
priv->ieee->abg_true = 0; 9772 9773 priv->ieee->mode = mode; 9774 priv->ieee->freq_band = band; 9775 priv->ieee->modulation = modulation; 9776 init_supported_rates(priv, &priv->rates); 9777 9778 /* Network configuration changed -- force [re]association */ 9779 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n"); 9780 if (!ipw_disassociate(priv)) { 9781 ipw_send_supported_rates(priv, &priv->rates); 9782 ipw_associate(priv); 9783 } 9784 9785 /* Update the band LEDs */ 9786 ipw_led_band_on(priv); 9787 9788 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", 9789 mode & IEEE_A ? 'a' : '.', 9790 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.'); 9791 mutex_unlock(&priv->mutex); 9792 return 0; 9793} 9794 9795static int ipw_wx_get_wireless_mode(struct net_device *dev, 9796 struct iw_request_info *info, 9797 union iwreq_data *wrqu, char *extra) 9798{ 9799 struct ipw_priv *priv = libipw_priv(dev); 9800 mutex_lock(&priv->mutex); 9801 switch (priv->ieee->mode) { 9802 case IEEE_A: 9803 strncpy(extra, "802.11a (1)", MAX_WX_STRING); 9804 break; 9805 case IEEE_B: 9806 strncpy(extra, "802.11b (2)", MAX_WX_STRING); 9807 break; 9808 case IEEE_A | IEEE_B: 9809 strncpy(extra, "802.11ab (3)", MAX_WX_STRING); 9810 break; 9811 case IEEE_G: 9812 strncpy(extra, "802.11g (4)", MAX_WX_STRING); 9813 break; 9814 case IEEE_A | IEEE_G: 9815 strncpy(extra, "802.11ag (5)", MAX_WX_STRING); 9816 break; 9817 case IEEE_B | IEEE_G: 9818 strncpy(extra, "802.11bg (6)", MAX_WX_STRING); 9819 break; 9820 case IEEE_A | IEEE_B | IEEE_G: 9821 strncpy(extra, "802.11abg (7)", MAX_WX_STRING); 9822 break; 9823 default: 9824 strncpy(extra, "unknown", MAX_WX_STRING); 9825 break; 9826 } 9827 9828 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); 9829 9830 wrqu->data.length = strlen(extra) + 1; 9831 mutex_unlock(&priv->mutex); 9832 9833 return 0; 9834} 9835 9836static int ipw_wx_set_preamble(struct net_device *dev, 9837 struct iw_request_info *info, 9838 union iwreq_data *wrqu, char *extra) 9839{ 9840 struct ipw_priv *priv = libipw_priv(dev); 9841 int mode = *(int *)extra; 9842 mutex_lock(&priv->mutex); 9843 /* Switching from SHORT -> LONG requires a disassociation */ 9844 if (mode == 1) { 9845 if (!(priv->config & CFG_PREAMBLE_LONG)) { 9846 priv->config |= CFG_PREAMBLE_LONG; 9847 9848 /* Network configuration changed -- force [re]association */ 9849 IPW_DEBUG_ASSOC 9850 ("[re]association triggered due to preamble change.\n"); 9851 if (!ipw_disassociate(priv)) 9852 ipw_associate(priv); 9853 } 9854 goto done; 9855 } 9856 9857 if (mode == 0) { 9858 priv->config &= ~CFG_PREAMBLE_LONG; 9859 goto done; 9860 } 9861 mutex_unlock(&priv->mutex); 9862 return -EINVAL; 9863 9864 done: 9865 mutex_unlock(&priv->mutex); 9866 return 0; 9867} 9868 9869static int ipw_wx_get_preamble(struct net_device *dev, 9870 struct iw_request_info *info, 9871 union iwreq_data *wrqu, char *extra) 9872{ 9873 struct ipw_priv *priv = libipw_priv(dev); 9874 mutex_lock(&priv->mutex); 9875 if (priv->config & CFG_PREAMBLE_LONG) 9876 snprintf(wrqu->name, IFNAMSIZ, "long (1)"); 9877 else 9878 snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); 9879 mutex_unlock(&priv->mutex); 9880 return 0; 9881} 9882 9883#ifdef CONFIG_IPW2200_MONITOR 9884static int ipw_wx_set_monitor(struct net_device *dev, 9885 struct iw_request_info *info, 9886 union iwreq_data *wrqu, char *extra) 9887{ 9888 struct ipw_priv *priv = libipw_priv(dev); 9889 int *parms = (int *)extra; 9890 int enable = (parms[0] > 0); 9891 mutex_lock(&priv->mutex); 9892 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); 9893 if (enable) 
{ 9894 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 9895#ifdef CONFIG_IPW2200_RADIOTAP 9896 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 9897#else 9898 priv->net_dev->type = ARPHRD_IEEE80211; 9899#endif 9900 queue_work(priv->workqueue, &priv->adapter_restart); 9901 } 9902 9903 ipw_set_channel(priv, parms[1]); 9904 } else { 9905 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 9906 mutex_unlock(&priv->mutex); 9907 return 0; 9908 } 9909 priv->net_dev->type = ARPHRD_ETHER; 9910 queue_work(priv->workqueue, &priv->adapter_restart); 9911 } 9912 mutex_unlock(&priv->mutex); 9913 return 0; 9914} 9915 9916#endif /* CONFIG_IPW2200_MONITOR */ 9917 9918static int ipw_wx_reset(struct net_device *dev, 9919 struct iw_request_info *info, 9920 union iwreq_data *wrqu, char *extra) 9921{ 9922 struct ipw_priv *priv = libipw_priv(dev); 9923 IPW_DEBUG_WX("RESET\n"); 9924 queue_work(priv->workqueue, &priv->adapter_restart); 9925 return 0; 9926} 9927 9928static int ipw_wx_sw_reset(struct net_device *dev, 9929 struct iw_request_info *info, 9930 union iwreq_data *wrqu, char *extra) 9931{ 9932 struct ipw_priv *priv = libipw_priv(dev); 9933 union iwreq_data wrqu_sec = { 9934 .encoding = { 9935 .flags = IW_ENCODE_DISABLED, 9936 }, 9937 }; 9938 int ret; 9939 9940 IPW_DEBUG_WX("SW_RESET\n"); 9941 9942 mutex_lock(&priv->mutex); 9943 9944 ret = ipw_sw_reset(priv, 2); 9945 if (!ret) { 9946 free_firmware(); 9947 ipw_adapter_restart(priv); 9948 } 9949 9950 /* The SW reset bit might have been toggled on by the 'disable' 9951 * module parameter, so take appropriate action */ 9952 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW); 9953 9954 mutex_unlock(&priv->mutex); 9955 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL); 9956 mutex_lock(&priv->mutex); 9957 9958 if (!(priv->status & STATUS_RF_KILL_MASK)) { 9959 /* Configuration likely changed -- force [re]association */ 9960 IPW_DEBUG_ASSOC("[re]association triggered due to sw " 9961 "reset.\n"); 9962 if (!ipw_disassociate(priv)) 9963 ipw_associate(priv); 9964 } 9965 9966 mutex_unlock(&priv->mutex); 9967 9968 return 0; 9969} 9970 9971/* Rebase the WE IOCTLs to zero for the handler array */ 9972#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT] 9973static iw_handler ipw_wx_handlers[] = { 9974 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name, 9975 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq, 9976 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, 9977 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, 9978 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, 9979 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens, 9980 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens, 9981 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, 9982 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, 9983 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, 9984 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan, 9985 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan, 9986 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid, 9987 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid, 9988 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick, 9989 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick, 9990 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate, 9991 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate, 9992 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts, 9993 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts, 9994 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag, 9995 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag, 9996 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow, 9997 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow, 9998 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry, 9999 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry, 10000 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode, 10001 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode, 10002 
IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power, 10003 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power, 10004 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy, 10005 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy, 10006 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy, 10007 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy, 10008 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie, 10009 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie, 10010 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme, 10011 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth, 10012 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth, 10013 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext, 10014 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext, 10015}; 10016 10017enum { 10018 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV, 10019 IPW_PRIV_GET_POWER, 10020 IPW_PRIV_SET_MODE, 10021 IPW_PRIV_GET_MODE, 10022 IPW_PRIV_SET_PREAMBLE, 10023 IPW_PRIV_GET_PREAMBLE, 10024 IPW_PRIV_RESET, 10025 IPW_PRIV_SW_RESET, 10026#ifdef CONFIG_IPW2200_MONITOR 10027 IPW_PRIV_SET_MONITOR, 10028#endif 10029}; 10030 10031static struct iw_priv_args ipw_priv_args[] = { 10032 { 10033 .cmd = IPW_PRIV_SET_POWER, 10034 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 10035 .name = "set_power"}, 10036 { 10037 .cmd = IPW_PRIV_GET_POWER, 10038 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 10039 .name = "get_power"}, 10040 { 10041 .cmd = IPW_PRIV_SET_MODE, 10042 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 10043 .name = "set_mode"}, 10044 { 10045 .cmd = IPW_PRIV_GET_MODE, 10046 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 10047 .name = "get_mode"}, 10048 { 10049 .cmd = IPW_PRIV_SET_PREAMBLE, 10050 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 10051 .name = "set_preamble"}, 10052 { 10053 .cmd = IPW_PRIV_GET_PREAMBLE, 10054 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, 10055 .name = "get_preamble"}, 10056 { 10057 IPW_PRIV_RESET, 10058 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"}, 10059 { 10060 IPW_PRIV_SW_RESET, 10061 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"}, 10062#ifdef CONFIG_IPW2200_MONITOR 10063 { 10064 IPW_PRIV_SET_MONITOR, 10065 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"}, 10066#endif /* CONFIG_IPW2200_MONITOR */ 10067}; 10068 10069static iw_handler ipw_priv_handler[] = { 10070 ipw_wx_set_powermode, 10071 ipw_wx_get_powermode, 10072 ipw_wx_set_wireless_mode, 10073 ipw_wx_get_wireless_mode, 10074 ipw_wx_set_preamble, 10075 ipw_wx_get_preamble, 10076 ipw_wx_reset, 10077 ipw_wx_sw_reset, 10078#ifdef CONFIG_IPW2200_MONITOR 10079 ipw_wx_set_monitor, 10080#endif 10081}; 10082 10083static struct iw_handler_def ipw_wx_handler_def = { 10084 .standard = ipw_wx_handlers, 10085 .num_standard = ARRAY_SIZE(ipw_wx_handlers), 10086 .num_private = ARRAY_SIZE(ipw_priv_handler), 10087 .num_private_args = ARRAY_SIZE(ipw_priv_args), 10088 .private = ipw_priv_handler, 10089 .private_args = ipw_priv_args, 10090 .get_wireless_stats = ipw_get_wireless_stats, 10091}; 10092 10093/* 10094 * Get wireless statistics. 10095 * Called by /proc/net/wireless 10096 * Also called by SIOCGIWSTATS 10097 */ 10098static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev) 10099{ 10100 struct ipw_priv *priv = libipw_priv(dev); 10101 struct iw_statistics *wstats; 10102 10103 wstats = &priv->wstats; 10104 10105 /* if hw is disabled, then ipw_get_ordinal() can't be called. 10106 * netdev->get_wireless_stats seems to be called before fw is 10107 * initialized. 
STATUS_ASSOCIATED will only be set if the hw is up 10108 * and associated; if not associated, the values are all meaningless 10109 * anyway, so set them all to 0 and INVALID */ 10110 if (!(priv->status & STATUS_ASSOCIATED)) { 10111 wstats->miss.beacon = 0; 10112 wstats->discard.retries = 0; 10113 wstats->qual.qual = 0; 10114 wstats->qual.level = 0; 10115 wstats->qual.noise = 0; 10116 wstats->qual.updated = 7; 10117 wstats->qual.updated |= IW_QUAL_NOISE_INVALID | 10118 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; 10119 return wstats; 10120 } 10121 10122 wstats->qual.qual = priv->quality; 10123 wstats->qual.level = priv->exp_avg_rssi; 10124 wstats->qual.noise = priv->exp_avg_noise; 10125 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | 10126 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM; 10127 10128 wstats->miss.beacon = average_value(&priv->average_missed_beacons); 10129 wstats->discard.retries = priv->last_tx_failures; 10130 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable; 10131 10132/* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len)) 10133 goto fail_get_ordinal; 10134 wstats->discard.retries += tx_retry; */ 10135 10136 return wstats; 10137} 10138 10139/* net device stuff */ 10140 10141static void init_sys_config(struct ipw_sys_config *sys_config) 10142{ 10143 memset(sys_config, 0, sizeof(struct ipw_sys_config)); 10144 sys_config->bt_coexistence = 0; 10145 sys_config->answer_broadcast_ssid_probe = 0; 10146 sys_config->accept_all_data_frames = 0; 10147 sys_config->accept_non_directed_frames = 1; 10148 sys_config->exclude_unicast_unencrypted = 0; 10149 sys_config->disable_unicast_decryption = 1; 10150 sys_config->exclude_multicast_unencrypted = 0; 10151 sys_config->disable_multicast_decryption = 1; 10152 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B) 10153 antenna = CFG_SYS_ANTENNA_BOTH; 10154 sys_config->antenna_diversity = antenna; 10155 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ 10156 sys_config->dot11g_auto_detection = 0; 10157 sys_config->enable_cts_to_self = 0; 10158 sys_config->bt_coexist_collision_thr = 0; 10159 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */ 10160 sys_config->silence_threshold = 0x1e; 10161} 10162 10163static int ipw_net_open(struct net_device *dev) 10164{ 10165 IPW_DEBUG_INFO("dev->open\n"); 10166 netif_start_queue(dev); 10167 return 0; 10168} 10169 10170static int ipw_net_stop(struct net_device *dev) 10171{ 10172 IPW_DEBUG_INFO("dev->close\n"); 10173 netif_stop_queue(dev); 10174 return 0; 10175} 10176 10177/* 10178todo: 10179 10180modify to send one tfd per fragment instead of using chunking. otherwise 10181we need to heavily modify the libipw_skb_to_txb. 
10182*/ 10183 10184static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, 10185 int pri) 10186{ 10187 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *) 10188 txb->fragments[0]->data; 10189 int i = 0; 10190 struct tfd_frame *tfd; 10191#ifdef CONFIG_IPW2200_QOS 10192 int tx_id = ipw_get_tx_queue_number(priv, pri); 10193 struct clx2_tx_queue *txq = &priv->txq[tx_id]; 10194#else 10195 struct clx2_tx_queue *txq = &priv->txq[0]; 10196#endif 10197 struct clx2_queue *q = &txq->q; 10198 u8 id, hdr_len, unicast; 10199 int fc; 10200 10201 if (!(priv->status & STATUS_ASSOCIATED)) 10202 goto drop; 10203 10204 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 10205 switch (priv->ieee->iw_mode) { 10206 case IW_MODE_ADHOC: 10207 unicast = !is_multicast_ether_addr(hdr->addr1); 10208 id = ipw_find_station(priv, hdr->addr1); 10209 if (id == IPW_INVALID_STATION) { 10210 id = ipw_add_station(priv, hdr->addr1); 10211 if (id == IPW_INVALID_STATION) { 10212 IPW_WARNING("Attempt to send data to " 10213 "invalid cell: %pM\n", 10214 hdr->addr1); 10215 goto drop; 10216 } 10217 } 10218 break; 10219 10220 case IW_MODE_INFRA: 10221 default: 10222 unicast = !is_multicast_ether_addr(hdr->addr3); 10223 id = 0; 10224 break; 10225 } 10226 10227 tfd = &txq->bd[q->first_empty]; 10228 txq->txb[q->first_empty] = txb; 10229 memset(tfd, 0, sizeof(*tfd)); 10230 tfd->u.data.station_number = id; 10231 10232 tfd->control_flags.message_type = TX_FRAME_TYPE; 10233 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; 10234 10235 tfd->u.data.cmd_id = DINO_CMD_TX; 10236 tfd->u.data.len = cpu_to_le16(txb->payload_size); 10237 10238 if (priv->assoc_request.ieee_mode == IPW_B_MODE) 10239 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK; 10240 else 10241 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM; 10242 10243 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE) 10244 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE; 10245 10246 fc = le16_to_cpu(hdr->frame_ctl); 10247 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS); 10248 10249 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len); 10250 10251 if (likely(unicast)) 10252 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD; 10253 10254 if (txb->encrypted && !priv->ieee->host_encrypt) { 10255 switch (priv->ieee->sec.level) { 10256 case SEC_LEVEL_3: 10257 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10258 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10259 /* XXX: ACK flag must be set for CCMP even if it 10260 * is a multicast/broadcast packet, because CCMP 10261 * group communication encrypted by GTK is 10262 * actually done by the AP. 
*/ 10263 if (!unicast) 10264 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD; 10265 10266 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; 10267 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM; 10268 tfd->u.data.key_index = 0; 10269 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE; 10270 break; 10271 case SEC_LEVEL_2: 10272 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10273 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10274 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; 10275 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP; 10276 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE; 10277 break; 10278 case SEC_LEVEL_1: 10279 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10280 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10281 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx; 10282 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <= 10283 40) 10284 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit; 10285 else 10286 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit; 10287 break; 10288 case SEC_LEVEL_0: 10289 break; 10290 default: 10291 printk(KERN_ERR "Unknown security level %d\n", 10292 priv->ieee->sec.level); 10293 break; 10294 } 10295 } else 10296 /* No hardware encryption */ 10297 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP; 10298 10299#ifdef CONFIG_IPW2200_QOS 10300 if (fc & IEEE80211_STYPE_QOS_DATA) 10301 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data)); 10302#endif /* CONFIG_IPW2200_QOS */ 10303 10304 /* payload */ 10305 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2), 10306 txb->nr_frags)); 10307 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n", 10308 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks)); 10309 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) { 10310 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n", 10311 i, le32_to_cpu(tfd->u.data.num_chunks), 10312 txb->fragments[i]->len - hdr_len); 10313 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n", 10314 i, tfd->u.data.num_chunks, 10315 txb->fragments[i]->len - hdr_len); 10316 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len, 10317 txb->fragments[i]->len - hdr_len); 10318 10319 tfd->u.data.chunk_ptr[i] = 10320 cpu_to_le32(pci_map_single 10321 (priv->pci_dev, 10322 txb->fragments[i]->data + hdr_len, 10323 txb->fragments[i]->len - hdr_len, 10324 PCI_DMA_TODEVICE)); 10325 tfd->u.data.chunk_len[i] = 10326 cpu_to_le16(txb->fragments[i]->len - hdr_len); 10327 } 10328 10329 if (i != txb->nr_frags) { 10330 struct sk_buff *skb; 10331 u16 remaining_bytes = 0; 10332 int j; 10333 10334 for (j = i; j < txb->nr_frags; j++) 10335 remaining_bytes += txb->fragments[j]->len - hdr_len; 10336 10337 printk(KERN_INFO "Trying to reallocate for %d bytes\n", 10338 remaining_bytes); 10339 skb = alloc_skb(remaining_bytes, GFP_ATOMIC); 10340 if (skb != NULL) { 10341 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes); 10342 for (j = i; j < txb->nr_frags; j++) { 10343 int size = txb->fragments[j]->len - hdr_len; 10344 10345 printk(KERN_INFO "Adding frag %d %d...\n", 10346 j, size); 10347 memcpy(skb_put(skb, size), 10348 txb->fragments[j]->data + hdr_len, size); 10349 } 10350 dev_kfree_skb_any(txb->fragments[i]); 10351 txb->fragments[i] = skb; 10352 tfd->u.data.chunk_ptr[i] = 10353 cpu_to_le32(pci_map_single 10354 (priv->pci_dev, skb->data, 10355 remaining_bytes, 10356 PCI_DMA_TODEVICE)); 10357 10358 le32_add_cpu(&tfd->u.data.num_chunks, 1); 10359 } 10360 } 10361 10362 /* kick DMA */ 10363 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); 10364 ipw_write32(priv, q->reg_w, q->first_empty); 
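/* Advancing first_empty and writing it to the queue's write-pointer
 * register is what makes the newly filled TFD visible to the firmware
 * (the "kick DMA" above); the check below then throttles the stack by
 * stopping the netdev queue once fewer than high_mark slots are free. */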
10365 10366 if (ipw_tx_queue_space(q) < q->high_mark) 10367 netif_stop_queue(priv->net_dev); 10368 10369 return NETDEV_TX_OK; 10370 10371 drop: 10372 IPW_DEBUG_DROP("Silently dropping Tx packet.\n"); 10373 libipw_txb_free(txb); 10374 return NETDEV_TX_OK; 10375} 10376 10377static int ipw_net_is_queue_full(struct net_device *dev, int pri) 10378{ 10379 struct ipw_priv *priv = libipw_priv(dev); 10380#ifdef CONFIG_IPW2200_QOS 10381 int tx_id = ipw_get_tx_queue_number(priv, pri); 10382 struct clx2_tx_queue *txq = &priv->txq[tx_id]; 10383#else 10384 struct clx2_tx_queue *txq = &priv->txq[0]; 10385#endif /* CONFIG_IPW2200_QOS */ 10386 10387 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark) 10388 return 1; 10389 10390 return 0; 10391} 10392 10393#ifdef CONFIG_IPW2200_PROMISCUOUS 10394static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, 10395 struct libipw_txb *txb) 10396{ 10397 struct libipw_rx_stats dummystats; 10398 struct ieee80211_hdr *hdr; 10399 u8 n; 10400 u16 filter = priv->prom_priv->filter; 10401 int hdr_only = 0; 10402 10403 if (filter & IPW_PROM_NO_TX) 10404 return; 10405 10406 memset(&dummystats, 0, sizeof(dummystats)); 10407 10408 /* Filtering of fragment chains is done against the first fragment */ 10409 hdr = (void *)txb->fragments[0]->data; 10410 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) { 10411 if (filter & IPW_PROM_NO_MGMT) 10412 return; 10413 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 10414 hdr_only = 1; 10415 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) { 10416 if (filter & IPW_PROM_NO_CTL) 10417 return; 10418 if (filter & IPW_PROM_CTL_HEADER_ONLY) 10419 hdr_only = 1; 10420 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) { 10421 if (filter & IPW_PROM_NO_DATA) 10422 return; 10423 if (filter & IPW_PROM_DATA_HEADER_ONLY) 10424 hdr_only = 1; 10425 } 10426 10427 for(n=0; n<txb->nr_frags; ++n) { 10428 struct sk_buff *src = txb->fragments[n]; 10429 struct sk_buff *dst; 10430 struct ieee80211_radiotap_header *rt_hdr; 10431 int len; 10432 10433 if (hdr_only) { 10434 hdr = (void *)src->data; 10435 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control)); 10436 } else 10437 len = src->len; 10438 10439 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC); 10440 if (!dst) 10441 continue; 10442 10443 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr)); 10444 10445 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION; 10446 rt_hdr->it_pad = 0; 10447 rt_hdr->it_present = 0; /* after all, it's just an idea */ 10448 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL); 10449 10450 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16( 10451 ieee80211chan2mhz(priv->channel)); 10452 if (priv->channel > 14) /* 802.11a */ 10453 *(__le16*)skb_put(dst, sizeof(u16)) = 10454 cpu_to_le16(IEEE80211_CHAN_OFDM | 10455 IEEE80211_CHAN_5GHZ); 10456 else if (priv->ieee->mode == IEEE_B) /* 802.11b */ 10457 *(__le16*)skb_put(dst, sizeof(u16)) = 10458 cpu_to_le16(IEEE80211_CHAN_CCK | 10459 IEEE80211_CHAN_2GHZ); 10460 else /* 802.11g */ 10461 *(__le16*)skb_put(dst, sizeof(u16)) = 10462 cpu_to_le16(IEEE80211_CHAN_OFDM | 10463 IEEE80211_CHAN_2GHZ); 10464 10465 rt_hdr->it_len = cpu_to_le16(dst->len); 10466 10467 skb_copy_from_linear_data(src, skb_put(dst, len), len); 10468 10469 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats)) 10470 dev_kfree_skb_any(dst); 10471 } 10472} 10473#endif 10474 10475static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb, 10476 struct net_device *dev, int pri) 10477{ 10478 struct ipw_priv *priv = libipw_priv(dev); 10479 
unsigned long flags; 10480 netdev_tx_t ret; 10481 10482 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size); 10483 spin_lock_irqsave(&priv->lock, flags); 10484 10485#ifdef CONFIG_IPW2200_PROMISCUOUS 10486 if (rtap_iface && netif_running(priv->prom_net_dev)) 10487 ipw_handle_promiscuous_tx(priv, txb); 10488#endif 10489 10490 ret = ipw_tx_skb(priv, txb, pri); 10491 if (ret == NETDEV_TX_OK) 10492 __ipw_led_activity_on(priv); 10493 spin_unlock_irqrestore(&priv->lock, flags); 10494 10495 return ret; 10496} 10497 10498static void ipw_net_set_multicast_list(struct net_device *dev) 10499{ 10500 10501} 10502 10503static int ipw_net_set_mac_address(struct net_device *dev, void *p) 10504{ 10505 struct ipw_priv *priv = libipw_priv(dev); 10506 struct sockaddr *addr = p; 10507 10508 if (!is_valid_ether_addr(addr->sa_data)) 10509 return -EADDRNOTAVAIL; 10510 mutex_lock(&priv->mutex); 10511 priv->config |= CFG_CUSTOM_MAC; 10512 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); 10513 printk(KERN_INFO "%s: Setting MAC to %pM\n", 10514 priv->net_dev->name, priv->mac_addr); 10515 queue_work(priv->workqueue, &priv->adapter_restart); 10516 mutex_unlock(&priv->mutex); 10517 return 0; 10518} 10519 10520static void ipw_ethtool_get_drvinfo(struct net_device *dev, 10521 struct ethtool_drvinfo *info) 10522{ 10523 struct ipw_priv *p = libipw_priv(dev); 10524 char vers[64]; 10525 char date[32]; 10526 u32 len; 10527 10528 strcpy(info->driver, DRV_NAME); 10529 strcpy(info->version, DRV_VERSION); 10530 10531 len = sizeof(vers); 10532 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); 10533 len = sizeof(date); 10534 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len); 10535 10536 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)", 10537 vers, date); 10538 strcpy(info->bus_info, pci_name(p->pci_dev)); 10539 info->eedump_len = IPW_EEPROM_IMAGE_SIZE; 10540} 10541 10542static u32 ipw_ethtool_get_link(struct net_device *dev) 10543{ 10544 struct ipw_priv *priv = libipw_priv(dev); 10545 return (priv->status & STATUS_ASSOCIATED) != 0; 10546} 10547 10548static int ipw_ethtool_get_eeprom_len(struct net_device *dev) 10549{ 10550 return IPW_EEPROM_IMAGE_SIZE; 10551} 10552 10553static int ipw_ethtool_get_eeprom(struct net_device *dev, 10554 struct ethtool_eeprom *eeprom, u8 * bytes) 10555{ 10556 struct ipw_priv *p = libipw_priv(dev); 10557 10558 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) 10559 return -EINVAL; 10560 mutex_lock(&p->mutex); 10561 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len); 10562 mutex_unlock(&p->mutex); 10563 return 0; 10564} 10565 10566static int ipw_ethtool_set_eeprom(struct net_device *dev, 10567 struct ethtool_eeprom *eeprom, u8 * bytes) 10568{ 10569 struct ipw_priv *p = libipw_priv(dev); 10570 int i; 10571 10572 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) 10573 return -EINVAL; 10574 mutex_lock(&p->mutex); 10575 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len); 10576 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++) 10577 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]); 10578 mutex_unlock(&p->mutex); 10579 return 0; 10580} 10581 10582static const struct ethtool_ops ipw_ethtool_ops = { 10583 .get_link = ipw_ethtool_get_link, 10584 .get_drvinfo = ipw_ethtool_get_drvinfo, 10585 .get_eeprom_len = ipw_ethtool_get_eeprom_len, 10586 .get_eeprom = ipw_ethtool_get_eeprom, 10587 .set_eeprom = ipw_ethtool_set_eeprom, 10588}; 10589 10590static irqreturn_t ipw_isr(int irq, void *data) 10591{ 10592 struct ipw_priv *priv = data; 10593 u32 inta, inta_mask; 10594 10595 
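/* Hard-IRQ half of interrupt handling: under irq_lock, read the pending
 * INTA bits, ack only those we service, mask further interrupts and cache
 * the status in isr_inta for irq_tasklet to process.  Returning IRQ_NONE
 * lets the core try other handlers sharing this interrupt line. */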
if (!priv) 10596 return IRQ_NONE; 10597 10598 spin_lock(&priv->irq_lock); 10599 10600 if (!(priv->status & STATUS_INT_ENABLED)) { 10601 /* IRQ is disabled */ 10602 goto none; 10603 } 10604 10605 inta = ipw_read32(priv, IPW_INTA_RW); 10606 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); 10607 10608 if (inta == 0xFFFFFFFF) { 10609 /* Hardware disappeared */ 10610 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n"); 10611 goto none; 10612 } 10613 10614 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) { 10615 /* Shared interrupt */ 10616 goto none; 10617 } 10618 10619 /* tell the device to stop sending interrupts */ 10620 __ipw_disable_interrupts(priv); 10621 10622 /* ack current interrupts */ 10623 inta &= (IPW_INTA_MASK_ALL & inta_mask); 10624 ipw_write32(priv, IPW_INTA_RW, inta); 10625 10626 /* Cache INTA value for our tasklet */ 10627 priv->isr_inta = inta; 10628 10629 tasklet_schedule(&priv->irq_tasklet); 10630 10631 spin_unlock(&priv->irq_lock); 10632 10633 return IRQ_HANDLED; 10634 none: 10635 spin_unlock(&priv->irq_lock); 10636 return IRQ_NONE; 10637} 10638 10639static void ipw_rf_kill(void *adapter) 10640{ 10641 struct ipw_priv *priv = adapter; 10642 unsigned long flags; 10643 10644 spin_lock_irqsave(&priv->lock, flags); 10645 10646 if (rf_kill_active(priv)) { 10647 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); 10648 if (priv->workqueue) 10649 queue_delayed_work(priv->workqueue, 10650 &priv->rf_kill, 2 * HZ); 10651 goto exit_unlock; 10652 } 10653 10654 /* RF Kill is now disabled, so bring the device back up */ 10655 10656 if (!(priv->status & STATUS_RF_KILL_MASK)) { 10657 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting " 10658 "device\n"); 10659 10660 /* we can not do an adapter restart while inside an irq lock */ 10661 queue_work(priv->workqueue, &priv->adapter_restart); 10662 } else 10663 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " 10664 "enabled\n"); 10665 10666 exit_unlock: 10667 spin_unlock_irqrestore(&priv->lock, flags); 10668} 10669 10670static void ipw_bg_rf_kill(struct work_struct *work) 10671{ 10672 struct ipw_priv *priv = 10673 container_of(work, struct ipw_priv, rf_kill.work); 10674 mutex_lock(&priv->mutex); 10675 ipw_rf_kill(priv); 10676 mutex_unlock(&priv->mutex); 10677} 10678 10679static void ipw_link_up(struct ipw_priv *priv) 10680{ 10681 priv->last_seq_num = -1; 10682 priv->last_frag_num = -1; 10683 priv->last_packet_time = 0; 10684 10685 netif_carrier_on(priv->net_dev); 10686 10687 cancel_delayed_work(&priv->request_scan); 10688 cancel_delayed_work(&priv->request_direct_scan); 10689 cancel_delayed_work(&priv->request_passive_scan); 10690 cancel_delayed_work(&priv->scan_event); 10691 ipw_reset_stats(priv); 10692 /* Ensure the rate is updated immediately */ 10693 priv->last_rate = ipw_get_current_rate(priv); 10694 ipw_gather_stats(priv); 10695 ipw_led_link_up(priv); 10696 notify_wx_assoc_event(priv); 10697 10698 if (priv->config & CFG_BACKGROUND_SCAN) 10699 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); 10700} 10701 10702static void ipw_bg_link_up(struct work_struct *work) 10703{ 10704 struct ipw_priv *priv = 10705 container_of(work, struct ipw_priv, link_up); 10706 mutex_lock(&priv->mutex); 10707 ipw_link_up(priv); 10708 mutex_unlock(&priv->mutex); 10709} 10710 10711static void ipw_link_down(struct ipw_priv *priv) 10712{ 10713 ipw_led_link_down(priv); 10714 netif_carrier_off(priv->net_dev); 10715 notify_wx_assoc_event(priv); 10716 10717 /* Cancel any queued work ... 
*/ 10718 cancel_delayed_work(&priv->request_scan); 10719 cancel_delayed_work(&priv->request_direct_scan); 10720 cancel_delayed_work(&priv->request_passive_scan); 10721 cancel_delayed_work(&priv->adhoc_check); 10722 cancel_delayed_work(&priv->gather_stats); 10723 10724 ipw_reset_stats(priv); 10725 10726 if (!(priv->status & STATUS_EXIT_PENDING)) { 10727 /* Queue up another scan... */ 10728 queue_delayed_work(priv->workqueue, &priv->request_scan, 0); 10729 } else 10730 cancel_delayed_work(&priv->scan_event); 10731} 10732 10733static void ipw_bg_link_down(struct work_struct *work) 10734{ 10735 struct ipw_priv *priv = 10736 container_of(work, struct ipw_priv, link_down); 10737 mutex_lock(&priv->mutex); 10738 ipw_link_down(priv); 10739 mutex_unlock(&priv->mutex); 10740} 10741 10742static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv) 10743{ 10744 int ret = 0; 10745 10746 priv->workqueue = create_workqueue(DRV_NAME); 10747 init_waitqueue_head(&priv->wait_command_queue); 10748 init_waitqueue_head(&priv->wait_state); 10749 10750 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check); 10751 INIT_WORK(&priv->associate, ipw_bg_associate); 10752 INIT_WORK(&priv->disassociate, ipw_bg_disassociate); 10753 INIT_WORK(&priv->system_config, ipw_system_config); 10754 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish); 10755 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart); 10756 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill); 10757 INIT_WORK(&priv->up, ipw_bg_up); 10758 INIT_WORK(&priv->down, ipw_bg_down); 10759 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); 10760 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan); 10761 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan); 10762 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); 10763 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); 10764 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); 10765 INIT_WORK(&priv->roam, ipw_bg_roam); 10766 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check); 10767 INIT_WORK(&priv->link_up, ipw_bg_link_up); 10768 INIT_WORK(&priv->link_down, ipw_bg_link_down); 10769 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on); 10770 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off); 10771 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off); 10772 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network); 10773 10774#ifdef CONFIG_IPW2200_QOS 10775 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate); 10776#endif /* CONFIG_IPW2200_QOS */ 10777 10778 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 10779 ipw_irq_tasklet, (unsigned long)priv); 10780 10781 return ret; 10782} 10783 10784static void shim__set_security(struct net_device *dev, 10785 struct libipw_security *sec) 10786{ 10787 struct ipw_priv *priv = libipw_priv(dev); 10788 int i; 10789 for (i = 0; i < 4; i++) { 10790 if (sec->flags & (1 << i)) { 10791 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i]; 10792 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i]; 10793 if (sec->key_sizes[i] == 0) 10794 priv->ieee->sec.flags &= ~(1 << i); 10795 else { 10796 memcpy(priv->ieee->sec.keys[i], sec->keys[i], 10797 sec->key_sizes[i]); 10798 priv->ieee->sec.flags |= (1 << i); 10799 } 10800 priv->status |= STATUS_SECURITY_UPDATED; 10801 } else if (sec->level != SEC_LEVEL_1) 10802 priv->ieee->sec.flags &= ~(1 << i); 10803 } 10804 10805 if (sec->flags & SEC_ACTIVE_KEY) { 10806 if (sec->active_key <= 3) { 10807 priv->ieee->sec.active_key = sec->active_key; 10808 
priv->ieee->sec.flags |= SEC_ACTIVE_KEY; 10809 } else 10810 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; 10811 priv->status |= STATUS_SECURITY_UPDATED; 10812 } else 10813 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; 10814 10815 if ((sec->flags & SEC_AUTH_MODE) && 10816 (priv->ieee->sec.auth_mode != sec->auth_mode)) { 10817 priv->ieee->sec.auth_mode = sec->auth_mode; 10818 priv->ieee->sec.flags |= SEC_AUTH_MODE; 10819 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY) 10820 priv->capability |= CAP_SHARED_KEY; 10821 else 10822 priv->capability &= ~CAP_SHARED_KEY; 10823 priv->status |= STATUS_SECURITY_UPDATED; 10824 } 10825 10826 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) { 10827 priv->ieee->sec.flags |= SEC_ENABLED; 10828 priv->ieee->sec.enabled = sec->enabled; 10829 priv->status |= STATUS_SECURITY_UPDATED; 10830 if (sec->enabled) 10831 priv->capability |= CAP_PRIVACY_ON; 10832 else 10833 priv->capability &= ~CAP_PRIVACY_ON; 10834 } 10835 10836 if (sec->flags & SEC_ENCRYPT) 10837 priv->ieee->sec.encrypt = sec->encrypt; 10838 10839 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) { 10840 priv->ieee->sec.level = sec->level; 10841 priv->ieee->sec.flags |= SEC_LEVEL; 10842 priv->status |= STATUS_SECURITY_UPDATED; 10843 } 10844 10845 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT)) 10846 ipw_set_hwcrypto_keys(priv); 10847 10848 /* To match current functionality of ipw2100 (which works well w/ 10849 * various supplicants, we don't force a disassociate if the 10850 * privacy capability changes ... */ 10851#if 0 10852 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) && 10853 (((priv->assoc_request.capability & 10854 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) || 10855 (!(priv->assoc_request.capability & 10856 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) { 10857 IPW_DEBUG_ASSOC("Disassociating due to capability " 10858 "change.\n"); 10859 ipw_disassociate(priv); 10860 } 10861#endif 10862} 10863 10864static int init_supported_rates(struct ipw_priv *priv, 10865 struct ipw_supported_rates *rates) 10866{ 10867 /* TODO: Mask out rates based on priv->rates_mask */ 10868 10869 memset(rates, 0, sizeof(*rates)); 10870 /* configure supported rates */ 10871 switch (priv->ieee->freq_band) { 10872 case LIBIPW_52GHZ_BAND: 10873 rates->ieee_mode = IPW_A_MODE; 10874 rates->purpose = IPW_RATE_CAPABILITIES; 10875 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION, 10876 LIBIPW_OFDM_DEFAULT_RATES_MASK); 10877 break; 10878 10879 default: /* Mixed or 2.4Ghz */ 10880 rates->ieee_mode = IPW_G_MODE; 10881 rates->purpose = IPW_RATE_CAPABILITIES; 10882 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION, 10883 LIBIPW_CCK_DEFAULT_RATES_MASK); 10884 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) { 10885 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION, 10886 LIBIPW_OFDM_DEFAULT_RATES_MASK); 10887 } 10888 break; 10889 } 10890 10891 return 0; 10892} 10893 10894static int ipw_config(struct ipw_priv *priv) 10895{ 10896 /* This is only called from ipw_up, which resets/reloads the firmware 10897 so, we don't need to first disable the card before we configure 10898 it */ 10899 if (ipw_set_tx_power(priv)) 10900 goto error; 10901 10902 /* initialize adapter address */ 10903 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr)) 10904 goto error; 10905 10906 /* set basic system config settings */ 10907 init_sys_config(&priv->sys_config); 10908 10909 /* Support Bluetooth if we have BT h/w on board, and user wants to. 
10910 * Does not support BT priority yet (don't abort or defer our Tx) */ 10911 if (bt_coexist) { 10912 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY]; 10913 10914 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG) 10915 priv->sys_config.bt_coexistence 10916 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL; 10917 if (bt_caps & EEPROM_SKU_CAP_BT_OOB) 10918 priv->sys_config.bt_coexistence 10919 |= CFG_BT_COEXISTENCE_OOB; 10920 } 10921 10922#ifdef CONFIG_IPW2200_PROMISCUOUS 10923 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { 10924 priv->sys_config.accept_all_data_frames = 1; 10925 priv->sys_config.accept_non_directed_frames = 1; 10926 priv->sys_config.accept_all_mgmt_bcpr = 1; 10927 priv->sys_config.accept_all_mgmt_frames = 1; 10928 } 10929#endif 10930 10931 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 10932 priv->sys_config.answer_broadcast_ssid_probe = 1; 10933 else 10934 priv->sys_config.answer_broadcast_ssid_probe = 0; 10935 10936 if (ipw_send_system_config(priv)) 10937 goto error; 10938 10939 init_supported_rates(priv, &priv->rates); 10940 if (ipw_send_supported_rates(priv, &priv->rates)) 10941 goto error; 10942 10943 /* Set request-to-send threshold */ 10944 if (priv->rts_threshold) { 10945 if (ipw_send_rts_threshold(priv, priv->rts_threshold)) 10946 goto error; 10947 } 10948#ifdef CONFIG_IPW2200_QOS 10949 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n"); 10950 ipw_qos_activate(priv, NULL); 10951#endif /* CONFIG_IPW2200_QOS */ 10952 10953 if (ipw_set_random_seed(priv)) 10954 goto error; 10955 10956 /* final state transition to the RUN state */ 10957 if (ipw_send_host_complete(priv)) 10958 goto error; 10959 10960 priv->status |= STATUS_INIT; 10961 10962 ipw_led_init(priv); 10963 ipw_led_radio_on(priv); 10964 priv->notif_missed_beacons = 0; 10965 10966 /* Set hardware WEP key if it is configured. */ 10967 if ((priv->capability & CAP_PRIVACY_ON) && 10968 (priv->ieee->sec.level == SEC_LEVEL_1) && 10969 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt)) 10970 ipw_set_hwcrypto_keys(priv); 10971 10972 return 0; 10973 10974 error: 10975 return -EIO; 10976} 10977 10978/* 10979 * NOTE: 10980 * 10981 * These tables have been tested in conjunction with the 10982 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters. 10983 * 10984 * Altering these values, using them on other hardware, or in geographies 10985 * not intended for resale of the above mentioned Intel adapters has 10986 * not been tested. 10987 * 10988 * Remember to update the table in README.ipw2200 when changing this 10989 * table. 
10990 * 10991 */ 10992static const struct libipw_geo ipw_geos[] = { 10993 { /* Restricted */ 10994 "---", 10995 .bg_channels = 11, 10996 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 10997 {2427, 4}, {2432, 5}, {2437, 6}, 10998 {2442, 7}, {2447, 8}, {2452, 9}, 10999 {2457, 10}, {2462, 11}}, 11000 }, 11001 11002 { /* Custom US/Canada */ 11003 "ZZF", 11004 .bg_channels = 11, 11005 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11006 {2427, 4}, {2432, 5}, {2437, 6}, 11007 {2442, 7}, {2447, 8}, {2452, 9}, 11008 {2457, 10}, {2462, 11}}, 11009 .a_channels = 8, 11010 .a = {{5180, 36}, 11011 {5200, 40}, 11012 {5220, 44}, 11013 {5240, 48}, 11014 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11015 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11016 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11017 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}}, 11018 }, 11019 11020 { /* Rest of World */ 11021 "ZZD", 11022 .bg_channels = 13, 11023 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11024 {2427, 4}, {2432, 5}, {2437, 6}, 11025 {2442, 7}, {2447, 8}, {2452, 9}, 11026 {2457, 10}, {2462, 11}, {2467, 12}, 11027 {2472, 13}}, 11028 }, 11029 11030 { /* Custom USA & Europe & High */ 11031 "ZZA", 11032 .bg_channels = 11, 11033 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11034 {2427, 4}, {2432, 5}, {2437, 6}, 11035 {2442, 7}, {2447, 8}, {2452, 9}, 11036 {2457, 10}, {2462, 11}}, 11037 .a_channels = 13, 11038 .a = {{5180, 36}, 11039 {5200, 40}, 11040 {5220, 44}, 11041 {5240, 48}, 11042 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11043 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11044 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11045 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11046 {5745, 149}, 11047 {5765, 153}, 11048 {5785, 157}, 11049 {5805, 161}, 11050 {5825, 165}}, 11051 }, 11052 11053 { /* Custom NA & Europe */ 11054 "ZZB", 11055 .bg_channels = 11, 11056 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11057 {2427, 4}, {2432, 5}, {2437, 6}, 11058 {2442, 7}, {2447, 8}, {2452, 9}, 11059 {2457, 10}, {2462, 11}}, 11060 .a_channels = 13, 11061 .a = {{5180, 36}, 11062 {5200, 40}, 11063 {5220, 44}, 11064 {5240, 48}, 11065 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11066 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11067 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11068 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11069 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 11070 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 11071 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 11072 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 11073 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 11074 }, 11075 11076 { /* Custom Japan */ 11077 "ZZC", 11078 .bg_channels = 11, 11079 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11080 {2427, 4}, {2432, 5}, {2437, 6}, 11081 {2442, 7}, {2447, 8}, {2452, 9}, 11082 {2457, 10}, {2462, 11}}, 11083 .a_channels = 4, 11084 .a = {{5170, 34}, {5190, 38}, 11085 {5210, 42}, {5230, 46}}, 11086 }, 11087 11088 { /* Custom */ 11089 "ZZM", 11090 .bg_channels = 11, 11091 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11092 {2427, 4}, {2432, 5}, {2437, 6}, 11093 {2442, 7}, {2447, 8}, {2452, 9}, 11094 {2457, 10}, {2462, 11}}, 11095 }, 11096 11097 { /* Europe */ 11098 "ZZE", 11099 .bg_channels = 13, 11100 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11101 {2427, 4}, {2432, 5}, {2437, 6}, 11102 {2442, 7}, {2447, 8}, {2452, 9}, 11103 {2457, 10}, {2462, 11}, {2467, 12}, 11104 {2472, 13}}, 11105 .a_channels = 19, 11106 .a = {{5180, 36}, 11107 {5200, 40}, 11108 {5220, 44}, 11109 {5240, 48}, 11110 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11111 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11112 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11113 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11114 {5500, 100, LIBIPW_CH_PASSIVE_ONLY}, 11115 {5520, 104, 
LIBIPW_CH_PASSIVE_ONLY}, 11116 {5540, 108, LIBIPW_CH_PASSIVE_ONLY}, 11117 {5560, 112, LIBIPW_CH_PASSIVE_ONLY}, 11118 {5580, 116, LIBIPW_CH_PASSIVE_ONLY}, 11119 {5600, 120, LIBIPW_CH_PASSIVE_ONLY}, 11120 {5620, 124, LIBIPW_CH_PASSIVE_ONLY}, 11121 {5640, 128, LIBIPW_CH_PASSIVE_ONLY}, 11122 {5660, 132, LIBIPW_CH_PASSIVE_ONLY}, 11123 {5680, 136, LIBIPW_CH_PASSIVE_ONLY}, 11124 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}}, 11125 }, 11126 11127 { /* Custom Japan */ 11128 "ZZJ", 11129 .bg_channels = 14, 11130 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11131 {2427, 4}, {2432, 5}, {2437, 6}, 11132 {2442, 7}, {2447, 8}, {2452, 9}, 11133 {2457, 10}, {2462, 11}, {2467, 12}, 11134 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}}, 11135 .a_channels = 4, 11136 .a = {{5170, 34}, {5190, 38}, 11137 {5210, 42}, {5230, 46}}, 11138 }, 11139 11140 { /* Rest of World */ 11141 "ZZR", 11142 .bg_channels = 14, 11143 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11144 {2427, 4}, {2432, 5}, {2437, 6}, 11145 {2442, 7}, {2447, 8}, {2452, 9}, 11146 {2457, 10}, {2462, 11}, {2467, 12}, 11147 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY | 11148 LIBIPW_CH_PASSIVE_ONLY}}, 11149 }, 11150 11151 { /* High Band */ 11152 "ZZH", 11153 .bg_channels = 13, 11154 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11155 {2427, 4}, {2432, 5}, {2437, 6}, 11156 {2442, 7}, {2447, 8}, {2452, 9}, 11157 {2457, 10}, {2462, 11}, 11158 {2467, 12, LIBIPW_CH_PASSIVE_ONLY}, 11159 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}}, 11160 .a_channels = 4, 11161 .a = {{5745, 149}, {5765, 153}, 11162 {5785, 157}, {5805, 161}}, 11163 }, 11164 11165 { /* Custom Europe */ 11166 "ZZG", 11167 .bg_channels = 13, 11168 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11169 {2427, 4}, {2432, 5}, {2437, 6}, 11170 {2442, 7}, {2447, 8}, {2452, 9}, 11171 {2457, 10}, {2462, 11}, 11172 {2467, 12}, {2472, 13}}, 11173 .a_channels = 4, 11174 .a = {{5180, 36}, {5200, 40}, 11175 {5220, 44}, {5240, 48}}, 11176 }, 11177 11178 { /* Europe */ 11179 "ZZK", 11180 .bg_channels = 13, 11181 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11182 {2427, 4}, {2432, 5}, {2437, 6}, 11183 {2442, 7}, {2447, 8}, {2452, 9}, 11184 {2457, 10}, {2462, 11}, 11185 {2467, 12, LIBIPW_CH_PASSIVE_ONLY}, 11186 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}}, 11187 .a_channels = 24, 11188 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY}, 11189 {5200, 40, LIBIPW_CH_PASSIVE_ONLY}, 11190 {5220, 44, LIBIPW_CH_PASSIVE_ONLY}, 11191 {5240, 48, LIBIPW_CH_PASSIVE_ONLY}, 11192 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11193 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11194 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11195 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11196 {5500, 100, LIBIPW_CH_PASSIVE_ONLY}, 11197 {5520, 104, LIBIPW_CH_PASSIVE_ONLY}, 11198 {5540, 108, LIBIPW_CH_PASSIVE_ONLY}, 11199 {5560, 112, LIBIPW_CH_PASSIVE_ONLY}, 11200 {5580, 116, LIBIPW_CH_PASSIVE_ONLY}, 11201 {5600, 120, LIBIPW_CH_PASSIVE_ONLY}, 11202 {5620, 124, LIBIPW_CH_PASSIVE_ONLY}, 11203 {5640, 128, LIBIPW_CH_PASSIVE_ONLY}, 11204 {5660, 132, LIBIPW_CH_PASSIVE_ONLY}, 11205 {5680, 136, LIBIPW_CH_PASSIVE_ONLY}, 11206 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}, 11207 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 11208 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 11209 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 11210 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 11211 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 11212 }, 11213 11214 { /* Europe */ 11215 "ZZL", 11216 .bg_channels = 11, 11217 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11218 {2427, 4}, {2432, 5}, {2437, 6}, 11219 {2442, 7}, {2447, 8}, {2452, 9}, 11220 {2457, 10}, {2462, 11}}, 11221 .a_channels = 13, 11222 .a = {{5180, 36, 
LIBIPW_CH_PASSIVE_ONLY}, 11223 {5200, 40, LIBIPW_CH_PASSIVE_ONLY}, 11224 {5220, 44, LIBIPW_CH_PASSIVE_ONLY}, 11225 {5240, 48, LIBIPW_CH_PASSIVE_ONLY}, 11226 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11227 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11228 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11229 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11230 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 11231 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 11232 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 11233 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 11234 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 11235 } 11236}; 11237 11238#define MAX_HW_RESTARTS 5 11239static int ipw_up(struct ipw_priv *priv) 11240{ 11241 int rc, i, j; 11242 11243 /* Age scan list entries found before suspend */ 11244 if (priv->suspend_time) { 11245 libipw_networks_age(priv->ieee, priv->suspend_time); 11246 priv->suspend_time = 0; 11247 } 11248 11249 if (priv->status & STATUS_EXIT_PENDING) 11250 return -EIO; 11251 11252 if (cmdlog && !priv->cmdlog) { 11253 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog), 11254 GFP_KERNEL); 11255 if (priv->cmdlog == NULL) { 11256 IPW_ERROR("Error allocating %d command log entries.\n", 11257 cmdlog); 11258 return -ENOMEM; 11259 } else { 11260 priv->cmdlog_len = cmdlog; 11261 } 11262 } 11263 11264 for (i = 0; i < MAX_HW_RESTARTS; i++) { 11265 /* Load the microcode, firmware, and eeprom. 11266 * Also start the clocks. */ 11267 rc = ipw_load(priv); 11268 if (rc) { 11269 IPW_ERROR("Unable to load firmware: %d\n", rc); 11270 return rc; 11271 } 11272 11273 ipw_init_ordinals(priv); 11274 if (!(priv->config & CFG_CUSTOM_MAC)) 11275 eeprom_parse_mac(priv, priv->mac_addr); 11276 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); 11277 memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN); 11278 11279 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) { 11280 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE], 11281 ipw_geos[j].name, 3)) 11282 break; 11283 } 11284 if (j == ARRAY_SIZE(ipw_geos)) { 11285 IPW_WARNING("SKU [%c%c%c] not recognized.\n", 11286 priv->eeprom[EEPROM_COUNTRY_CODE + 0], 11287 priv->eeprom[EEPROM_COUNTRY_CODE + 1], 11288 priv->eeprom[EEPROM_COUNTRY_CODE + 2]); 11289 j = 0; 11290 } 11291 if (libipw_set_geo(priv->ieee, &ipw_geos[j])) { 11292 IPW_WARNING("Could not set geography."); 11293 return 0; 11294 } 11295 11296 if (priv->status & STATUS_RF_KILL_SW) { 11297 IPW_WARNING("Radio disabled by module parameter.\n"); 11298 return 0; 11299 } else if (rf_kill_active(priv)) { 11300 IPW_WARNING("Radio Frequency Kill Switch is On:\n" 11301 "Kill switch must be turned off for " 11302 "wireless networking to work.\n"); 11303 queue_delayed_work(priv->workqueue, &priv->rf_kill, 11304 2 * HZ); 11305 return 0; 11306 } 11307 11308 rc = ipw_config(priv); 11309 if (!rc) { 11310 IPW_DEBUG_INFO("Configured device on count %i\n", i); 11311 11312 /* If configured to try and auto-associate, kick 11313 * off a scan. 
*/ 11314 queue_delayed_work(priv->workqueue, 11315 &priv->request_scan, 0); 11316 11317 return 0; 11318 } 11319 11320 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc); 11321 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n", 11322 i, MAX_HW_RESTARTS); 11323 11324 /* We had an error bringing up the hardware, so take it 11325 * all the way back down so we can try again */ 11326 ipw_down(priv); 11327 } 11328 11329 /* tried to restart and config the device for as long as our 11330 * patience could withstand */ 11331 IPW_ERROR("Unable to initialize device after %d attempts.\n", i); 11332 11333 return -EIO; 11334} 11335 11336static void ipw_bg_up(struct work_struct *work) 11337{ 11338 struct ipw_priv *priv = 11339 container_of(work, struct ipw_priv, up); 11340 mutex_lock(&priv->mutex); 11341 ipw_up(priv); 11342 mutex_unlock(&priv->mutex); 11343} 11344 11345static void ipw_deinit(struct ipw_priv *priv) 11346{ 11347 int i; 11348 11349 if (priv->status & STATUS_SCANNING) { 11350 IPW_DEBUG_INFO("Aborting scan during shutdown.\n"); 11351 ipw_abort_scan(priv); 11352 } 11353 11354 if (priv->status & STATUS_ASSOCIATED) { 11355 IPW_DEBUG_INFO("Disassociating during shutdown.\n"); 11356 ipw_disassociate(priv); 11357 } 11358 11359 ipw_led_shutdown(priv); 11360 11361 /* Wait up to 1s for status to change to not scanning and not 11362 * associated (disassociation can take a while for a full 802.11 11363 * exchange) */ 11364 for (i = 1000; i && (priv->status & 11365 (STATUS_DISASSOCIATING | 11366 STATUS_ASSOCIATED | STATUS_SCANNING)); i--) 11367 udelay(10); 11368 11369 if (priv->status & (STATUS_DISASSOCIATING | 11370 STATUS_ASSOCIATED | STATUS_SCANNING)) 11371 IPW_DEBUG_INFO("Still associated or scanning...\n"); 11372 else 11373 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i); 11374 11375 /* Attempt to disable the card */ 11376 ipw_send_card_disable(priv, 0); 11377 11378 priv->status &= ~STATUS_INIT; 11379} 11380 11381static void ipw_down(struct ipw_priv *priv) 11382{ 11383 int exit_pending = priv->status & STATUS_EXIT_PENDING; 11384 11385 priv->status |= STATUS_EXIT_PENDING; 11386 11387 if (ipw_is_init(priv)) 11388 ipw_deinit(priv); 11389 11390 /* Wipe out the EXIT_PENDING status bit if we are not actually 11391 * exiting the module */ 11392 if (!exit_pending) 11393 priv->status &= ~STATUS_EXIT_PENDING; 11394 11395 /* tell the device to stop sending interrupts */ 11396 ipw_disable_interrupts(priv); 11397 11398 /* Clear all bits but the RF Kill */ 11399 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING; 11400 netif_carrier_off(priv->net_dev); 11401 11402 ipw_stop_nic(priv); 11403 11404 ipw_led_radio_off(priv); 11405} 11406 11407static void ipw_bg_down(struct work_struct *work) 11408{ 11409 struct ipw_priv *priv = 11410 container_of(work, struct ipw_priv, down); 11411 mutex_lock(&priv->mutex); 11412 ipw_down(priv); 11413 mutex_unlock(&priv->mutex); 11414} 11415 11416/* Called by register_netdev() */ 11417static int ipw_net_init(struct net_device *dev) 11418{ 11419 struct ipw_priv *priv = libipw_priv(dev); 11420 mutex_lock(&priv->mutex); 11421 11422 if (ipw_up(priv)) { 11423 mutex_unlock(&priv->mutex); 11424 return -EIO; 11425 } 11426 11427 mutex_unlock(&priv->mutex); 11428 return 0; 11429} 11430 11431/* PCI driver stuff */ 11432static struct pci_device_id card_ids[] = { 11433 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0}, 11434 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0}, 11435 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0}, 11436 
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0}, 11437 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0}, 11438 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0}, 11439 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0}, 11440 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0}, 11441 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0}, 11442 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0}, 11443 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0}, 11444 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0}, 11445 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0}, 11446 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0}, 11447 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0}, 11448 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0}, 11449 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0}, 11450 {PCI_VDEVICE(INTEL, 0x104f), 0}, 11451 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */ 11452 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */ 11453 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */ 11454 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */ 11455 11456 /* required last entry */ 11457 {0,} 11458}; 11459 11460MODULE_DEVICE_TABLE(pci, card_ids); 11461 11462static struct attribute *ipw_sysfs_entries[] = { 11463 &dev_attr_rf_kill.attr, 11464 &dev_attr_direct_dword.attr, 11465 &dev_attr_indirect_byte.attr, 11466 &dev_attr_indirect_dword.attr, 11467 &dev_attr_mem_gpio_reg.attr, 11468 &dev_attr_command_event_reg.attr, 11469 &dev_attr_nic_type.attr, 11470 &dev_attr_status.attr, 11471 &dev_attr_cfg.attr, 11472 &dev_attr_error.attr, 11473 &dev_attr_event_log.attr, 11474 &dev_attr_cmd_log.attr, 11475 &dev_attr_eeprom_delay.attr, 11476 &dev_attr_ucode_version.attr, 11477 &dev_attr_rtc.attr, 11478 &dev_attr_scan_age.attr, 11479 &dev_attr_led.attr, 11480 &dev_attr_speed_scan.attr, 11481 &dev_attr_net_stats.attr, 11482 &dev_attr_channels.attr, 11483#ifdef CONFIG_IPW2200_PROMISCUOUS 11484 &dev_attr_rtap_iface.attr, 11485 &dev_attr_rtap_filter.attr, 11486#endif 11487 NULL 11488}; 11489 11490static struct attribute_group ipw_attribute_group = { 11491 .name = NULL, /* put in device directory */ 11492 .attrs = ipw_sysfs_entries, 11493}; 11494 11495#ifdef CONFIG_IPW2200_PROMISCUOUS 11496static int ipw_prom_open(struct net_device *dev) 11497{ 11498 struct ipw_prom_priv *prom_priv = libipw_priv(dev); 11499 struct ipw_priv *priv = prom_priv->priv; 11500 11501 IPW_DEBUG_INFO("prom dev->open\n"); 11502 netif_carrier_off(dev); 11503 11504 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 11505 priv->sys_config.accept_all_data_frames = 1; 11506 priv->sys_config.accept_non_directed_frames = 1; 11507 priv->sys_config.accept_all_mgmt_bcpr = 1; 11508 priv->sys_config.accept_all_mgmt_frames = 1; 11509 11510 ipw_send_system_config(priv); 11511 } 11512 11513 return 0; 11514} 11515 11516static int ipw_prom_stop(struct net_device *dev) 11517{ 11518 struct ipw_prom_priv *prom_priv = libipw_priv(dev); 11519 struct ipw_priv *priv = prom_priv->priv; 11520 11521 IPW_DEBUG_INFO("prom dev->stop\n"); 11522 11523 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 11524 priv->sys_config.accept_all_data_frames = 0; 11525 priv->sys_config.accept_non_directed_frames = 0; 11526 priv->sys_config.accept_all_mgmt_bcpr = 0; 11527 priv->sys_config.accept_all_mgmt_frames = 0; 11528 11529 ipw_send_system_config(priv); 11530 } 11531 11532 return 0; 11533} 11534 11535static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb, 11536 struct net_device *dev) 11537{ 11538 IPW_DEBUG_INFO("prom dev->xmit\n"); 11539 
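/* The rtap interface is receive/monitor only; anything the stack tries
 * to transmit on it is simply freed and reported as sent. */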
static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	IPW_DEBUG_INFO("prom dev->xmit\n");
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops ipw_prom_netdev_ops = {
	.ndo_open = ipw_prom_open,
	.ndo_stop = ipw_prom_stop,
	.ndo_start_xmit = ipw_prom_hard_start_xmit,
	.ndo_change_mtu = libipw_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static int ipw_prom_alloc(struct ipw_priv *priv)
{
	int rc = 0;

	if (priv->prom_net_dev)
		return -EPERM;

	priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
	if (priv->prom_net_dev == NULL)
		return -ENOMEM;

	priv->prom_priv = libipw_priv(priv->prom_net_dev);
	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
	priv->prom_priv->priv = priv;

	strcpy(priv->prom_net_dev->name, "rtap%d");
	memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);

	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;

	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);

	rc = register_netdev(priv->prom_net_dev);
	if (rc) {
		free_ieee80211(priv->prom_net_dev);
		priv->prom_net_dev = NULL;
		return rc;
	}

	return 0;
}

static void ipw_prom_free(struct ipw_priv *priv)
{
	if (!priv->prom_net_dev)
		return;

	unregister_netdev(priv->prom_net_dev);
	free_ieee80211(priv->prom_net_dev);

	priv->prom_net_dev = NULL;
}

#endif

static const struct net_device_ops ipw_netdev_ops = {
	.ndo_init = ipw_net_init,
	.ndo_open = ipw_net_open,
	.ndo_stop = ipw_net_stop,
	.ndo_set_multicast_list = ipw_net_set_multicast_list,
	.ndo_set_mac_address = ipw_net_set_mac_address,
	.ndo_start_xmit = libipw_xmit,
	.ndo_change_mtu = libipw_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
};

static int __devinit ipw_pci_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int i;

	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = libipw_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);

	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
	ipw_debug_level = debug;
	spin_lock_init(&priv->irq_lock);
	spin_lock_init(&priv->lock);
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	mutex_init(&priv->mutex);
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_ieee80211;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
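	/* The RETRY_TIMEOUT byte at 0x41 sits in bits 15:8 of the 32-bit
	 * config word read at offset 0x40, which is why the test and the
	 * 0xffff00ff mask below touch only that byte. */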
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	length = pci_resource_len(pdev, 0);
	priv->hw_len = length;

	base = pci_ioremap_bar(pdev, 0);
	if (!base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	priv->hw_base = base;
	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);

	err = ipw_setup_deferred_work(priv);
	if (err) {
		IPW_ERROR("Unable to setup deferred work\n");
		goto out_iounmap;
	}

	ipw_sw_reset(priv, 1);

	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
		goto out_destroy_workqueue;
	}

	SET_NETDEV_DEV(net_dev, &pdev->dev);

	mutex_lock(&priv->mutex);

	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
	priv->ieee->set_security = shim__set_security;
	priv->ieee->is_queue_full = ipw_net_is_queue_full;

#ifdef CONFIG_IPW2200_QOS
	priv->ieee->is_qos_active = ipw_is_qos_active;
	priv->ieee->handle_probe_response = ipw_handle_beacon;
	priv->ieee->handle_beacon = ipw_handle_probe_response;
	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
#endif /* CONFIG_IPW2200_QOS */

	priv->ieee->perfect_rssi = -20;
	priv->ieee->worst_rssi = -85;

	net_dev->netdev_ops = &ipw_netdev_ops;
	priv->wireless_data.spy_data = &priv->ieee->spy_data;
	net_dev->wireless_data = &priv->wireless_data;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;
	net_dev->irq = pdev->irq;
	net_dev->base_addr = (unsigned long)priv->hw_base;
	net_dev->mem_start = pci_resource_start(pdev, 0);
	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		mutex_unlock(&priv->mutex);
		goto out_release_irq;
	}

	mutex_unlock(&priv->mutex);
	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_remove_sysfs;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (rtap_iface) {
		err = ipw_prom_alloc(priv);
		if (err) {
			IPW_ERROR("Failed to register promiscuous network "
				  "device (error %d).\n", err);
			unregister_netdev(priv->net_dev);
			goto out_remove_sysfs;
		}
	}
#endif

	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
	       "channels, %d 802.11a channels)\n",
	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
	       priv->ieee->geo.a_channels);

	return 0;

      out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
      out_release_irq:
	free_irq(pdev->irq, priv);
      out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
      out_iounmap:
	iounmap(priv->hw_base);
      out_pci_release_regions:
	pci_release_regions(pdev);
      out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
      out_free_ieee80211:
	free_ieee80211(priv->net_dev);
      out:
	return err;
}

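/*
 * Undo ipw_pci_probe() in roughly reverse order: stop the adapter, remove
 * the sysfs group, unregister the net device, free RX/TX queues and the
 * command log, cancel every piece of delayed work before destroying the
 * workqueue, then hand the IRQ, MMIO mapping and PCI resources back.
 */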
static void __devexit ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	if (!priv)
		return;

	mutex_lock(&priv->mutex);

	priv->status |= STATUS_EXIT_PENDING;
	ipw_down(priv);
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	mutex_unlock(&priv->mutex);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	if (priv->cmdlog) {
		kfree(priv->cmdlog);
		priv->cmdlog = NULL;
	}
	/* ipw_down will ensure that there is no more pending work
	 * in the workqueues, so we can safely remove them now. */
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->request_direct_scan);
	cancel_delayed_work(&priv->request_passive_scan);
	cancel_delayed_work(&priv->scan_event);
	cancel_delayed_work(&priv->rf_kill);
	cancel_delayed_work(&priv->scan_check);
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ipw_ibss_seq, list));
		}
	}

	kfree(priv->error);
	priv->error = NULL;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	ipw_prom_free(priv);
#endif

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_ieee80211(priv->net_dev);
	free_firmware();
}

#ifdef CONFIG_PM
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	priv->suspend_at = get_seconds();

	return 0;
}

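/*
 * Counterpart to ipw_pci_suspend(): return the device to D0, re-enable it
 * and restore its saved PCI state, re-apply the RETRY_TIMEOUT quirk (config
 * space past the standard header is not restored for us), reattach the net
 * device and queue the 'up' work to bring the adapter back up.
 */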
static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	int err;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	priv->suspend_time = get_seconds() - priv->suspend_at;

	/* Bring the device back up */
	queue_work(priv->workqueue, &priv->up);

	return 0;
}
#endif

static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	pci_disable_device(pdev);
}

/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
	.shutdown = ipw_pci_shutdown,
};

static int __init ipw_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_register_driver(&ipw_driver);
	if (ret) {
		IPW_ERROR("Unable to initialize PCI module\n");
		return ret;
	}

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
	if (ret) {
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
		return ret;
	}

	return ret;
}

static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}

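/*
 * Module parameters.  With the usual kernel layout these are also visible
 * read-only under /sys/module/ipw2200/parameters/.  A hypothetical load
 * line (values purely illustrative):
 *
 *	modprobe ipw2200 led=1 associate=0 channel=6 hwcrypto=0
 */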
module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param_named(led, led_support, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param_named(channel, default_channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

#ifdef CONFIG_IPW2200_PROMISCUOUS
module_param(rtap_iface, int, 0444);
MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
#endif

#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");

module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");

module_param(qos_no_ack_mask, int, 0444);
MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");

module_param(burst_duration_CCK, int, 0444);
MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");

module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
#endif /* CONFIG_IPW2200_QOS */

#ifdef CONFIG_IPW2200_MONITOR
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_param(bt_coexist, int, 0444);
MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");

module_param(hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");

module_param(cmdlog, int, 0444);
MODULE_PARM_DESC(cmdlog,
		 "allocate a ring buffer for logging firmware commands");

module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");

module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");

module_exit(ipw_exit);
module_init(ipw_init);
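/*
 * Runtime tuning note (paths assume the standard sysfs layout): the
 * debug_level attribute created in ipw_init() is a driver attribute, so it
 * should appear as /sys/bus/pci/drivers/ipw2200/debug_level, while the
 * entries of ipw_attribute_group (rf_kill, status, led, ...) are created
 * directly in the PCI device's sysfs directory, e.g.
 *
 *	cat /sys/bus/pci/devices/0000:02:02.0/rf_kill
 *
 * (the bus/device/function address is illustrative).
 */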