ipw2200.c revision 33e2bf6aa16061bae1253514e7c32af27d2b4b31
1/****************************************************************************** 2 3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. 4 5 802.11 status code portion of this file from ethereal-0.10.6: 6 Copyright 2000, Axis Communications AB 7 Ethereal - Network traffic analyzer 8 By Gerald Combs <gerald@ethereal.com> 9 Copyright 1998 Gerald Combs 10 11 This program is free software; you can redistribute it and/or modify it 12 under the terms of version 2 of the GNU General Public License as 13 published by the Free Software Foundation. 14 15 This program is distributed in the hope that it will be useful, but WITHOUT 16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 18 more details. 19 20 You should have received a copy of the GNU General Public License along with 21 this program; if not, write to the Free Software Foundation, Inc., 59 22 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 24 The full GNU General Public License is included in this distribution in the 25 file called LICENSE. 26 27 Contact Information: 28 Intel Linux Wireless <ilw@linux.intel.com> 29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 30 31******************************************************************************/ 32 33#include <linux/sched.h> 34#include "ipw2200.h" 35 36 37#ifndef KBUILD_EXTMOD 38#define VK "k" 39#else 40#define VK 41#endif 42 43#ifdef CONFIG_IPW2200_DEBUG 44#define VD "d" 45#else 46#define VD 47#endif 48 49#ifdef CONFIG_IPW2200_MONITOR 50#define VM "m" 51#else 52#define VM 53#endif 54 55#ifdef CONFIG_IPW2200_PROMISCUOUS 56#define VP "p" 57#else 58#define VP 59#endif 60 61#ifdef CONFIG_IPW2200_RADIOTAP 62#define VR "r" 63#else 64#define VR 65#endif 66 67#ifdef CONFIG_IPW2200_QOS 68#define VQ "q" 69#else 70#define VQ 71#endif 72 73#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ 74#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" 75#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" 76#define DRV_VERSION IPW2200_VERSION 77 78#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1) 79 80MODULE_DESCRIPTION(DRV_DESCRIPTION); 81MODULE_VERSION(DRV_VERSION); 82MODULE_AUTHOR(DRV_COPYRIGHT); 83MODULE_LICENSE("GPL"); 84MODULE_FIRMWARE("ipw2200-ibss.fw"); 85#ifdef CONFIG_IPW2200_MONITOR 86MODULE_FIRMWARE("ipw2200-sniffer.fw"); 87#endif 88MODULE_FIRMWARE("ipw2200-bss.fw"); 89 90static int cmdlog = 0; 91static int debug = 0; 92static int default_channel = 0; 93static int network_mode = 0; 94 95static u32 ipw_debug_level; 96static int associate; 97static int auto_create = 1; 98static int led_support = 0; 99static int disable = 0; 100static int bt_coexist = 0; 101static int hwcrypto = 0; 102static int roaming = 1; 103static const char ipw_modes[] = { 104 'a', 'b', 'g', '?' 
105}; 106static int antenna = CFG_SYS_ANTENNA_BOTH; 107 108#ifdef CONFIG_IPW2200_PROMISCUOUS 109static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */ 110#endif 111 112static struct ieee80211_rate ipw2200_rates[] = { 113 { .bitrate = 10 }, 114 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 115 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 116 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 117 { .bitrate = 60 }, 118 { .bitrate = 90 }, 119 { .bitrate = 120 }, 120 { .bitrate = 180 }, 121 { .bitrate = 240 }, 122 { .bitrate = 360 }, 123 { .bitrate = 480 }, 124 { .bitrate = 540 } 125}; 126 127#define ipw2200_a_rates (ipw2200_rates + 4) 128#define ipw2200_num_a_rates 8 129#define ipw2200_bg_rates (ipw2200_rates + 0) 130#define ipw2200_num_bg_rates 12 131 132#ifdef CONFIG_IPW2200_QOS 133static int qos_enable = 0; 134static int qos_burst_enable = 0; 135static int qos_no_ack_mask = 0; 136static int burst_duration_CCK = 0; 137static int burst_duration_OFDM = 0; 138 139static struct libipw_qos_parameters def_qos_parameters_OFDM = { 140 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM, 141 QOS_TX3_CW_MIN_OFDM}, 142 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM, 143 QOS_TX3_CW_MAX_OFDM}, 144 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS}, 145 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM}, 146 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM, 147 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM} 148}; 149 150static struct libipw_qos_parameters def_qos_parameters_CCK = { 151 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK, 152 QOS_TX3_CW_MIN_CCK}, 153 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK, 154 QOS_TX3_CW_MAX_CCK}, 155 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS}, 156 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM}, 157 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK, 158 QOS_TX3_TXOP_LIMIT_CCK} 159}; 160 161static struct libipw_qos_parameters def_parameters_OFDM = { 162 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM, 163 DEF_TX3_CW_MIN_OFDM}, 164 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM, 165 DEF_TX3_CW_MAX_OFDM}, 166 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS}, 167 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM}, 168 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM, 169 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM} 170}; 171 172static struct libipw_qos_parameters def_parameters_CCK = { 173 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK, 174 DEF_TX3_CW_MIN_CCK}, 175 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK, 176 DEF_TX3_CW_MAX_CCK}, 177 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS}, 178 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM}, 179 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK, 180 DEF_TX3_TXOP_LIMIT_CCK} 181}; 182 183static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; 184 185static int from_priority_to_tx_queue[] = { 186 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1, 187 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4 188}; 189 190static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv); 191 192static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters 193 *qos_param); 194static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element 195 *qos_param); 196#endif /* CONFIG_IPW2200_QOS */ 
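/*
 * Note on the CONFIG_IPW2200_QOS tables above: each libipw_qos_parameters
 * initializer provides per-TX-queue CW min, CW max, AIFS, ACM and TXOP
 * limit values, with one variant for the OFDM PHY and one for CCK (the
 * tables built from the QOS_* constants appear to be the QoS-active set,
 * those built from DEF_* the non-QoS defaults).  from_priority_to_tx_queue[]
 * maps an 802.1d priority (0-7) onto one of the four hardware TX queues,
 * e.g. (illustrative only):
 *
 *	queue = from_priority_to_tx_queue[6];	=> IPW_TX_QUEUE_4
 */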
197 198static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev); 199static void ipw_remove_current_network(struct ipw_priv *priv); 200static void ipw_rx(struct ipw_priv *priv); 201static int ipw_queue_tx_reclaim(struct ipw_priv *priv, 202 struct clx2_tx_queue *txq, int qindex); 203static int ipw_queue_reset(struct ipw_priv *priv); 204 205static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, 206 int len, int sync); 207 208static void ipw_tx_queue_free(struct ipw_priv *); 209 210static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *); 211static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); 212static void ipw_rx_queue_replenish(void *); 213static int ipw_up(struct ipw_priv *); 214static void ipw_bg_up(struct work_struct *work); 215static void ipw_down(struct ipw_priv *); 216static void ipw_bg_down(struct work_struct *work); 217static int ipw_config(struct ipw_priv *); 218static int init_supported_rates(struct ipw_priv *priv, 219 struct ipw_supported_rates *prates); 220static void ipw_set_hwcrypto_keys(struct ipw_priv *); 221static void ipw_send_wep_keys(struct ipw_priv *, int); 222 223static int snprint_line(char *buf, size_t count, 224 const u8 * data, u32 len, u32 ofs) 225{ 226 int out, i, j, l; 227 char c; 228 229 out = snprintf(buf, count, "%08X", ofs); 230 231 for (l = 0, i = 0; i < 2; i++) { 232 out += snprintf(buf + out, count - out, " "); 233 for (j = 0; j < 8 && l < len; j++, l++) 234 out += snprintf(buf + out, count - out, "%02X ", 235 data[(i * 8 + j)]); 236 for (; j < 8; j++) 237 out += snprintf(buf + out, count - out, " "); 238 } 239 240 out += snprintf(buf + out, count - out, " "); 241 for (l = 0, i = 0; i < 2; i++) { 242 out += snprintf(buf + out, count - out, " "); 243 for (j = 0; j < 8 && l < len; j++, l++) { 244 c = data[(i * 8 + j)]; 245 if (!isascii(c) || !isprint(c)) 246 c = '.'; 247 248 out += snprintf(buf + out, count - out, "%c", c); 249 } 250 251 for (; j < 8; j++) 252 out += snprintf(buf + out, count - out, " "); 253 } 254 255 return out; 256} 257 258static void printk_buf(int level, const u8 * data, u32 len) 259{ 260 char line[81]; 261 u32 ofs = 0; 262 if (!(ipw_debug_level & level)) 263 return; 264 265 while (len) { 266 snprint_line(line, sizeof(line), &data[ofs], 267 min(len, 16U), ofs); 268 printk(KERN_DEBUG "%s\n", line); 269 ofs += 16; 270 len -= min(len, 16U); 271 } 272} 273 274static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len) 275{ 276 size_t out = size; 277 u32 ofs = 0; 278 int total = 0; 279 280 while (size && len) { 281 out = snprint_line(output, size, &data[ofs], 282 min_t(size_t, len, 16U), ofs); 283 284 ofs += 16; 285 output += out; 286 size -= out; 287 len -= min_t(size_t, len, 16U); 288 total += out; 289 } 290 return total; 291} 292 293/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ 294static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg); 295#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b) 296 297/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */ 298static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg); 299#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b) 300 301/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 302static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); 303static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) 304{ 305 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__, 306 __LINE__, (u32) (b), (u32) (c)); 307 
_ipw_write_reg8(a, b, c); 308} 309 310/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 311static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); 312static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) 313{ 314 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__, 315 __LINE__, (u32) (b), (u32) (c)); 316 _ipw_write_reg16(a, b, c); 317} 318 319/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */ 320static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); 321static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) 322{ 323 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__, 324 __LINE__, (u32) (b), (u32) (c)); 325 _ipw_write_reg32(a, b, c); 326} 327 328/* 8-bit direct write (low 4K) */ 329static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs, 330 u8 val) 331{ 332 writeb(val, ipw->hw_base + ofs); 333} 334 335/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 336#define ipw_write8(ipw, ofs, val) do { \ 337 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \ 338 __LINE__, (u32)(ofs), (u32)(val)); \ 339 _ipw_write8(ipw, ofs, val); \ 340} while (0) 341 342/* 16-bit direct write (low 4K) */ 343static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs, 344 u16 val) 345{ 346 writew(val, ipw->hw_base + ofs); 347} 348 349/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 350#define ipw_write16(ipw, ofs, val) do { \ 351 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \ 352 __LINE__, (u32)(ofs), (u32)(val)); \ 353 _ipw_write16(ipw, ofs, val); \ 354} while (0) 355 356/* 32-bit direct write (low 4K) */ 357static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs, 358 u32 val) 359{ 360 writel(val, ipw->hw_base + ofs); 361} 362 363/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */ 364#define ipw_write32(ipw, ofs, val) do { \ 365 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \ 366 __LINE__, (u32)(ofs), (u32)(val)); \ 367 _ipw_write32(ipw, ofs, val); \ 368} while (0) 369 370/* 8-bit direct read (low 4K) */ 371static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs) 372{ 373 return readb(ipw->hw_base + ofs); 374} 375 376/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 377#define ipw_read8(ipw, ofs) ({ \ 378 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \ 379 (u32)(ofs)); \ 380 _ipw_read8(ipw, ofs); \ 381}) 382 383/* 16-bit direct read (low 4K) */ 384static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs) 385{ 386 return readw(ipw->hw_base + ofs); 387} 388 389/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 390#define ipw_read16(ipw, ofs) ({ \ 391 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \ 392 (u32)(ofs)); \ 393 _ipw_read16(ipw, ofs); \ 394}) 395 396/* 32-bit direct read (low 4K) */ 397static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs) 398{ 399 return readl(ipw->hw_base + ofs); 400} 401 402/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */ 403#define ipw_read32(ipw, ofs) ({ \ 404 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \ 405 (u32)(ofs)); \ 406 _ipw_read32(ipw, ofs); \ 407}) 408 409static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); 410/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ 
411#define ipw_read_indirect(a, b, c, d) ({ \ 412 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \ 413 __LINE__, (u32)(b), (u32)(d)); \ 414 _ipw_read_indirect(a, b, c, d); \ 415}) 416 417/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */ 418static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data, 419 int num); 420#define ipw_write_indirect(a, b, c, d) do { \ 421 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \ 422 __LINE__, (u32)(b), (u32)(d)); \ 423 _ipw_write_indirect(a, b, c, d); \ 424} while (0) 425 426/* 32-bit indirect write (above 4K) */ 427static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value) 428{ 429 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value); 430 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); 431 _ipw_write32(priv, IPW_INDIRECT_DATA, value); 432} 433 434/* 8-bit indirect write (above 4K) */ 435static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) 436{ 437 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ 438 u32 dif_len = reg - aligned_addr; 439 440 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 441 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 442 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value); 443} 444 445/* 16-bit indirect write (above 4K) */ 446static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value) 447{ 448 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */ 449 u32 dif_len = (reg - aligned_addr) & (~0x1ul); 450 451 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 452 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 453 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value); 454} 455 456/* 8-bit indirect read (above 4K) */ 457static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg) 458{ 459 u32 word; 460 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); 461 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg); 462 word = _ipw_read32(priv, IPW_INDIRECT_DATA); 463 return (word >> ((reg & 0x3) * 8)) & 0xff; 464} 465 466/* 32-bit indirect read (above 4K) */ 467static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) 468{ 469 u32 value; 470 471 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg); 472 473 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); 474 value = _ipw_read32(priv, IPW_INDIRECT_DATA); 475 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value); 476 return value; 477} 478 479/* General purpose, no alignment requirement, iterative (multi-byte) read, */ 480/* for area above 1st 4K of SRAM/reg space */ 481static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, 482 int num) 483{ 484 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ 485 u32 dif_len = addr - aligned_addr; 486 u32 i; 487 488 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num); 489 490 if (num <= 0) { 491 return; 492 } 493 494 /* Read the first dword (or portion) byte by byte */ 495 if (unlikely(dif_len)) { 496 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 497 /* Start reading at aligned_addr + dif_len */ 498 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--) 499 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i); 500 aligned_addr += 4; 501 } 502 503 /* Read all of the middle dwords as dwords, with auto-increment */ 504 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); 505 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) 506 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA); 507 508 /* Read the last dword (or 
portion) byte by byte */ 509 if (unlikely(num)) { 510 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 511 for (i = 0; num > 0; i++, num--) 512 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i); 513 } 514} 515 516/* General purpose, no alignment requirement, iterative (multi-byte) write, */ 517/* for area above 1st 4K of SRAM/reg space */ 518static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf, 519 int num) 520{ 521 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */ 522 u32 dif_len = addr - aligned_addr; 523 u32 i; 524 525 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num); 526 527 if (num <= 0) { 528 return; 529 } 530 531 /* Write the first dword (or portion) byte by byte */ 532 if (unlikely(dif_len)) { 533 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 534 /* Start writing at aligned_addr + dif_len */ 535 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++) 536 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); 537 aligned_addr += 4; 538 } 539 540 /* Write all of the middle dwords as dwords, with auto-increment */ 541 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr); 542 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4) 543 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf); 544 545 /* Write the last dword (or portion) byte by byte */ 546 if (unlikely(num)) { 547 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr); 548 for (i = 0; num > 0; i++, num--, buf++) 549 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf); 550 } 551} 552 553/* General purpose, no alignment requirement, iterative (multi-byte) write, */ 554/* for 1st 4K of SRAM/regs space */ 555static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf, 556 int num) 557{ 558 memcpy_toio((priv->hw_base + addr), buf, num); 559} 560 561/* Set bit(s) in low 4K of SRAM/regs */ 562static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask) 563{ 564 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask); 565} 566 567/* Clear bit(s) in low 4K of SRAM/regs */ 568static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask) 569{ 570 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask); 571} 572 573static inline void __ipw_enable_interrupts(struct ipw_priv *priv) 574{ 575 if (priv->status & STATUS_INT_ENABLED) 576 return; 577 priv->status |= STATUS_INT_ENABLED; 578 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL); 579} 580 581static inline void __ipw_disable_interrupts(struct ipw_priv *priv) 582{ 583 if (!(priv->status & STATUS_INT_ENABLED)) 584 return; 585 priv->status &= ~STATUS_INT_ENABLED; 586 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 587} 588 589static inline void ipw_enable_interrupts(struct ipw_priv *priv) 590{ 591 unsigned long flags; 592 593 spin_lock_irqsave(&priv->irq_lock, flags); 594 __ipw_enable_interrupts(priv); 595 spin_unlock_irqrestore(&priv->irq_lock, flags); 596} 597 598static inline void ipw_disable_interrupts(struct ipw_priv *priv) 599{ 600 unsigned long flags; 601 602 spin_lock_irqsave(&priv->irq_lock, flags); 603 __ipw_disable_interrupts(priv); 604 spin_unlock_irqrestore(&priv->irq_lock, flags); 605} 606 607static char *ipw_error_desc(u32 val) 608{ 609 switch (val) { 610 case IPW_FW_ERROR_OK: 611 return "ERROR_OK"; 612 case IPW_FW_ERROR_FAIL: 613 return "ERROR_FAIL"; 614 case IPW_FW_ERROR_MEMORY_UNDERFLOW: 615 return "MEMORY_UNDERFLOW"; 616 case IPW_FW_ERROR_MEMORY_OVERFLOW: 617 return "MEMORY_OVERFLOW"; 618 case IPW_FW_ERROR_BAD_PARAM: 619 return "BAD_PARAM"; 620 case IPW_FW_ERROR_BAD_CHECKSUM: 
621 return "BAD_CHECKSUM"; 622 case IPW_FW_ERROR_NMI_INTERRUPT: 623 return "NMI_INTERRUPT"; 624 case IPW_FW_ERROR_BAD_DATABASE: 625 return "BAD_DATABASE"; 626 case IPW_FW_ERROR_ALLOC_FAIL: 627 return "ALLOC_FAIL"; 628 case IPW_FW_ERROR_DMA_UNDERRUN: 629 return "DMA_UNDERRUN"; 630 case IPW_FW_ERROR_DMA_STATUS: 631 return "DMA_STATUS"; 632 case IPW_FW_ERROR_DINO_ERROR: 633 return "DINO_ERROR"; 634 case IPW_FW_ERROR_EEPROM_ERROR: 635 return "EEPROM_ERROR"; 636 case IPW_FW_ERROR_SYSASSERT: 637 return "SYSASSERT"; 638 case IPW_FW_ERROR_FATAL_ERROR: 639 return "FATAL_ERROR"; 640 default: 641 return "UNKNOWN_ERROR"; 642 } 643} 644 645static void ipw_dump_error_log(struct ipw_priv *priv, 646 struct ipw_fw_error *error) 647{ 648 u32 i; 649 650 if (!error) { 651 IPW_ERROR("Error allocating and capturing error log. " 652 "Nothing to dump.\n"); 653 return; 654 } 655 656 IPW_ERROR("Start IPW Error Log Dump:\n"); 657 IPW_ERROR("Status: 0x%08X, Config: %08X\n", 658 error->status, error->config); 659 660 for (i = 0; i < error->elem_len; i++) 661 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 662 ipw_error_desc(error->elem[i].desc), 663 error->elem[i].time, 664 error->elem[i].blink1, 665 error->elem[i].blink2, 666 error->elem[i].link1, 667 error->elem[i].link2, error->elem[i].data); 668 for (i = 0; i < error->log_len; i++) 669 IPW_ERROR("%i\t0x%08x\t%i\n", 670 error->log[i].time, 671 error->log[i].data, error->log[i].event); 672} 673 674static inline int ipw_is_init(struct ipw_priv *priv) 675{ 676 return (priv->status & STATUS_INIT) ? 1 : 0; 677} 678 679static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len) 680{ 681 u32 addr, field_info, field_len, field_count, total_len; 682 683 IPW_DEBUG_ORD("ordinal = %i\n", ord); 684 685 if (!priv || !val || !len) { 686 IPW_DEBUG_ORD("Invalid argument\n"); 687 return -EINVAL; 688 } 689 690 /* verify device ordinal tables have been initialized */ 691 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) { 692 IPW_DEBUG_ORD("Access ordinals before initialization\n"); 693 return -EINVAL; 694 } 695 696 switch (IPW_ORD_TABLE_ID_MASK & ord) { 697 case IPW_ORD_TABLE_0_MASK: 698 /* 699 * TABLE 0: Direct access to a table of 32 bit values 700 * 701 * This is a very simple table with the data directly 702 * read from the table 703 */ 704 705 /* remove the table id from the ordinal */ 706 ord &= IPW_ORD_TABLE_VALUE_MASK; 707 708 /* boundary check */ 709 if (ord > priv->table0_len) { 710 IPW_DEBUG_ORD("ordinal value (%i) longer then " 711 "max (%i)\n", ord, priv->table0_len); 712 return -EINVAL; 713 } 714 715 /* verify we have enough room to store the value */ 716 if (*len < sizeof(u32)) { 717 IPW_DEBUG_ORD("ordinal buffer length too small, " 718 "need %zd\n", sizeof(u32)); 719 return -EINVAL; 720 } 721 722 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n", 723 ord, priv->table0_addr + (ord << 2)); 724 725 *len = sizeof(u32); 726 ord <<= 2; 727 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord); 728 break; 729 730 case IPW_ORD_TABLE_1_MASK: 731 /* 732 * TABLE 1: Indirect access to a table of 32 bit values 733 * 734 * This is a fairly large table of u32 values each 735 * representing starting addr for the data (which is 736 * also a u32) 737 */ 738 739 /* remove the table id from the ordinal */ 740 ord &= IPW_ORD_TABLE_VALUE_MASK; 741 742 /* boundary check */ 743 if (ord > priv->table1_len) { 744 IPW_DEBUG_ORD("ordinal value too long\n"); 745 return -EINVAL; 746 } 747 748 /* verify we have enough room to store the value 
	 */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}

		*((u32 *) val) =
		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
		*len = sizeof(u32);
		break;

	case IPW_ORD_TABLE_2_MASK:
		/*
		 * TABLE 2: Indirect access to a table of variable sized values
		 *
		 * This table consists of six values, each containing
		 *     - dword containing the starting offset of the data
		 *     - dword containing the length in the first 16 bits
		 *       and the count in the second 16 bits
		 */

		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;

		/* boundary check */
		if (ord > priv->table2_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}

		/* get the address of statistic */
		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));

		/* get the second DW of statistics;
		 * two 16-bit words - first is length, second is count */
		field_info =
		    ipw_read_reg32(priv,
				   priv->table2_addr + (ord << 3) +
				   sizeof(u32));

		/* get each entry length */
		field_len = *((u16 *) & field_info);

		/* get number of entries */
		field_count = *(((u16 *) & field_info) + 1);

		/* abort if not enough memory */
		total_len = field_len * field_count;
		if (total_len > *len) {
			*len = total_len;
			return -EINVAL;
		}

		*len = total_len;
		if (!total_len)
			return 0;

		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
			      "field_info = 0x%08x\n",
			      addr, total_len, field_info);
		ipw_read_indirect(priv, addr, val, total_len);
		break;

	default:
		IPW_DEBUG_ORD("Invalid ordinal!\n");
		return -EINVAL;

	}

	return 0;
}

static void ipw_init_ordinals(struct ipw_priv *priv)
{
	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
	priv->table0_len = ipw_read32(priv, priv->table0_addr);

	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
		      priv->table0_addr, priv->table0_len);

	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);

	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
		      priv->table1_addr, priv->table1_len);

	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
	priv->table2_len &= 0x0000ffff;	/* use first two bytes */

	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
		      priv->table2_addr, priv->table2_len);

}

static u32 ipw_register_toggle(u32 reg)
{
	reg &= ~IPW_START_STANDBY;
	if (reg & IPW_GATE_ODMA)
		reg &= ~IPW_GATE_ODMA;
	if (reg & IPW_GATE_IDMA)
		reg &= ~IPW_GATE_IDMA;
	if (reg & IPW_GATE_ADMA)
		reg &= ~IPW_GATE_ADMA;
	return reg;
}

/*
 * LED behavior:
 * - On radio ON, turn on any LEDs that require to be on during start
 * - On initialization, start unassociated blink
 * - On association, disable unassociated blink
 * - On disassociation, start unassociated blink
 * - On radio OFF, turn off any LEDs started during radio on
 *
 */
#define LD_TIME_LINK_ON msecs_to_jiffies(300)
#define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
#define LD_TIME_ACT_ON msecs_to_jiffies(250)

static void ipw_led_link_on(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured to not use LEDs, or nic_type is 1,
	 * then we don't toggle a LINK led */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (!(priv->status & STATUS_RF_KILL_MASK) &&
	    !(priv->status & STATUS_LED_LINK_ON)) {
		IPW_DEBUG_LED("Link LED On\n");
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led |= priv->led_association_on;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		priv->status |= STATUS_LED_LINK_ON;

		/* If we aren't associated, schedule turning the LED off */
		if (!(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue,
					   &priv->led_link_off,
					   LD_TIME_LINK_ON);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipw_bg_led_link_on(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, led_link_on.work);
	mutex_lock(&priv->mutex);
	ipw_led_link_on(priv);
	mutex_unlock(&priv->mutex);
}

static void ipw_led_link_off(struct ipw_priv *priv)
{
	unsigned long flags;
	u32 led;

	/* If configured not to use LEDs, or nic type is 1,
	 * then we don't toggle the LINK led. */
	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->status & STATUS_LED_LINK_ON) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led &= priv->led_association_off;
		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Link LED Off\n");

		priv->status &= ~STATUS_LED_LINK_ON;

		/* If we aren't associated and the radio is on, schedule
		 * turning the LED on (blink while unassociated) */
		if (!(priv->status & STATUS_RF_KILL_MASK) &&
		    !(priv->status & STATUS_ASSOCIATED))
			queue_delayed_work(priv->workqueue, &priv->led_link_on,
					   LD_TIME_LINK_OFF);

	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipw_bg_led_link_off(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, led_link_off.work);
	mutex_lock(&priv->mutex);
	ipw_led_link_off(priv);
	mutex_unlock(&priv->mutex);
}

static void __ipw_led_activity_on(struct ipw_priv *priv)
{
	u32 led;

	if (priv->config & CFG_NO_LED)
		return;

	if (priv->status & STATUS_RF_KILL_MASK)
		return;

	if (!(priv->status & STATUS_LED_ACT_ON)) {
		led = ipw_read_reg32(priv, IPW_EVENT_REG);
		led |= priv->led_activity_on;

		led = ipw_register_toggle(led);

		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
		ipw_write_reg32(priv, IPW_EVENT_REG, led);

		IPW_DEBUG_LED("Activity LED On\n");

		priv->status |= STATUS_LED_ACT_ON;

		cancel_delayed_work(&priv->led_act_off);
		queue_delayed_work(priv->workqueue, &priv->led_act_off,
				   LD_TIME_ACT_ON);
	} else {
		/* Reschedule LED off for full time period */
		cancel_delayed_work(&priv->led_act_off);
		queue_delayed_work(priv->workqueue, &priv->led_act_off,
				   LD_TIME_ACT_ON);
	}
}

#if 0
void ipw_led_activity_on(struct ipw_priv *priv)
{
	unsigned long flags;
	spin_lock_irqsave(&priv->lock, flags);
	__ipw_led_activity_on(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
#endif	/* 0 */

static void ipw_led_activity_off(struct ipw_priv *priv)
{
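	/* Counterpart to __ipw_led_activity_on(): clears the activity LED
	 * bit in IPW_EVENT_REG and STATUS_LED_ACT_ON.  Unlike the _on path,
	 * this takes priv->lock itself; it runs from the led_act_off
	 * delayed work (ipw_bg_led_activity_off) and from
	 * ipw_led_radio_off()/ipw_led_shutdown(). */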
1004 unsigned long flags; 1005 u32 led; 1006 1007 if (priv->config & CFG_NO_LED) 1008 return; 1009 1010 spin_lock_irqsave(&priv->lock, flags); 1011 1012 if (priv->status & STATUS_LED_ACT_ON) { 1013 led = ipw_read_reg32(priv, IPW_EVENT_REG); 1014 led &= priv->led_activity_off; 1015 1016 led = ipw_register_toggle(led); 1017 1018 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 1019 ipw_write_reg32(priv, IPW_EVENT_REG, led); 1020 1021 IPW_DEBUG_LED("Activity LED Off\n"); 1022 1023 priv->status &= ~STATUS_LED_ACT_ON; 1024 } 1025 1026 spin_unlock_irqrestore(&priv->lock, flags); 1027} 1028 1029static void ipw_bg_led_activity_off(struct work_struct *work) 1030{ 1031 struct ipw_priv *priv = 1032 container_of(work, struct ipw_priv, led_act_off.work); 1033 mutex_lock(&priv->mutex); 1034 ipw_led_activity_off(priv); 1035 mutex_unlock(&priv->mutex); 1036} 1037 1038static void ipw_led_band_on(struct ipw_priv *priv) 1039{ 1040 unsigned long flags; 1041 u32 led; 1042 1043 /* Only nic type 1 supports mode LEDs */ 1044 if (priv->config & CFG_NO_LED || 1045 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network) 1046 return; 1047 1048 spin_lock_irqsave(&priv->lock, flags); 1049 1050 led = ipw_read_reg32(priv, IPW_EVENT_REG); 1051 if (priv->assoc_network->mode == IEEE_A) { 1052 led |= priv->led_ofdm_on; 1053 led &= priv->led_association_off; 1054 IPW_DEBUG_LED("Mode LED On: 802.11a\n"); 1055 } else if (priv->assoc_network->mode == IEEE_G) { 1056 led |= priv->led_ofdm_on; 1057 led |= priv->led_association_on; 1058 IPW_DEBUG_LED("Mode LED On: 802.11g\n"); 1059 } else { 1060 led &= priv->led_ofdm_off; 1061 led |= priv->led_association_on; 1062 IPW_DEBUG_LED("Mode LED On: 802.11b\n"); 1063 } 1064 1065 led = ipw_register_toggle(led); 1066 1067 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 1068 ipw_write_reg32(priv, IPW_EVENT_REG, led); 1069 1070 spin_unlock_irqrestore(&priv->lock, flags); 1071} 1072 1073static void ipw_led_band_off(struct ipw_priv *priv) 1074{ 1075 unsigned long flags; 1076 u32 led; 1077 1078 /* Only nic type 1 supports mode LEDs */ 1079 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1) 1080 return; 1081 1082 spin_lock_irqsave(&priv->lock, flags); 1083 1084 led = ipw_read_reg32(priv, IPW_EVENT_REG); 1085 led &= priv->led_ofdm_off; 1086 led &= priv->led_association_off; 1087 1088 led = ipw_register_toggle(led); 1089 1090 IPW_DEBUG_LED("Reg: 0x%08X\n", led); 1091 ipw_write_reg32(priv, IPW_EVENT_REG, led); 1092 1093 spin_unlock_irqrestore(&priv->lock, flags); 1094} 1095 1096static void ipw_led_radio_on(struct ipw_priv *priv) 1097{ 1098 ipw_led_link_on(priv); 1099} 1100 1101static void ipw_led_radio_off(struct ipw_priv *priv) 1102{ 1103 ipw_led_activity_off(priv); 1104 ipw_led_link_off(priv); 1105} 1106 1107static void ipw_led_link_up(struct ipw_priv *priv) 1108{ 1109 /* Set the Link Led on for all nic types */ 1110 ipw_led_link_on(priv); 1111} 1112 1113static void ipw_led_link_down(struct ipw_priv *priv) 1114{ 1115 ipw_led_activity_off(priv); 1116 ipw_led_link_off(priv); 1117 1118 if (priv->status & STATUS_RF_KILL_MASK) 1119 ipw_led_radio_off(priv); 1120} 1121 1122static void ipw_led_init(struct ipw_priv *priv) 1123{ 1124 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE]; 1125 1126 /* Set the default PINs for the link and activity leds */ 1127 priv->led_activity_on = IPW_ACTIVITY_LED; 1128 priv->led_activity_off = ~(IPW_ACTIVITY_LED); 1129 1130 priv->led_association_on = IPW_ASSOCIATED_LED; 1131 priv->led_association_off = ~(IPW_ASSOCIATED_LED); 1132 1133 /* Set the default PINs for the OFDM leds */ 
1134 priv->led_ofdm_on = IPW_OFDM_LED; 1135 priv->led_ofdm_off = ~(IPW_OFDM_LED); 1136 1137 switch (priv->nic_type) { 1138 case EEPROM_NIC_TYPE_1: 1139 /* In this NIC type, the LEDs are reversed.... */ 1140 priv->led_activity_on = IPW_ASSOCIATED_LED; 1141 priv->led_activity_off = ~(IPW_ASSOCIATED_LED); 1142 priv->led_association_on = IPW_ACTIVITY_LED; 1143 priv->led_association_off = ~(IPW_ACTIVITY_LED); 1144 1145 if (!(priv->config & CFG_NO_LED)) 1146 ipw_led_band_on(priv); 1147 1148 /* And we don't blink link LEDs for this nic, so 1149 * just return here */ 1150 return; 1151 1152 case EEPROM_NIC_TYPE_3: 1153 case EEPROM_NIC_TYPE_2: 1154 case EEPROM_NIC_TYPE_4: 1155 case EEPROM_NIC_TYPE_0: 1156 break; 1157 1158 default: 1159 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n", 1160 priv->nic_type); 1161 priv->nic_type = EEPROM_NIC_TYPE_0; 1162 break; 1163 } 1164 1165 if (!(priv->config & CFG_NO_LED)) { 1166 if (priv->status & STATUS_ASSOCIATED) 1167 ipw_led_link_on(priv); 1168 else 1169 ipw_led_link_off(priv); 1170 } 1171} 1172 1173static void ipw_led_shutdown(struct ipw_priv *priv) 1174{ 1175 ipw_led_activity_off(priv); 1176 ipw_led_link_off(priv); 1177 ipw_led_band_off(priv); 1178 cancel_delayed_work(&priv->led_link_on); 1179 cancel_delayed_work(&priv->led_link_off); 1180 cancel_delayed_work(&priv->led_act_off); 1181} 1182 1183/* 1184 * The following adds a new attribute to the sysfs representation 1185 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/) 1186 * used for controling the debug level. 1187 * 1188 * See the level definitions in ipw for details. 1189 */ 1190static ssize_t show_debug_level(struct device_driver *d, char *buf) 1191{ 1192 return sprintf(buf, "0x%08X\n", ipw_debug_level); 1193} 1194 1195static ssize_t store_debug_level(struct device_driver *d, const char *buf, 1196 size_t count) 1197{ 1198 char *p = (char *)buf; 1199 u32 val; 1200 1201 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { 1202 p++; 1203 if (p[0] == 'x' || p[0] == 'X') 1204 p++; 1205 val = simple_strtoul(p, &p, 16); 1206 } else 1207 val = simple_strtoul(p, &p, 10); 1208 if (p == buf) 1209 printk(KERN_INFO DRV_NAME 1210 ": %s is not in hex or decimal form.\n", buf); 1211 else 1212 ipw_debug_level = val; 1213 1214 return strnlen(buf, count); 1215} 1216 1217static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, 1218 show_debug_level, store_debug_level); 1219 1220static inline u32 ipw_get_event_log_len(struct ipw_priv *priv) 1221{ 1222 /* length = 1st dword in log */ 1223 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG)); 1224} 1225 1226static void ipw_capture_event_log(struct ipw_priv *priv, 1227 u32 log_len, struct ipw_event *log) 1228{ 1229 u32 base; 1230 1231 if (log_len) { 1232 base = ipw_read32(priv, IPW_EVENT_LOG); 1233 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32), 1234 (u8 *) log, sizeof(*log) * log_len); 1235 } 1236} 1237 1238static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv) 1239{ 1240 struct ipw_fw_error *error; 1241 u32 log_len = ipw_get_event_log_len(priv); 1242 u32 base = ipw_read32(priv, IPW_ERROR_LOG); 1243 u32 elem_len = ipw_read_reg32(priv, base); 1244 1245 error = kmalloc(sizeof(*error) + 1246 sizeof(*error->elem) * elem_len + 1247 sizeof(*error->log) * log_len, GFP_ATOMIC); 1248 if (!error) { 1249 IPW_ERROR("Memory allocation for firmware error log " 1250 "failed.\n"); 1251 return NULL; 1252 } 1253 error->jiffies = jiffies; 1254 error->status = priv->status; 1255 error->config = priv->config; 1256 error->elem_len = 
elem_len; 1257 error->log_len = log_len; 1258 error->elem = (struct ipw_error_elem *)error->payload; 1259 error->log = (struct ipw_event *)(error->elem + elem_len); 1260 1261 ipw_capture_event_log(priv, log_len, error->log); 1262 1263 if (elem_len) 1264 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem, 1265 sizeof(*error->elem) * elem_len); 1266 1267 return error; 1268} 1269 1270static ssize_t show_event_log(struct device *d, 1271 struct device_attribute *attr, char *buf) 1272{ 1273 struct ipw_priv *priv = dev_get_drvdata(d); 1274 u32 log_len = ipw_get_event_log_len(priv); 1275 u32 log_size; 1276 struct ipw_event *log; 1277 u32 len = 0, i; 1278 1279 /* not using min() because of its strict type checking */ 1280 log_size = PAGE_SIZE / sizeof(*log) > log_len ? 1281 sizeof(*log) * log_len : PAGE_SIZE; 1282 log = kzalloc(log_size, GFP_KERNEL); 1283 if (!log) { 1284 IPW_ERROR("Unable to allocate memory for log\n"); 1285 return 0; 1286 } 1287 log_len = log_size / sizeof(*log); 1288 ipw_capture_event_log(priv, log_len, log); 1289 1290 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len); 1291 for (i = 0; i < log_len; i++) 1292 len += snprintf(buf + len, PAGE_SIZE - len, 1293 "\n%08X%08X%08X", 1294 log[i].time, log[i].event, log[i].data); 1295 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1296 kfree(log); 1297 return len; 1298} 1299 1300static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL); 1301 1302static ssize_t show_error(struct device *d, 1303 struct device_attribute *attr, char *buf) 1304{ 1305 struct ipw_priv *priv = dev_get_drvdata(d); 1306 u32 len = 0, i; 1307 if (!priv->error) 1308 return 0; 1309 len += snprintf(buf + len, PAGE_SIZE - len, 1310 "%08lX%08X%08X%08X", 1311 priv->error->jiffies, 1312 priv->error->status, 1313 priv->error->config, priv->error->elem_len); 1314 for (i = 0; i < priv->error->elem_len; i++) 1315 len += snprintf(buf + len, PAGE_SIZE - len, 1316 "\n%08X%08X%08X%08X%08X%08X%08X", 1317 priv->error->elem[i].time, 1318 priv->error->elem[i].desc, 1319 priv->error->elem[i].blink1, 1320 priv->error->elem[i].blink2, 1321 priv->error->elem[i].link1, 1322 priv->error->elem[i].link2, 1323 priv->error->elem[i].data); 1324 1325 len += snprintf(buf + len, PAGE_SIZE - len, 1326 "\n%08X", priv->error->log_len); 1327 for (i = 0; i < priv->error->log_len; i++) 1328 len += snprintf(buf + len, PAGE_SIZE - len, 1329 "\n%08X%08X%08X", 1330 priv->error->log[i].time, 1331 priv->error->log[i].event, 1332 priv->error->log[i].data); 1333 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1334 return len; 1335} 1336 1337static ssize_t clear_error(struct device *d, 1338 struct device_attribute *attr, 1339 const char *buf, size_t count) 1340{ 1341 struct ipw_priv *priv = dev_get_drvdata(d); 1342 1343 kfree(priv->error); 1344 priv->error = NULL; 1345 return count; 1346} 1347 1348static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error); 1349 1350static ssize_t show_cmd_log(struct device *d, 1351 struct device_attribute *attr, char *buf) 1352{ 1353 struct ipw_priv *priv = dev_get_drvdata(d); 1354 u32 len = 0, i; 1355 if (!priv->cmdlog) 1356 return 0; 1357 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len; 1358 (i != priv->cmdlog_pos) && (PAGE_SIZE - len); 1359 i = (i + 1) % priv->cmdlog_len) { 1360 len += 1361 snprintf(buf + len, PAGE_SIZE - len, 1362 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies, 1363 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd, 1364 priv->cmdlog[i].cmd.len); 1365 len += 1366 snprintk_buf(buf + len, PAGE_SIZE - len, 1367 (u8 
*) priv->cmdlog[i].cmd.param, 1368 priv->cmdlog[i].cmd.len); 1369 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1370 } 1371 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 1372 return len; 1373} 1374 1375static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL); 1376 1377#ifdef CONFIG_IPW2200_PROMISCUOUS 1378static void ipw_prom_free(struct ipw_priv *priv); 1379static int ipw_prom_alloc(struct ipw_priv *priv); 1380static ssize_t store_rtap_iface(struct device *d, 1381 struct device_attribute *attr, 1382 const char *buf, size_t count) 1383{ 1384 struct ipw_priv *priv = dev_get_drvdata(d); 1385 int rc = 0; 1386 1387 if (count < 1) 1388 return -EINVAL; 1389 1390 switch (buf[0]) { 1391 case '0': 1392 if (!rtap_iface) 1393 return count; 1394 1395 if (netif_running(priv->prom_net_dev)) { 1396 IPW_WARNING("Interface is up. Cannot unregister.\n"); 1397 return count; 1398 } 1399 1400 ipw_prom_free(priv); 1401 rtap_iface = 0; 1402 break; 1403 1404 case '1': 1405 if (rtap_iface) 1406 return count; 1407 1408 rc = ipw_prom_alloc(priv); 1409 if (!rc) 1410 rtap_iface = 1; 1411 break; 1412 1413 default: 1414 return -EINVAL; 1415 } 1416 1417 if (rc) { 1418 IPW_ERROR("Failed to register promiscuous network " 1419 "device (error %d).\n", rc); 1420 } 1421 1422 return count; 1423} 1424 1425static ssize_t show_rtap_iface(struct device *d, 1426 struct device_attribute *attr, 1427 char *buf) 1428{ 1429 struct ipw_priv *priv = dev_get_drvdata(d); 1430 if (rtap_iface) 1431 return sprintf(buf, "%s", priv->prom_net_dev->name); 1432 else { 1433 buf[0] = '-'; 1434 buf[1] = '1'; 1435 buf[2] = '\0'; 1436 return 3; 1437 } 1438} 1439 1440static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface, 1441 store_rtap_iface); 1442 1443static ssize_t store_rtap_filter(struct device *d, 1444 struct device_attribute *attr, 1445 const char *buf, size_t count) 1446{ 1447 struct ipw_priv *priv = dev_get_drvdata(d); 1448 1449 if (!priv->prom_priv) { 1450 IPW_ERROR("Attempting to set filter without " 1451 "rtap_iface enabled.\n"); 1452 return -EPERM; 1453 } 1454 1455 priv->prom_priv->filter = simple_strtol(buf, NULL, 0); 1456 1457 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n", 1458 BIT_ARG16(priv->prom_priv->filter)); 1459 1460 return count; 1461} 1462 1463static ssize_t show_rtap_filter(struct device *d, 1464 struct device_attribute *attr, 1465 char *buf) 1466{ 1467 struct ipw_priv *priv = dev_get_drvdata(d); 1468 return sprintf(buf, "0x%04X", 1469 priv->prom_priv ? priv->prom_priv->filter : 0); 1470} 1471 1472static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter, 1473 store_rtap_filter); 1474#endif 1475 1476static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, 1477 char *buf) 1478{ 1479 struct ipw_priv *priv = dev_get_drvdata(d); 1480 return sprintf(buf, "%d\n", priv->ieee->scan_age); 1481} 1482 1483static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, 1484 const char *buf, size_t count) 1485{ 1486 struct ipw_priv *priv = dev_get_drvdata(d); 1487 struct net_device *dev = priv->net_dev; 1488 char buffer[] = "00000000"; 1489 unsigned long len = 1490 (sizeof(buffer) - 1) > count ? 
count : sizeof(buffer) - 1; 1491 unsigned long val; 1492 char *p = buffer; 1493 1494 IPW_DEBUG_INFO("enter\n"); 1495 1496 strncpy(buffer, buf, len); 1497 buffer[len] = 0; 1498 1499 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { 1500 p++; 1501 if (p[0] == 'x' || p[0] == 'X') 1502 p++; 1503 val = simple_strtoul(p, &p, 16); 1504 } else 1505 val = simple_strtoul(p, &p, 10); 1506 if (p == buffer) { 1507 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name); 1508 } else { 1509 priv->ieee->scan_age = val; 1510 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age); 1511 } 1512 1513 IPW_DEBUG_INFO("exit\n"); 1514 return len; 1515} 1516 1517static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age); 1518 1519static ssize_t show_led(struct device *d, struct device_attribute *attr, 1520 char *buf) 1521{ 1522 struct ipw_priv *priv = dev_get_drvdata(d); 1523 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1); 1524} 1525 1526static ssize_t store_led(struct device *d, struct device_attribute *attr, 1527 const char *buf, size_t count) 1528{ 1529 struct ipw_priv *priv = dev_get_drvdata(d); 1530 1531 IPW_DEBUG_INFO("enter\n"); 1532 1533 if (count == 0) 1534 return 0; 1535 1536 if (*buf == 0) { 1537 IPW_DEBUG_LED("Disabling LED control.\n"); 1538 priv->config |= CFG_NO_LED; 1539 ipw_led_shutdown(priv); 1540 } else { 1541 IPW_DEBUG_LED("Enabling LED control.\n"); 1542 priv->config &= ~CFG_NO_LED; 1543 ipw_led_init(priv); 1544 } 1545 1546 IPW_DEBUG_INFO("exit\n"); 1547 return count; 1548} 1549 1550static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led); 1551 1552static ssize_t show_status(struct device *d, 1553 struct device_attribute *attr, char *buf) 1554{ 1555 struct ipw_priv *p = dev_get_drvdata(d); 1556 return sprintf(buf, "0x%08x\n", (int)p->status); 1557} 1558 1559static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); 1560 1561static ssize_t show_cfg(struct device *d, struct device_attribute *attr, 1562 char *buf) 1563{ 1564 struct ipw_priv *p = dev_get_drvdata(d); 1565 return sprintf(buf, "0x%08x\n", (int)p->config); 1566} 1567 1568static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL); 1569 1570static ssize_t show_nic_type(struct device *d, 1571 struct device_attribute *attr, char *buf) 1572{ 1573 struct ipw_priv *priv = dev_get_drvdata(d); 1574 return sprintf(buf, "TYPE: %d\n", priv->nic_type); 1575} 1576 1577static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL); 1578 1579static ssize_t show_ucode_version(struct device *d, 1580 struct device_attribute *attr, char *buf) 1581{ 1582 u32 len = sizeof(u32), tmp = 0; 1583 struct ipw_priv *p = dev_get_drvdata(d); 1584 1585 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len)) 1586 return 0; 1587 1588 return sprintf(buf, "0x%08x\n", tmp); 1589} 1590 1591static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL); 1592 1593static ssize_t show_rtc(struct device *d, struct device_attribute *attr, 1594 char *buf) 1595{ 1596 u32 len = sizeof(u32), tmp = 0; 1597 struct ipw_priv *p = dev_get_drvdata(d); 1598 1599 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len)) 1600 return 0; 1601 1602 return sprintf(buf, "0x%08x\n", tmp); 1603} 1604 1605static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL); 1606 1607/* 1608 * Add a device attribute to view/control the delay between eeprom 1609 * operations. 
 */
static ssize_t show_eeprom_delay(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	struct ipw_priv *p = dev_get_drvdata(d);
	int n = p->eeprom_delay;
	return sprintf(buf, "%i\n", n);
}
static ssize_t store_eeprom_delay(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ipw_priv *p = dev_get_drvdata(d);
	sscanf(buf, "%i", &p->eeprom_delay);
	return strnlen(buf, count);
}

static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
		   show_eeprom_delay, store_eeprom_delay);

static ssize_t show_command_event_reg(struct device *d,
				      struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *p = dev_get_drvdata(d);

	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
	return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_command_event_reg(struct device *d,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	u32 reg;
	struct ipw_priv *p = dev_get_drvdata(d);

	sscanf(buf, "%x", &reg);
	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
	return strnlen(buf, count);
}

static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
		   show_command_event_reg, store_command_event_reg);

static ssize_t show_mem_gpio_reg(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *p = dev_get_drvdata(d);

	reg = ipw_read_reg32(p, 0x301100);
	return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_mem_gpio_reg(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	u32 reg;
	struct ipw_priv *p = dev_get_drvdata(d);

	sscanf(buf, "%x", &reg);
	ipw_write_reg32(p, 0x301100, reg);
	return strnlen(buf, count);
}

static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
		   show_mem_gpio_reg, store_mem_gpio_reg);

static ssize_t show_indirect_dword(struct device *d,
				   struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (priv->status & STATUS_INDIRECT_DWORD)
		reg = ipw_read_reg32(priv, priv->indirect_dword);
	else
		reg = 0;

	return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_indirect_dword(struct device *d,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->indirect_dword);
	priv->status |= STATUS_INDIRECT_DWORD;
	return strnlen(buf, count);
}

static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
		   show_indirect_dword, store_indirect_dword);

static ssize_t show_indirect_byte(struct device *d,
				  struct device_attribute *attr, char *buf)
{
	u8 reg = 0;
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (priv->status & STATUS_INDIRECT_BYTE)
		reg = ipw_read_reg8(priv, priv->indirect_byte);
	else
		reg = 0;

	return sprintf(buf, "0x%02x\n", reg);
}
static ssize_t store_indirect_byte(struct device *d,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->indirect_byte);
	priv->status |= STATUS_INDIRECT_BYTE;
	return strnlen(buf, count);
}

static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
		   show_indirect_byte, store_indirect_byte);

static ssize_t show_direct_dword(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	u32 reg = 0;
	struct ipw_priv *priv = dev_get_drvdata(d);

	if (priv->status & STATUS_DIRECT_DWORD)
		reg = ipw_read32(priv, priv->direct_dword);
	else
		reg = 0;

	return sprintf(buf, "0x%08x\n", reg);
}
static ssize_t store_direct_dword(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ipw_priv *priv = dev_get_drvdata(d);

	sscanf(buf, "%x", &priv->direct_dword);
	priv->status |= STATUS_DIRECT_DWORD;
	return strnlen(buf, count);
}

static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
		   show_direct_dword, store_direct_dword);

static int rf_kill_active(struct ipw_priv *priv)
{
	if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
		priv->status |= STATUS_RF_KILL_HW;
		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
	} else {
		priv->status &= ~STATUS_RF_KILL_HW;
		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
	}

	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
}

static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	/* 0 - RF kill not enabled
	   1 - SW based RF kill active (sysfs)
	   2 - HW based RF kill active
	   3 - Both HW and SW based RF kill active */
	struct ipw_priv *priv = dev_get_drvdata(d);
	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
	    (rf_kill_active(priv) ? 0x2 : 0x0);
	return sprintf(buf, "%i\n", val);
}

static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
{
	if ((disable_radio ? 1 : 0) ==
	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
		return 0;

	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
			  disable_radio ?
"OFF" : "ON"); 1793 1794 if (disable_radio) { 1795 priv->status |= STATUS_RF_KILL_SW; 1796 1797 if (priv->workqueue) { 1798 cancel_delayed_work(&priv->request_scan); 1799 cancel_delayed_work(&priv->request_direct_scan); 1800 cancel_delayed_work(&priv->request_passive_scan); 1801 cancel_delayed_work(&priv->scan_event); 1802 } 1803 queue_work(priv->workqueue, &priv->down); 1804 } else { 1805 priv->status &= ~STATUS_RF_KILL_SW; 1806 if (rf_kill_active(priv)) { 1807 IPW_DEBUG_RF_KILL("Can not turn radio back on - " 1808 "disabled by HW switch\n"); 1809 /* Make sure the RF_KILL check timer is running */ 1810 cancel_delayed_work(&priv->rf_kill); 1811 queue_delayed_work(priv->workqueue, &priv->rf_kill, 1812 round_jiffies_relative(2 * HZ)); 1813 } else 1814 queue_work(priv->workqueue, &priv->up); 1815 } 1816 1817 return 1; 1818} 1819 1820static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, 1821 const char *buf, size_t count) 1822{ 1823 struct ipw_priv *priv = dev_get_drvdata(d); 1824 1825 ipw_radio_kill_sw(priv, buf[0] == '1'); 1826 1827 return count; 1828} 1829 1830static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); 1831 1832static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr, 1833 char *buf) 1834{ 1835 struct ipw_priv *priv = dev_get_drvdata(d); 1836 int pos = 0, len = 0; 1837 if (priv->config & CFG_SPEED_SCAN) { 1838 while (priv->speed_scan[pos] != 0) 1839 len += sprintf(&buf[len], "%d ", 1840 priv->speed_scan[pos++]); 1841 return len + sprintf(&buf[len], "\n"); 1842 } 1843 1844 return sprintf(buf, "0\n"); 1845} 1846 1847static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr, 1848 const char *buf, size_t count) 1849{ 1850 struct ipw_priv *priv = dev_get_drvdata(d); 1851 int channel, pos = 0; 1852 const char *p = buf; 1853 1854 /* list of space separated channels to scan, optionally ending with 0 */ 1855 while ((channel = simple_strtol(p, NULL, 0))) { 1856 if (pos == MAX_SPEED_SCAN - 1) { 1857 priv->speed_scan[pos] = 0; 1858 break; 1859 } 1860 1861 if (libipw_is_valid_channel(priv->ieee, channel)) 1862 priv->speed_scan[pos++] = channel; 1863 else 1864 IPW_WARNING("Skipping invalid channel request: %d\n", 1865 channel); 1866 p = strchr(p, ' '); 1867 if (!p) 1868 break; 1869 while (*p == ' ' || *p == '\t') 1870 p++; 1871 } 1872 1873 if (pos == 0) 1874 priv->config &= ~CFG_SPEED_SCAN; 1875 else { 1876 priv->speed_scan_pos = 0; 1877 priv->config |= CFG_SPEED_SCAN; 1878 } 1879 1880 return count; 1881} 1882 1883static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan, 1884 store_speed_scan); 1885 1886static ssize_t show_net_stats(struct device *d, struct device_attribute *attr, 1887 char *buf) 1888{ 1889 struct ipw_priv *priv = dev_get_drvdata(d); 1890 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? 
'1' : '0'); 1891} 1892 1893static ssize_t store_net_stats(struct device *d, struct device_attribute *attr, 1894 const char *buf, size_t count) 1895{ 1896 struct ipw_priv *priv = dev_get_drvdata(d); 1897 if (buf[0] == '1') 1898 priv->config |= CFG_NET_STATS; 1899 else 1900 priv->config &= ~CFG_NET_STATS; 1901 1902 return count; 1903} 1904 1905static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO, 1906 show_net_stats, store_net_stats); 1907 1908static ssize_t show_channels(struct device *d, 1909 struct device_attribute *attr, 1910 char *buf) 1911{ 1912 struct ipw_priv *priv = dev_get_drvdata(d); 1913 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 1914 int len = 0, i; 1915 1916 len = sprintf(&buf[len], 1917 "Displaying %d channels in 2.4Ghz band " 1918 "(802.11bg):\n", geo->bg_channels); 1919 1920 for (i = 0; i < geo->bg_channels; i++) { 1921 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n", 1922 geo->bg[i].channel, 1923 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ? 1924 " (radar spectrum)" : "", 1925 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) || 1926 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)) 1927 ? "" : ", IBSS", 1928 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ? 1929 "passive only" : "active/passive", 1930 geo->bg[i].flags & LIBIPW_CH_B_ONLY ? 1931 "B" : "B/G"); 1932 } 1933 1934 len += sprintf(&buf[len], 1935 "Displaying %d channels in 5.2Ghz band " 1936 "(802.11a):\n", geo->a_channels); 1937 for (i = 0; i < geo->a_channels; i++) { 1938 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n", 1939 geo->a[i].channel, 1940 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ? 1941 " (radar spectrum)" : "", 1942 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) || 1943 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)) 1944 ? "" : ", IBSS", 1945 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ? 
1946 "passive only" : "active/passive"); 1947 } 1948 1949 return len; 1950} 1951 1952static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 1953 1954static void notify_wx_assoc_event(struct ipw_priv *priv) 1955{ 1956 union iwreq_data wrqu; 1957 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1958 if (priv->status & STATUS_ASSOCIATED) 1959 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); 1960 else 1961 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); 1962 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); 1963} 1964 1965static void ipw_irq_tasklet(struct ipw_priv *priv) 1966{ 1967 u32 inta, inta_mask, handled = 0; 1968 unsigned long flags; 1969 int rc = 0; 1970 1971 spin_lock_irqsave(&priv->irq_lock, flags); 1972 1973 inta = ipw_read32(priv, IPW_INTA_RW); 1974 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); 1975 inta &= (IPW_INTA_MASK_ALL & inta_mask); 1976 1977 /* Add any cached INTA values that need to be handled */ 1978 inta |= priv->isr_inta; 1979 1980 spin_unlock_irqrestore(&priv->irq_lock, flags); 1981 1982 spin_lock_irqsave(&priv->lock, flags); 1983 1984 /* handle all the justifications for the interrupt */ 1985 if (inta & IPW_INTA_BIT_RX_TRANSFER) { 1986 ipw_rx(priv); 1987 handled |= IPW_INTA_BIT_RX_TRANSFER; 1988 } 1989 1990 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) { 1991 IPW_DEBUG_HC("Command completed.\n"); 1992 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1); 1993 priv->status &= ~STATUS_HCMD_ACTIVE; 1994 wake_up_interruptible(&priv->wait_command_queue); 1995 handled |= IPW_INTA_BIT_TX_CMD_QUEUE; 1996 } 1997 1998 if (inta & IPW_INTA_BIT_TX_QUEUE_1) { 1999 IPW_DEBUG_TX("TX_QUEUE_1\n"); 2000 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0); 2001 handled |= IPW_INTA_BIT_TX_QUEUE_1; 2002 } 2003 2004 if (inta & IPW_INTA_BIT_TX_QUEUE_2) { 2005 IPW_DEBUG_TX("TX_QUEUE_2\n"); 2006 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1); 2007 handled |= IPW_INTA_BIT_TX_QUEUE_2; 2008 } 2009 2010 if (inta & IPW_INTA_BIT_TX_QUEUE_3) { 2011 IPW_DEBUG_TX("TX_QUEUE_3\n"); 2012 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2); 2013 handled |= IPW_INTA_BIT_TX_QUEUE_3; 2014 } 2015 2016 if (inta & IPW_INTA_BIT_TX_QUEUE_4) { 2017 IPW_DEBUG_TX("TX_QUEUE_4\n"); 2018 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3); 2019 handled |= IPW_INTA_BIT_TX_QUEUE_4; 2020 } 2021 2022 if (inta & IPW_INTA_BIT_STATUS_CHANGE) { 2023 IPW_WARNING("STATUS_CHANGE\n"); 2024 handled |= IPW_INTA_BIT_STATUS_CHANGE; 2025 } 2026 2027 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) { 2028 IPW_WARNING("TX_PERIOD_EXPIRED\n"); 2029 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED; 2030 } 2031 2032 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) { 2033 IPW_WARNING("HOST_CMD_DONE\n"); 2034 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE; 2035 } 2036 2037 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) { 2038 IPW_WARNING("FW_INITIALIZATION_DONE\n"); 2039 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE; 2040 } 2041 2042 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) { 2043 IPW_WARNING("PHY_OFF_DONE\n"); 2044 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE; 2045 } 2046 2047 if (inta & IPW_INTA_BIT_RF_KILL_DONE) { 2048 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n"); 2049 priv->status |= STATUS_RF_KILL_HW; 2050 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true); 2051 wake_up_interruptible(&priv->wait_command_queue); 2052 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); 2053 cancel_delayed_work(&priv->request_scan); 2054 cancel_delayed_work(&priv->request_direct_scan); 2055 cancel_delayed_work(&priv->request_passive_scan); 
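		/* also drop any pending scan-completion notification
		 * before signalling link down and re-arming the
		 * rf_kill poll below */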
2056 cancel_delayed_work(&priv->scan_event); 2057 schedule_work(&priv->link_down); 2058 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); 2059 handled |= IPW_INTA_BIT_RF_KILL_DONE; 2060 } 2061 2062 if (inta & IPW_INTA_BIT_FATAL_ERROR) { 2063 IPW_WARNING("Firmware error detected. Restarting.\n"); 2064 if (priv->error) { 2065 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n"); 2066 if (ipw_debug_level & IPW_DL_FW_ERRORS) { 2067 struct ipw_fw_error *error = 2068 ipw_alloc_error_log(priv); 2069 ipw_dump_error_log(priv, error); 2070 kfree(error); 2071 } 2072 } else { 2073 priv->error = ipw_alloc_error_log(priv); 2074 if (priv->error) 2075 IPW_DEBUG_FW("Sysfs 'error' log captured.\n"); 2076 else 2077 IPW_DEBUG_FW("Error allocating sysfs 'error' " 2078 "log.\n"); 2079 if (ipw_debug_level & IPW_DL_FW_ERRORS) 2080 ipw_dump_error_log(priv, priv->error); 2081 } 2082 2083 /* XXX: If hardware encryption is for WPA/WPA2, 2084 * we have to notify the supplicant. */ 2085 if (priv->ieee->sec.encrypt) { 2086 priv->status &= ~STATUS_ASSOCIATED; 2087 notify_wx_assoc_event(priv); 2088 } 2089 2090 /* Keep the restart process from trying to send host 2091 * commands by clearing the INIT status bit */ 2092 priv->status &= ~STATUS_INIT; 2093 2094 /* Cancel currently queued command. */ 2095 priv->status &= ~STATUS_HCMD_ACTIVE; 2096 wake_up_interruptible(&priv->wait_command_queue); 2097 2098 queue_work(priv->workqueue, &priv->adapter_restart); 2099 handled |= IPW_INTA_BIT_FATAL_ERROR; 2100 } 2101 2102 if (inta & IPW_INTA_BIT_PARITY_ERROR) { 2103 IPW_ERROR("Parity error\n"); 2104 handled |= IPW_INTA_BIT_PARITY_ERROR; 2105 } 2106 2107 if (handled != inta) { 2108 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); 2109 } 2110 2111 spin_unlock_irqrestore(&priv->lock, flags); 2112 2113 /* enable all interrupts */ 2114 ipw_enable_interrupts(priv); 2115} 2116 2117#define IPW_CMD(x) case IPW_CMD_ ## x : return #x 2118static char *get_cmd_string(u8 cmd) 2119{ 2120 switch (cmd) { 2121 IPW_CMD(HOST_COMPLETE); 2122 IPW_CMD(POWER_DOWN); 2123 IPW_CMD(SYSTEM_CONFIG); 2124 IPW_CMD(MULTICAST_ADDRESS); 2125 IPW_CMD(SSID); 2126 IPW_CMD(ADAPTER_ADDRESS); 2127 IPW_CMD(PORT_TYPE); 2128 IPW_CMD(RTS_THRESHOLD); 2129 IPW_CMD(FRAG_THRESHOLD); 2130 IPW_CMD(POWER_MODE); 2131 IPW_CMD(WEP_KEY); 2132 IPW_CMD(TGI_TX_KEY); 2133 IPW_CMD(SCAN_REQUEST); 2134 IPW_CMD(SCAN_REQUEST_EXT); 2135 IPW_CMD(ASSOCIATE); 2136 IPW_CMD(SUPPORTED_RATES); 2137 IPW_CMD(SCAN_ABORT); 2138 IPW_CMD(TX_FLUSH); 2139 IPW_CMD(QOS_PARAMETERS); 2140 IPW_CMD(DINO_CONFIG); 2141 IPW_CMD(RSN_CAPABILITIES); 2142 IPW_CMD(RX_KEY); 2143 IPW_CMD(CARD_DISABLE); 2144 IPW_CMD(SEED_NUMBER); 2145 IPW_CMD(TX_POWER); 2146 IPW_CMD(COUNTRY_INFO); 2147 IPW_CMD(AIRONET_INFO); 2148 IPW_CMD(AP_TX_POWER); 2149 IPW_CMD(CCKM_INFO); 2150 IPW_CMD(CCX_VER_INFO); 2151 IPW_CMD(SET_CALIBRATION); 2152 IPW_CMD(SENSITIVITY_CALIB); 2153 IPW_CMD(RETRY_LIMIT); 2154 IPW_CMD(IPW_PRE_POWER_DOWN); 2155 IPW_CMD(VAP_BEACON_TEMPLATE); 2156 IPW_CMD(VAP_DTIM_PERIOD); 2157 IPW_CMD(EXT_SUPPORTED_RATES); 2158 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT); 2159 IPW_CMD(VAP_QUIET_INTERVALS); 2160 IPW_CMD(VAP_CHANNEL_SWITCH); 2161 IPW_CMD(VAP_MANDATORY_CHANNELS); 2162 IPW_CMD(VAP_CELL_PWR_LIMIT); 2163 IPW_CMD(VAP_CF_PARAM_SET); 2164 IPW_CMD(VAP_SET_BEACONING_STATE); 2165 IPW_CMD(MEASUREMENT); 2166 IPW_CMD(POWER_CAPABILITY); 2167 IPW_CMD(SUPPORTED_CHANNELS); 2168 IPW_CMD(TPC_REPORT); 2169 IPW_CMD(WME_INFO); 2170 IPW_CMD(PRODUCTION_COMMAND); 2171 default: 2172 return "UNKNOWN"; 2173 } 2174} 2175 2176#define 
HOST_COMPLETE_TIMEOUT HZ 2177 2178static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) 2179{ 2180 int rc = 0; 2181 unsigned long flags; 2182 2183 spin_lock_irqsave(&priv->lock, flags); 2184 if (priv->status & STATUS_HCMD_ACTIVE) { 2185 IPW_ERROR("Failed to send %s: Already sending a command.\n", 2186 get_cmd_string(cmd->cmd)); 2187 spin_unlock_irqrestore(&priv->lock, flags); 2188 return -EAGAIN; 2189 } 2190 2191 priv->status |= STATUS_HCMD_ACTIVE; 2192 2193 if (priv->cmdlog) { 2194 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies; 2195 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd; 2196 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len; 2197 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param, 2198 cmd->len); 2199 priv->cmdlog[priv->cmdlog_pos].retcode = -1; 2200 } 2201 2202 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n", 2203 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len, 2204 priv->status); 2205 2206#ifndef DEBUG_CMD_WEP_KEY 2207 if (cmd->cmd == IPW_CMD_WEP_KEY) 2208 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n"); 2209 else 2210#endif 2211 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len); 2212 2213 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0); 2214 if (rc) { 2215 priv->status &= ~STATUS_HCMD_ACTIVE; 2216 IPW_ERROR("Failed to send %s: Reason %d\n", 2217 get_cmd_string(cmd->cmd), rc); 2218 spin_unlock_irqrestore(&priv->lock, flags); 2219 goto exit; 2220 } 2221 spin_unlock_irqrestore(&priv->lock, flags); 2222 2223 rc = wait_event_interruptible_timeout(priv->wait_command_queue, 2224 !(priv-> 2225 status & STATUS_HCMD_ACTIVE), 2226 HOST_COMPLETE_TIMEOUT); 2227 if (rc == 0) { 2228 spin_lock_irqsave(&priv->lock, flags); 2229 if (priv->status & STATUS_HCMD_ACTIVE) { 2230 IPW_ERROR("Failed to send %s: Command timed out.\n", 2231 get_cmd_string(cmd->cmd)); 2232 priv->status &= ~STATUS_HCMD_ACTIVE; 2233 spin_unlock_irqrestore(&priv->lock, flags); 2234 rc = -EIO; 2235 goto exit; 2236 } 2237 spin_unlock_irqrestore(&priv->lock, flags); 2238 } else 2239 rc = 0; 2240 2241 if (priv->status & STATUS_RF_KILL_HW) { 2242 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n", 2243 get_cmd_string(cmd->cmd)); 2244 rc = -EIO; 2245 goto exit; 2246 } 2247 2248 exit: 2249 if (priv->cmdlog) { 2250 priv->cmdlog[priv->cmdlog_pos++].retcode = rc; 2251 priv->cmdlog_pos %= priv->cmdlog_len; 2252 } 2253 return rc; 2254} 2255 2256static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command) 2257{ 2258 struct host_cmd cmd = { 2259 .cmd = command, 2260 }; 2261 2262 return __ipw_send_cmd(priv, &cmd); 2263} 2264 2265static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len, 2266 void *data) 2267{ 2268 struct host_cmd cmd = { 2269 .cmd = command, 2270 .len = len, 2271 .param = data, 2272 }; 2273 2274 return __ipw_send_cmd(priv, &cmd); 2275} 2276 2277static int ipw_send_host_complete(struct ipw_priv *priv) 2278{ 2279 if (!priv) { 2280 IPW_ERROR("Invalid args\n"); 2281 return -1; 2282 } 2283 2284 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE); 2285} 2286 2287static int ipw_send_system_config(struct ipw_priv *priv) 2288{ 2289 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, 2290 sizeof(priv->sys_config), 2291 &priv->sys_config); 2292} 2293 2294static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len) 2295{ 2296 if (!priv || !ssid) { 2297 IPW_ERROR("Invalid args\n"); 2298 return -1; 2299 } 2300 2301 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE), 2302 ssid); 2303} 2304 2305static int 
ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) 2306{ 2307 if (!priv || !mac) { 2308 IPW_ERROR("Invalid args\n"); 2309 return -1; 2310 } 2311 2312 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n", 2313 priv->net_dev->name, mac); 2314 2315 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); 2316} 2317 2318/* 2319 * NOTE: This must be executed from our workqueue as it results in udelay 2320 * being called which may corrupt the keyboard if executed on default 2321 * workqueue 2322 */ 2323static void ipw_adapter_restart(void *adapter) 2324{ 2325 struct ipw_priv *priv = adapter; 2326 2327 if (priv->status & STATUS_RF_KILL_MASK) 2328 return; 2329 2330 ipw_down(priv); 2331 2332 if (priv->assoc_network && 2333 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS)) 2334 ipw_remove_current_network(priv); 2335 2336 if (ipw_up(priv)) { 2337 IPW_ERROR("Failed to up device\n"); 2338 return; 2339 } 2340} 2341 2342static void ipw_bg_adapter_restart(struct work_struct *work) 2343{ 2344 struct ipw_priv *priv = 2345 container_of(work, struct ipw_priv, adapter_restart); 2346 mutex_lock(&priv->mutex); 2347 ipw_adapter_restart(priv); 2348 mutex_unlock(&priv->mutex); 2349} 2350 2351#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) 2352 2353static void ipw_scan_check(void *data) 2354{ 2355 struct ipw_priv *priv = data; 2356 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { 2357 IPW_DEBUG_SCAN("Scan completion watchdog resetting " 2358 "adapter after (%dms).\n", 2359 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2360 queue_work(priv->workqueue, &priv->adapter_restart); 2361 } 2362} 2363 2364static void ipw_bg_scan_check(struct work_struct *work) 2365{ 2366 struct ipw_priv *priv = 2367 container_of(work, struct ipw_priv, scan_check.work); 2368 mutex_lock(&priv->mutex); 2369 ipw_scan_check(priv); 2370 mutex_unlock(&priv->mutex); 2371} 2372 2373static int ipw_send_scan_request_ext(struct ipw_priv *priv, 2374 struct ipw_scan_request_ext *request) 2375{ 2376 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT, 2377 sizeof(*request), request); 2378} 2379 2380static int ipw_send_scan_abort(struct ipw_priv *priv) 2381{ 2382 if (!priv) { 2383 IPW_ERROR("Invalid args\n"); 2384 return -1; 2385 } 2386 2387 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT); 2388} 2389 2390static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) 2391{ 2392 struct ipw_sensitivity_calib calib = { 2393 .beacon_rssi_raw = cpu_to_le16(sens), 2394 }; 2395 2396 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib), 2397 &calib); 2398} 2399 2400static int ipw_send_associate(struct ipw_priv *priv, 2401 struct ipw_associate *associate) 2402{ 2403 if (!priv || !associate) { 2404 IPW_ERROR("Invalid args\n"); 2405 return -1; 2406 } 2407 2408 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate), 2409 associate); 2410} 2411 2412static int ipw_send_supported_rates(struct ipw_priv *priv, 2413 struct ipw_supported_rates *rates) 2414{ 2415 if (!priv || !rates) { 2416 IPW_ERROR("Invalid args\n"); 2417 return -1; 2418 } 2419 2420 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates), 2421 rates); 2422} 2423 2424static int ipw_set_random_seed(struct ipw_priv *priv) 2425{ 2426 u32 val; 2427 2428 if (!priv) { 2429 IPW_ERROR("Invalid args\n"); 2430 return -1; 2431 } 2432 2433 get_random_bytes(&val, sizeof(val)); 2434 2435 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val); 2436} 2437 2438static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off) 2439{ 
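	/* the phy_off argument is handed to the firmware as a little-endian
	 * dword attached to the CARD_DISABLE command */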
2440 __le32 v = cpu_to_le32(phy_off); 2441 if (!priv) { 2442 IPW_ERROR("Invalid args\n"); 2443 return -1; 2444 } 2445 2446 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v); 2447} 2448 2449static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power) 2450{ 2451 if (!priv || !power) { 2452 IPW_ERROR("Invalid args\n"); 2453 return -1; 2454 } 2455 2456 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power); 2457} 2458 2459static int ipw_set_tx_power(struct ipw_priv *priv) 2460{ 2461 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 2462 struct ipw_tx_power tx_power; 2463 s8 max_power; 2464 int i; 2465 2466 memset(&tx_power, 0, sizeof(tx_power)); 2467 2468 /* configure device for 'G' band */ 2469 tx_power.ieee_mode = IPW_G_MODE; 2470 tx_power.num_channels = geo->bg_channels; 2471 for (i = 0; i < geo->bg_channels; i++) { 2472 max_power = geo->bg[i].max_power; 2473 tx_power.channels_tx_power[i].channel_number = 2474 geo->bg[i].channel; 2475 tx_power.channels_tx_power[i].tx_power = max_power ? 2476 min(max_power, priv->tx_power) : priv->tx_power; 2477 } 2478 if (ipw_send_tx_power(priv, &tx_power)) 2479 return -EIO; 2480 2481 /* configure device to also handle 'B' band */ 2482 tx_power.ieee_mode = IPW_B_MODE; 2483 if (ipw_send_tx_power(priv, &tx_power)) 2484 return -EIO; 2485 2486 /* configure device to also handle 'A' band */ 2487 if (priv->ieee->abg_true) { 2488 tx_power.ieee_mode = IPW_A_MODE; 2489 tx_power.num_channels = geo->a_channels; 2490 for (i = 0; i < tx_power.num_channels; i++) { 2491 max_power = geo->a[i].max_power; 2492 tx_power.channels_tx_power[i].channel_number = 2493 geo->a[i].channel; 2494 tx_power.channels_tx_power[i].tx_power = max_power ? 2495 min(max_power, priv->tx_power) : priv->tx_power; 2496 } 2497 if (ipw_send_tx_power(priv, &tx_power)) 2498 return -EIO; 2499 } 2500 return 0; 2501} 2502 2503static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) 2504{ 2505 struct ipw_rts_threshold rts_threshold = { 2506 .rts_threshold = cpu_to_le16(rts), 2507 }; 2508 2509 if (!priv) { 2510 IPW_ERROR("Invalid args\n"); 2511 return -1; 2512 } 2513 2514 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD, 2515 sizeof(rts_threshold), &rts_threshold); 2516} 2517 2518static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) 2519{ 2520 struct ipw_frag_threshold frag_threshold = { 2521 .frag_threshold = cpu_to_le16(frag), 2522 }; 2523 2524 if (!priv) { 2525 IPW_ERROR("Invalid args\n"); 2526 return -1; 2527 } 2528 2529 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD, 2530 sizeof(frag_threshold), &frag_threshold); 2531} 2532 2533static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode) 2534{ 2535 __le32 param; 2536 2537 if (!priv) { 2538 IPW_ERROR("Invalid args\n"); 2539 return -1; 2540 } 2541 2542 /* If on battery, set to 3, if AC set to CAM, else user 2543 * level */ 2544 switch (mode) { 2545 case IPW_POWER_BATTERY: 2546 param = cpu_to_le32(IPW_POWER_INDEX_3); 2547 break; 2548 case IPW_POWER_AC: 2549 param = cpu_to_le32(IPW_POWER_MODE_CAM); 2550 break; 2551 default: 2552 param = cpu_to_le32(mode); 2553 break; 2554 } 2555 2556 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), 2557 ¶m); 2558} 2559 2560static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit) 2561{ 2562 struct ipw_retry_limit retry_limit = { 2563 .short_retry_limit = slimit, 2564 .long_retry_limit = llimit 2565 }; 2566 2567 if (!priv) { 2568 IPW_ERROR("Invalid args\n"); 2569 return -1; 2570 } 2571 2572 
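	/* both limits are single bytes, so unlike the 16-bit RTS and
	 * fragmentation thresholds above no endian conversion is needed */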
	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
				&retry_limit);
}

/*
 * The IPW device contains a Microwire compatible EEPROM that stores
 * various data like the MAC address.  Usually the firmware has exclusive
 * access to the eeprom, but during device initialization (before the
 * device driver has sent the HostComplete command to the firmware) the
 * device driver has read access to the EEPROM by way of indirect addressing
 * through a couple of memory mapped registers.
 *
 * The following is a simplified implementation for pulling data out of
 * the eeprom, along with some helper functions to find information in
 * the per device private data's copy of the eeprom.
 *
 * NOTE: To better understand how these functions work (i.e. what is a chip
 * select and why do we have to keep driving the eeprom clock?), read
 * just about any data sheet for a Microwire compatible EEPROM.
 */

/* write a 32 bit value into the indirect accessor register */
static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
{
	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);

	/* the eeprom requires some time to complete the operation */
	udelay(p->eeprom_delay);

	return;
}

/* perform a chip select operation */
static void eeprom_cs(struct ipw_priv *priv)
{
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
}

/* deassert chip select, ending the current eeprom operation */
static void eeprom_disable_cs(struct ipw_priv *priv)
{
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_SK);
}

/* push a single bit down to the eeprom */
static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
{
	int d = (bit ? EEPROM_BIT_DI : 0);
	eeprom_write_reg(p, EEPROM_BIT_CS | d);
	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
}

/* push an opcode followed by an address down to the eeprom */
static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
{
	int i;

	eeprom_cs(priv);
	eeprom_write_bit(priv, 1);
	eeprom_write_bit(priv, op & 2);
	eeprom_write_bit(priv, op & 1);
	for (i = 7; i >= 0; i--) {
		eeprom_write_bit(priv, addr & (1 << i));
	}
}

/* pull 16 bits off the eeprom, one bit at a time */
static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
{
	int i;
	u16 r = 0;

	/* Send READ Opcode */
	eeprom_op(priv, EEPROM_CMD_READ, addr);

	/* Send dummy bit */
	eeprom_write_reg(priv, EEPROM_BIT_CS);

	/* Read the 16-bit value off the eeprom one bit at a time */
	for (i = 0; i < 16; i++) {
		u32 data = 0;
		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
		eeprom_write_reg(priv, EEPROM_BIT_CS);
		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
		r = (r << 1) | ((data & EEPROM_BIT_DO) ?
				1 : 0);
	}

	/* Send another dummy bit */
	eeprom_write_reg(priv, 0);
	eeprom_disable_cs(priv);

	return r;
}

/* helper function for pulling the mac address out of the private */
/* data's copy of the eeprom data */
static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
{
	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
}

/*
 * Either the device driver (i.e. the host) or the firmware can
 * load eeprom data into the designated region in SRAM.  If neither
 * happens then the FW will shut down with a fatal error.
 *
 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
 * region of shared SRAM needs to be non-zero.
 */
static void ipw_eeprom_init_sram(struct ipw_priv *priv)
{
	int i;
	__le16 *eeprom = (__le16 *) priv->eeprom;

	IPW_DEBUG_TRACE(">>\n");

	/* read entire contents of eeprom into private buffer */
	for (i = 0; i < 128; i++)
		eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));

	/*
	   If the data looks correct, then copy it to our private
	   copy.  Otherwise let the firmware know to perform the operation
	   on its own.
	 */
	if (priv->eeprom[EEPROM_VERSION] != 0) {
		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");

		/* write the eeprom data to sram */
		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);

		/* Do not load eeprom data on fatal error or suspend */
		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
	} else {
		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");

		/* Load eeprom data on fatal error or suspend */
		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
	}

	IPW_DEBUG_TRACE("<<\n");
}

static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
{
	count >>= 2;
	if (!count)
		return;
	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
	while (count--)
		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
}

static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
{
	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
			CB_NUMBER_OF_ELEMENTS_SMALL *
			sizeof(struct command_block));
}

static int ipw_fw_dma_enable(struct ipw_priv *priv)
{				/* start dma engine but no transfers yet */

	IPW_DEBUG_FW(">> : \n");

	/* Start the dma */
	ipw_fw_dma_reset_command_blocks(priv);

	/* Write CB base address */
	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);

	IPW_DEBUG_FW("<< : \n");
	return 0;
}

static void ipw_fw_dma_abort(struct ipw_priv *priv)
{
	u32 control = 0;

	IPW_DEBUG_FW(">> :\n");

	/* set the Stop and Abort bit */
	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
	priv->sram_desc.last_cb_index = 0;

	IPW_DEBUG_FW("<< \n");
}

static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
					  struct command_block *cb)
{
	u32 address =
	    IPW_SHARED_SRAM_DMA_CONTROL +
	    (sizeof(struct command_block) * index);
	IPW_DEBUG_FW(">> :\n");

	ipw_write_indirect(priv, address, (u8 *) cb,
			   (int)sizeof(struct command_block));

	IPW_DEBUG_FW("<< :\n");
	return 0;

}

static int ipw_fw_dma_kick(struct
ipw_priv *priv) 2784{ 2785 u32 control = 0; 2786 u32 index = 0; 2787 2788 IPW_DEBUG_FW(">> :\n"); 2789 2790 for (index = 0; index < priv->sram_desc.last_cb_index; index++) 2791 ipw_fw_dma_write_command_block(priv, index, 2792 &priv->sram_desc.cb_list[index]); 2793 2794 /* Enable the DMA in the CSR register */ 2795 ipw_clear_bit(priv, IPW_RESET_REG, 2796 IPW_RESET_REG_MASTER_DISABLED | 2797 IPW_RESET_REG_STOP_MASTER); 2798 2799 /* Set the Start bit. */ 2800 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START; 2801 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); 2802 2803 IPW_DEBUG_FW("<< :\n"); 2804 return 0; 2805} 2806 2807static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv) 2808{ 2809 u32 address; 2810 u32 register_value = 0; 2811 u32 cb_fields_address = 0; 2812 2813 IPW_DEBUG_FW(">> :\n"); 2814 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); 2815 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address); 2816 2817 /* Read the DMA Controlor register */ 2818 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL); 2819 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value); 2820 2821 /* Print the CB values */ 2822 cb_fields_address = address; 2823 register_value = ipw_read_reg32(priv, cb_fields_address); 2824 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value); 2825 2826 cb_fields_address += sizeof(u32); 2827 register_value = ipw_read_reg32(priv, cb_fields_address); 2828 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value); 2829 2830 cb_fields_address += sizeof(u32); 2831 register_value = ipw_read_reg32(priv, cb_fields_address); 2832 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n", 2833 register_value); 2834 2835 cb_fields_address += sizeof(u32); 2836 register_value = ipw_read_reg32(priv, cb_fields_address); 2837 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value); 2838 2839 IPW_DEBUG_FW(">> :\n"); 2840} 2841 2842static int ipw_fw_dma_command_block_index(struct ipw_priv *priv) 2843{ 2844 u32 current_cb_address = 0; 2845 u32 current_cb_index = 0; 2846 2847 IPW_DEBUG_FW("<< :\n"); 2848 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); 2849 2850 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) / 2851 sizeof(struct command_block); 2852 2853 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n", 2854 current_cb_index, current_cb_address); 2855 2856 IPW_DEBUG_FW(">> :\n"); 2857 return current_cb_index; 2858 2859} 2860 2861static int ipw_fw_dma_add_command_block(struct ipw_priv *priv, 2862 u32 src_address, 2863 u32 dest_address, 2864 u32 length, 2865 int interrupt_enabled, int is_last) 2866{ 2867 2868 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC | 2869 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG | 2870 CB_DEST_SIZE_LONG; 2871 struct command_block *cb; 2872 u32 last_cb_element = 0; 2873 2874 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n", 2875 src_address, dest_address, length); 2876 2877 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL) 2878 return -1; 2879 2880 last_cb_element = priv->sram_desc.last_cb_index; 2881 cb = &priv->sram_desc.cb_list[last_cb_element]; 2882 priv->sram_desc.last_cb_index++; 2883 2884 /* Calculate the new CB control word */ 2885 if (interrupt_enabled) 2886 control |= CB_INT_ENABLED; 2887 2888 if (is_last) 2889 control |= CB_LAST_VALID; 2890 2891 control |= length; 2892 2893 /* Calculate the CB Element's checksum value */ 2894 cb->status = control ^ 
src_address ^ dest_address; 2895 2896 /* Copy the Source and Destination addresses */ 2897 cb->dest_addr = dest_address; 2898 cb->source_addr = src_address; 2899 2900 /* Copy the Control Word last */ 2901 cb->control = control; 2902 2903 return 0; 2904} 2905 2906static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address, 2907 int nr, u32 dest_address, u32 len) 2908{ 2909 int ret, i; 2910 u32 size; 2911 2912 IPW_DEBUG_FW(">> \n"); 2913 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n", 2914 nr, dest_address, len); 2915 2916 for (i = 0; i < nr; i++) { 2917 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH); 2918 ret = ipw_fw_dma_add_command_block(priv, src_address[i], 2919 dest_address + 2920 i * CB_MAX_LENGTH, size, 2921 0, 0); 2922 if (ret) { 2923 IPW_DEBUG_FW_INFO(": Failed\n"); 2924 return -1; 2925 } else 2926 IPW_DEBUG_FW_INFO(": Added new cb\n"); 2927 } 2928 2929 IPW_DEBUG_FW("<< \n"); 2930 return 0; 2931} 2932 2933static int ipw_fw_dma_wait(struct ipw_priv *priv) 2934{ 2935 u32 current_index = 0, previous_index; 2936 u32 watchdog = 0; 2937 2938 IPW_DEBUG_FW(">> : \n"); 2939 2940 current_index = ipw_fw_dma_command_block_index(priv); 2941 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n", 2942 (int)priv->sram_desc.last_cb_index); 2943 2944 while (current_index < priv->sram_desc.last_cb_index) { 2945 udelay(50); 2946 previous_index = current_index; 2947 current_index = ipw_fw_dma_command_block_index(priv); 2948 2949 if (previous_index < current_index) { 2950 watchdog = 0; 2951 continue; 2952 } 2953 if (++watchdog > 400) { 2954 IPW_DEBUG_FW_INFO("Timeout\n"); 2955 ipw_fw_dma_dump_command_block(priv); 2956 ipw_fw_dma_abort(priv); 2957 return -1; 2958 } 2959 } 2960 2961 ipw_fw_dma_abort(priv); 2962 2963 /*Disable the DMA in the CSR register */ 2964 ipw_set_bit(priv, IPW_RESET_REG, 2965 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER); 2966 2967 IPW_DEBUG_FW("<< dmaWaitSync \n"); 2968 return 0; 2969} 2970 2971static void ipw_remove_current_network(struct ipw_priv *priv) 2972{ 2973 struct list_head *element, *safe; 2974 struct libipw_network *network = NULL; 2975 unsigned long flags; 2976 2977 spin_lock_irqsave(&priv->ieee->lock, flags); 2978 list_for_each_safe(element, safe, &priv->ieee->network_list) { 2979 network = list_entry(element, struct libipw_network, list); 2980 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) { 2981 list_del(element); 2982 list_add_tail(&network->list, 2983 &priv->ieee->network_free_list); 2984 } 2985 } 2986 spin_unlock_irqrestore(&priv->ieee->lock, flags); 2987} 2988 2989/** 2990 * Check that card is still alive. 2991 * Reads debug register from domain0. 2992 * If card is present, pre-defined value should 2993 * be found there. 2994 * 2995 * @param priv 2996 * @return 1 if card is present, 0 otherwise 2997 */ 2998static inline int ipw_alive(struct ipw_priv *priv) 2999{ 3000 return ipw_read32(priv, 0x90) == 0xd55555d5; 3001} 3002 3003/* timeout in msec, attempted in 10-msec quanta */ 3004static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask, 3005 int timeout) 3006{ 3007 int i = 0; 3008 3009 do { 3010 if ((ipw_read32(priv, addr) & mask) == mask) 3011 return i; 3012 mdelay(10); 3013 i += 10; 3014 } while (i < timeout); 3015 3016 return -ETIME; 3017} 3018 3019/* These functions load the firmware and micro code for the operation of 3020 * the ipw hardware. It assumes the buffer has all the bits for the 3021 * image and the caller is handling the memory allocation and clean up. 
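 *
 * The overall sequence (see ipw_load() below) is: DMA the boot image,
 * wait for IPW_INTA_BIT_FW_INITIALIZATION_DONE, feed the ucode to the
 * DINO controller one 16-bit word at a time, then DMA the runtime
 * firmware before the rx/tx queues are brought up.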
3022 */ 3023 3024static int ipw_stop_master(struct ipw_priv *priv) 3025{ 3026 int rc; 3027 3028 IPW_DEBUG_TRACE(">> \n"); 3029 /* stop master. typical delay - 0 */ 3030 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); 3031 3032 /* timeout is in msec, polled in 10-msec quanta */ 3033 rc = ipw_poll_bit(priv, IPW_RESET_REG, 3034 IPW_RESET_REG_MASTER_DISABLED, 100); 3035 if (rc < 0) { 3036 IPW_ERROR("wait for stop master failed after 100ms\n"); 3037 return -1; 3038 } 3039 3040 IPW_DEBUG_INFO("stop master %dms\n", rc); 3041 3042 return rc; 3043} 3044 3045static void ipw_arc_release(struct ipw_priv *priv) 3046{ 3047 IPW_DEBUG_TRACE(">> \n"); 3048 mdelay(5); 3049 3050 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); 3051 3052 /* no one knows timing, for safety add some delay */ 3053 mdelay(5); 3054} 3055 3056struct fw_chunk { 3057 __le32 address; 3058 __le32 length; 3059}; 3060 3061static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) 3062{ 3063 int rc = 0, i, addr; 3064 u8 cr = 0; 3065 __le16 *image; 3066 3067 image = (__le16 *) data; 3068 3069 IPW_DEBUG_TRACE(">> \n"); 3070 3071 rc = ipw_stop_master(priv); 3072 3073 if (rc < 0) 3074 return rc; 3075 3076 for (addr = IPW_SHARED_LOWER_BOUND; 3077 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) { 3078 ipw_write32(priv, addr, 0); 3079 } 3080 3081 /* no ucode (yet) */ 3082 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive)); 3083 /* destroy DMA queues */ 3084 /* reset sequence */ 3085 3086 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON); 3087 ipw_arc_release(priv); 3088 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF); 3089 mdelay(1); 3090 3091 /* reset PHY */ 3092 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN); 3093 mdelay(1); 3094 3095 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0); 3096 mdelay(1); 3097 3098 /* enable ucode store */ 3099 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0); 3100 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS); 3101 mdelay(1); 3102 3103 /* write ucode */ 3104 /** 3105 * @bug 3106 * Do NOT set indirect address register once and then 3107 * store data to indirect data register in the loop. 3108 * It seems very reasonable, but in this case DINO do not 3109 * accept ucode. It is essential to set address each time. 3110 */ 3111 /* load new ipw uCode */ 3112 for (i = 0; i < len / 2; i++) 3113 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE, 3114 le16_to_cpu(image[i])); 3115 3116 /* enable DINO */ 3117 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); 3118 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM); 3119 3120 /* this is where the igx / win driver deveates from the VAP driver. */ 3121 3122 /* wait for alive response */ 3123 for (i = 0; i < 100; i++) { 3124 /* poll for incoming data */ 3125 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS); 3126 if (cr & DINO_RXFIFO_DATA) 3127 break; 3128 mdelay(1); 3129 } 3130 3131 if (cr & DINO_RXFIFO_DATA) { 3132 /* alive_command_responce size is NOT multiple of 4 */ 3133 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4]; 3134 3135 for (i = 0; i < ARRAY_SIZE(response_buffer); i++) 3136 response_buffer[i] = 3137 cpu_to_le32(ipw_read_reg32(priv, 3138 IPW_BASEBAND_RX_FIFO_READ)); 3139 memcpy(&priv->dino_alive, response_buffer, 3140 sizeof(priv->dino_alive)); 3141 if (priv->dino_alive.alive_command == 1 3142 && priv->dino_alive.ucode_valid == 1) { 3143 rc = 0; 3144 IPW_DEBUG_INFO 3145 ("Microcode OK, rev. 
%d (0x%x) dev. %d (0x%x) " 3146 "of %02d/%02d/%02d %02d:%02d\n", 3147 priv->dino_alive.software_revision, 3148 priv->dino_alive.software_revision, 3149 priv->dino_alive.device_identifier, 3150 priv->dino_alive.device_identifier, 3151 priv->dino_alive.time_stamp[0], 3152 priv->dino_alive.time_stamp[1], 3153 priv->dino_alive.time_stamp[2], 3154 priv->dino_alive.time_stamp[3], 3155 priv->dino_alive.time_stamp[4]); 3156 } else { 3157 IPW_DEBUG_INFO("Microcode is not alive\n"); 3158 rc = -EINVAL; 3159 } 3160 } else { 3161 IPW_DEBUG_INFO("No alive response from DINO\n"); 3162 rc = -ETIME; 3163 } 3164 3165 /* disable DINO, otherwise for some reason 3166 firmware have problem getting alive resp. */ 3167 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); 3168 3169 return rc; 3170} 3171 3172static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) 3173{ 3174 int ret = -1; 3175 int offset = 0; 3176 struct fw_chunk *chunk; 3177 int total_nr = 0; 3178 int i; 3179 struct pci_pool *pool; 3180 void **virts; 3181 dma_addr_t *phys; 3182 3183 IPW_DEBUG_TRACE("<< : \n"); 3184 3185 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL, 3186 GFP_KERNEL); 3187 if (!virts) 3188 return -ENOMEM; 3189 3190 phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL, 3191 GFP_KERNEL); 3192 if (!phys) { 3193 kfree(virts); 3194 return -ENOMEM; 3195 } 3196 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0); 3197 if (!pool) { 3198 IPW_ERROR("pci_pool_create failed\n"); 3199 kfree(phys); 3200 kfree(virts); 3201 return -ENOMEM; 3202 } 3203 3204 /* Start the Dma */ 3205 ret = ipw_fw_dma_enable(priv); 3206 3207 /* the DMA is already ready this would be a bug. */ 3208 BUG_ON(priv->sram_desc.last_cb_index > 0); 3209 3210 do { 3211 u32 chunk_len; 3212 u8 *start; 3213 int size; 3214 int nr = 0; 3215 3216 chunk = (struct fw_chunk *)(data + offset); 3217 offset += sizeof(struct fw_chunk); 3218 chunk_len = le32_to_cpu(chunk->length); 3219 start = data + offset; 3220 3221 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH; 3222 for (i = 0; i < nr; i++) { 3223 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL, 3224 &phys[total_nr]); 3225 if (!virts[total_nr]) { 3226 ret = -ENOMEM; 3227 goto out; 3228 } 3229 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH, 3230 CB_MAX_LENGTH); 3231 memcpy(virts[total_nr], start, size); 3232 start += size; 3233 total_nr++; 3234 /* We don't support fw chunk larger than 64*8K */ 3235 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL); 3236 } 3237 3238 /* build DMA packet and queue up for sending */ 3239 /* dma to chunk->address, the chunk->length bytes from data + 3240 * offeset*/ 3241 /* Dma loading */ 3242 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr], 3243 nr, le32_to_cpu(chunk->address), 3244 chunk_len); 3245 if (ret) { 3246 IPW_DEBUG_INFO("dmaAddBuffer Failed\n"); 3247 goto out; 3248 } 3249 3250 offset += chunk_len; 3251 } while (offset < len); 3252 3253 /* Run the DMA and wait for the answer */ 3254 ret = ipw_fw_dma_kick(priv); 3255 if (ret) { 3256 IPW_ERROR("dmaKick Failed\n"); 3257 goto out; 3258 } 3259 3260 ret = ipw_fw_dma_wait(priv); 3261 if (ret) { 3262 IPW_ERROR("dmaWaitSync Failed\n"); 3263 goto out; 3264 } 3265 out: 3266 for (i = 0; i < total_nr; i++) 3267 pci_pool_free(pool, virts[i], phys[i]); 3268 3269 pci_pool_destroy(pool); 3270 kfree(phys); 3271 kfree(virts); 3272 3273 return ret; 3274} 3275 3276/* stop nic */ 3277static int ipw_stop_nic(struct ipw_priv *priv) 3278{ 3279 int rc = 0; 3280 3281 /* stop */ 3282 ipw_write32(priv, 
		    IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);

	rc = ipw_poll_bit(priv, IPW_RESET_REG,
			  IPW_RESET_REG_MASTER_DISABLED, 500);
	if (rc < 0) {
		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
		return rc;
	}

	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);

	return rc;
}

static void ipw_start_nic(struct ipw_priv *priv)
{
	IPW_DEBUG_TRACE(">>\n");

	/* prvHwStartNic release ARC */
	ipw_clear_bit(priv, IPW_RESET_REG,
		      IPW_RESET_REG_MASTER_DISABLED |
		      IPW_RESET_REG_STOP_MASTER |
		      CBD_RESET_REG_PRINCETON_RESET);

	/* enable power management */
	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);

	IPW_DEBUG_TRACE("<<\n");
}

static int ipw_init_nic(struct ipw_priv *priv)
{
	int rc;

	IPW_DEBUG_TRACE(">>\n");
	/* reset */
	/*prvHwInitNic */
	/* set "initialization complete" bit to move adapter to D0 state */
	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);

	/* low-level PLL activation */
	ipw_write32(priv, IPW_READ_INT_REGISTER,
		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);

	/* wait for clock stabilization */
	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
	if (rc < 0)
		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");

	/* assert SW reset */
	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);

	udelay(10);

	/* set "initialization complete" bit to move adapter to D0 state */
	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);

	IPW_DEBUG_TRACE("<<\n");
	return 0;
}

/* Call this function from process context, it will sleep in request_firmware.
 * Probe is an ok place to call this from.
 */
static int ipw_reset_nic(struct ipw_priv *priv)
{
	int rc = 0;
	unsigned long flags;

	IPW_DEBUG_TRACE(">>\n");

	rc = ipw_init_nic(priv);

	spin_lock_irqsave(&priv->lock, flags);
	/* Clear the 'host command active' bit...
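 and wake any waiter blocked in __ipw_send_cmd()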
*/ 3359 priv->status &= ~STATUS_HCMD_ACTIVE; 3360 wake_up_interruptible(&priv->wait_command_queue); 3361 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING); 3362 wake_up_interruptible(&priv->wait_state); 3363 spin_unlock_irqrestore(&priv->lock, flags); 3364 3365 IPW_DEBUG_TRACE("<<\n"); 3366 return rc; 3367} 3368 3369 3370struct ipw_fw { 3371 __le32 ver; 3372 __le32 boot_size; 3373 __le32 ucode_size; 3374 __le32 fw_size; 3375 u8 data[0]; 3376}; 3377 3378static int ipw_get_fw(struct ipw_priv *priv, 3379 const struct firmware **raw, const char *name) 3380{ 3381 struct ipw_fw *fw; 3382 int rc; 3383 3384 /* ask firmware_class module to get the boot firmware off disk */ 3385 rc = request_firmware(raw, name, &priv->pci_dev->dev); 3386 if (rc < 0) { 3387 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc); 3388 return rc; 3389 } 3390 3391 if ((*raw)->size < sizeof(*fw)) { 3392 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size); 3393 return -EINVAL; 3394 } 3395 3396 fw = (void *)(*raw)->data; 3397 3398 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) + 3399 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) { 3400 IPW_ERROR("%s is too small or corrupt (%zd)\n", 3401 name, (*raw)->size); 3402 return -EINVAL; 3403 } 3404 3405 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n", 3406 name, 3407 le32_to_cpu(fw->ver) >> 16, 3408 le32_to_cpu(fw->ver) & 0xff, 3409 (*raw)->size - sizeof(*fw)); 3410 return 0; 3411} 3412 3413#define IPW_RX_BUF_SIZE (3000) 3414 3415static void ipw_rx_queue_reset(struct ipw_priv *priv, 3416 struct ipw_rx_queue *rxq) 3417{ 3418 unsigned long flags; 3419 int i; 3420 3421 spin_lock_irqsave(&rxq->lock, flags); 3422 3423 INIT_LIST_HEAD(&rxq->rx_free); 3424 INIT_LIST_HEAD(&rxq->rx_used); 3425 3426 /* Fill the rx_used queue with _all_ of the Rx buffers */ 3427 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 3428 /* In the reset function, these buffers may have been allocated 3429 * to an SKB, so we need to unmap and free potential storage */ 3430 if (rxq->pool[i].skb != NULL) { 3431 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 3432 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 3433 dev_kfree_skb(rxq->pool[i].skb); 3434 rxq->pool[i].skb = NULL; 3435 } 3436 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 3437 } 3438 3439 /* Set us so that we have processed and used all buffers, but have 3440 * not restocked the Rx queue with fresh buffers */ 3441 rxq->read = rxq->write = 0; 3442 rxq->free_count = 0; 3443 spin_unlock_irqrestore(&rxq->lock, flags); 3444} 3445 3446#ifdef CONFIG_PM 3447static int fw_loaded = 0; 3448static const struct firmware *raw = NULL; 3449 3450static void free_firmware(void) 3451{ 3452 if (fw_loaded) { 3453 release_firmware(raw); 3454 raw = NULL; 3455 fw_loaded = 0; 3456 } 3457} 3458#else 3459#define free_firmware() do {} while (0) 3460#endif 3461 3462static int ipw_load(struct ipw_priv *priv) 3463{ 3464#ifndef CONFIG_PM 3465 const struct firmware *raw = NULL; 3466#endif 3467 struct ipw_fw *fw; 3468 u8 *boot_img, *ucode_img, *fw_img; 3469 u8 *name = NULL; 3470 int rc = 0, retries = 3; 3471 3472 switch (priv->ieee->iw_mode) { 3473 case IW_MODE_ADHOC: 3474 name = "ipw2200-ibss.fw"; 3475 break; 3476#ifdef CONFIG_IPW2200_MONITOR 3477 case IW_MODE_MONITOR: 3478 name = "ipw2200-sniffer.fw"; 3479 break; 3480#endif 3481 case IW_MODE_INFRA: 3482 name = "ipw2200-bss.fw"; 3483 break; 3484 } 3485 3486 if (!name) { 3487 rc = -EINVAL; 3488 goto error; 3489 } 3490 3491#ifdef CONFIG_PM 3492 if (!fw_loaded) { 3493#endif 
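		/* when CONFIG_PM is set the image is cached in the static
		 * 'raw' pointer (see free_firmware() above) so later loads,
		 * e.g. after a resume, can reuse it instead of calling
		 * request_firmware() again */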
3494 rc = ipw_get_fw(priv, &raw, name); 3495 if (rc < 0) 3496 goto error; 3497#ifdef CONFIG_PM 3498 } 3499#endif 3500 3501 fw = (void *)raw->data; 3502 boot_img = &fw->data[0]; 3503 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)]; 3504 fw_img = &fw->data[le32_to_cpu(fw->boot_size) + 3505 le32_to_cpu(fw->ucode_size)]; 3506 3507 if (rc < 0) 3508 goto error; 3509 3510 if (!priv->rxq) 3511 priv->rxq = ipw_rx_queue_alloc(priv); 3512 else 3513 ipw_rx_queue_reset(priv, priv->rxq); 3514 if (!priv->rxq) { 3515 IPW_ERROR("Unable to initialize Rx queue\n"); 3516 goto error; 3517 } 3518 3519 retry: 3520 /* Ensure interrupts are disabled */ 3521 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 3522 priv->status &= ~STATUS_INT_ENABLED; 3523 3524 /* ack pending interrupts */ 3525 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3526 3527 ipw_stop_nic(priv); 3528 3529 rc = ipw_reset_nic(priv); 3530 if (rc < 0) { 3531 IPW_ERROR("Unable to reset NIC\n"); 3532 goto error; 3533 } 3534 3535 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND, 3536 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND); 3537 3538 /* DMA the initial boot firmware into the device */ 3539 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size)); 3540 if (rc < 0) { 3541 IPW_ERROR("Unable to load boot firmware: %d\n", rc); 3542 goto error; 3543 } 3544 3545 /* kick start the device */ 3546 ipw_start_nic(priv); 3547 3548 /* wait for the device to finish its initial startup sequence */ 3549 rc = ipw_poll_bit(priv, IPW_INTA_RW, 3550 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); 3551 if (rc < 0) { 3552 IPW_ERROR("device failed to boot initial fw image\n"); 3553 goto error; 3554 } 3555 IPW_DEBUG_INFO("initial device response after %dms\n", rc); 3556 3557 /* ack fw init done interrupt */ 3558 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); 3559 3560 /* DMA the ucode into the device */ 3561 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size)); 3562 if (rc < 0) { 3563 IPW_ERROR("Unable to load ucode: %d\n", rc); 3564 goto error; 3565 } 3566 3567 /* stop nic */ 3568 ipw_stop_nic(priv); 3569 3570 /* DMA bss firmware into the device */ 3571 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size)); 3572 if (rc < 0) { 3573 IPW_ERROR("Unable to load firmware: %d\n", rc); 3574 goto error; 3575 } 3576#ifdef CONFIG_PM 3577 fw_loaded = 1; 3578#endif 3579 3580 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); 3581 3582 rc = ipw_queue_reset(priv); 3583 if (rc < 0) { 3584 IPW_ERROR("Unable to initialize queues\n"); 3585 goto error; 3586 } 3587 3588 /* Ensure interrupts are disabled */ 3589 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL); 3590 /* ack pending interrupts */ 3591 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3592 3593 /* kick start the device */ 3594 ipw_start_nic(priv); 3595 3596 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) { 3597 if (retries > 0) { 3598 IPW_WARNING("Parity error. 
Retrying init.\n"); 3599 retries--; 3600 goto retry; 3601 } 3602 3603 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n"); 3604 rc = -EIO; 3605 goto error; 3606 } 3607 3608 /* wait for the device */ 3609 rc = ipw_poll_bit(priv, IPW_INTA_RW, 3610 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500); 3611 if (rc < 0) { 3612 IPW_ERROR("device failed to start within 500ms\n"); 3613 goto error; 3614 } 3615 IPW_DEBUG_INFO("device response after %dms\n", rc); 3616 3617 /* ack fw init done interrupt */ 3618 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE); 3619 3620 /* read eeprom data and initialize the eeprom region of sram */ 3621 priv->eeprom_delay = 1; 3622 ipw_eeprom_init_sram(priv); 3623 3624 /* enable interrupts */ 3625 ipw_enable_interrupts(priv); 3626 3627 /* Ensure our queue has valid packets */ 3628 ipw_rx_queue_replenish(priv); 3629 3630 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read); 3631 3632 /* ack pending interrupts */ 3633 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL); 3634 3635#ifndef CONFIG_PM 3636 release_firmware(raw); 3637#endif 3638 return 0; 3639 3640 error: 3641 if (priv->rxq) { 3642 ipw_rx_queue_free(priv, priv->rxq); 3643 priv->rxq = NULL; 3644 } 3645 ipw_tx_queue_free(priv); 3646 if (raw) 3647 release_firmware(raw); 3648#ifdef CONFIG_PM 3649 fw_loaded = 0; 3650 raw = NULL; 3651#endif 3652 3653 return rc; 3654} 3655 3656/** 3657 * DMA services 3658 * 3659 * Theory of operation 3660 * 3661 * A queue is a circular buffers with 'Read' and 'Write' pointers. 3662 * 2 empty entries always kept in the buffer to protect from overflow. 3663 * 3664 * For Tx queue, there are low mark and high mark limits. If, after queuing 3665 * the packet for Tx, free space become < low mark, Tx queue stopped. When 3666 * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 3667 * Tx queue resumed. 3668 * 3669 * The IPW operates with six queues, one receive queue in the device's 3670 * sram, one transmit queue for sending commands to the device firmware, 3671 * and four transmit queues for data. 3672 * 3673 * The four transmit queues allow for performing quality of service (qos) 3674 * transmissions as per the 802.11 protocol. Currently Linux does not 3675 * provide a mechanism to the user for utilizing prioritized queues, so 3676 * we only utilize the first data transmit queue (queue1). 3677 */ 3678 3679/** 3680 * Driver allocates buffers of this size for Rx 3681 */ 3682 3683/** 3684 * ipw_rx_queue_space - Return number of free slots available in queue. 3685 */ 3686static int ipw_rx_queue_space(const struct ipw_rx_queue *q) 3687{ 3688 int s = q->read - q->write; 3689 if (s <= 0) 3690 s += RX_QUEUE_SIZE; 3691 /* keep some buffer to not confuse full and empty queue */ 3692 s -= 2; 3693 if (s < 0) 3694 s = 0; 3695 return s; 3696} 3697 3698static inline int ipw_tx_queue_space(const struct clx2_queue *q) 3699{ 3700 int s = q->last_used - q->first_empty; 3701 if (s <= 0) 3702 s += q->n_bd; 3703 s -= 2; /* keep some reserve to not confuse empty and full situations */ 3704 if (s < 0) 3705 s = 0; 3706 return s; 3707} 3708 3709static inline int ipw_queue_inc_wrap(int index, int n_bd) 3710{ 3711 return (++index == n_bd) ? 0 : index; 3712} 3713 3714/** 3715 * Initialize common DMA queue structure 3716 * 3717 * @param q queue to init 3718 * @param count Number of BD's to allocate. 
Should be power of 2 3719 * @param read_register Address for 'read' register 3720 * (not offset within BAR, full address) 3721 * @param write_register Address for 'write' register 3722 * (not offset within BAR, full address) 3723 * @param base_register Address for 'base' register 3724 * (not offset within BAR, full address) 3725 * @param size Address for 'size' register 3726 * (not offset within BAR, full address) 3727 */ 3728static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q, 3729 int count, u32 read, u32 write, u32 base, u32 size) 3730{ 3731 q->n_bd = count; 3732 3733 q->low_mark = q->n_bd / 4; 3734 if (q->low_mark < 4) 3735 q->low_mark = 4; 3736 3737 q->high_mark = q->n_bd / 8; 3738 if (q->high_mark < 2) 3739 q->high_mark = 2; 3740 3741 q->first_empty = q->last_used = 0; 3742 q->reg_r = read; 3743 q->reg_w = write; 3744 3745 ipw_write32(priv, base, q->dma_addr); 3746 ipw_write32(priv, size, count); 3747 ipw_write32(priv, read, 0); 3748 ipw_write32(priv, write, 0); 3749 3750 _ipw_read32(priv, 0x90); 3751} 3752 3753static int ipw_queue_tx_init(struct ipw_priv *priv, 3754 struct clx2_tx_queue *q, 3755 int count, u32 read, u32 write, u32 base, u32 size) 3756{ 3757 struct pci_dev *dev = priv->pci_dev; 3758 3759 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL); 3760 if (!q->txb) { 3761 IPW_ERROR("vmalloc for auxilary BD structures failed\n"); 3762 return -ENOMEM; 3763 } 3764 3765 q->bd = 3766 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr); 3767 if (!q->bd) { 3768 IPW_ERROR("pci_alloc_consistent(%zd) failed\n", 3769 sizeof(q->bd[0]) * count); 3770 kfree(q->txb); 3771 q->txb = NULL; 3772 return -ENOMEM; 3773 } 3774 3775 ipw_queue_init(priv, &q->q, count, read, write, base, size); 3776 return 0; 3777} 3778 3779/** 3780 * Free one TFD, those at index [txq->q.last_used]. 3781 * Do NOT advance any indexes 3782 * 3783 * @param dev 3784 * @param txq 3785 */ 3786static void ipw_queue_tx_free_tfd(struct ipw_priv *priv, 3787 struct clx2_tx_queue *txq) 3788{ 3789 struct tfd_frame *bd = &txq->bd[txq->q.last_used]; 3790 struct pci_dev *dev = priv->pci_dev; 3791 int i; 3792 3793 /* classify bd */ 3794 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE) 3795 /* nothing to cleanup after for host commands */ 3796 return; 3797 3798 /* sanity check */ 3799 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) { 3800 IPW_ERROR("Too many chunks: %i\n", 3801 le32_to_cpu(bd->u.data.num_chunks)); 3802 /** @todo issue fatal error, it is quite serious situation */ 3803 return; 3804 } 3805 3806 /* unmap chunks if any */ 3807 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) { 3808 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]), 3809 le16_to_cpu(bd->u.data.chunk_len[i]), 3810 PCI_DMA_TODEVICE); 3811 if (txq->txb[txq->q.last_used]) { 3812 libipw_txb_free(txq->txb[txq->q.last_used]); 3813 txq->txb[txq->q.last_used] = NULL; 3814 } 3815 } 3816} 3817 3818/** 3819 * Deallocate DMA queue. 3820 * 3821 * Empty queue by removing and destroying all BD's. 3822 * Free all buffers. 
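 * The clx2_tx_queue structure itself is embedded in struct ipw_priv and
 * is only zeroed here, not freed.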
3823 * 3824 * @param dev 3825 * @param q 3826 */ 3827static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq) 3828{ 3829 struct clx2_queue *q = &txq->q; 3830 struct pci_dev *dev = priv->pci_dev; 3831 3832 if (q->n_bd == 0) 3833 return; 3834 3835 /* first, empty all BD's */ 3836 for (; q->first_empty != q->last_used; 3837 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { 3838 ipw_queue_tx_free_tfd(priv, txq); 3839 } 3840 3841 /* free buffers belonging to queue itself */ 3842 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd, 3843 q->dma_addr); 3844 kfree(txq->txb); 3845 3846 /* 0 fill whole structure */ 3847 memset(txq, 0, sizeof(*txq)); 3848} 3849 3850/** 3851 * Destroy all DMA queues and structures 3852 * 3853 * @param priv 3854 */ 3855static void ipw_tx_queue_free(struct ipw_priv *priv) 3856{ 3857 /* Tx CMD queue */ 3858 ipw_queue_tx_free(priv, &priv->txq_cmd); 3859 3860 /* Tx queues */ 3861 ipw_queue_tx_free(priv, &priv->txq[0]); 3862 ipw_queue_tx_free(priv, &priv->txq[1]); 3863 ipw_queue_tx_free(priv, &priv->txq[2]); 3864 ipw_queue_tx_free(priv, &priv->txq[3]); 3865} 3866 3867static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid) 3868{ 3869 /* First 3 bytes are manufacturer */ 3870 bssid[0] = priv->mac_addr[0]; 3871 bssid[1] = priv->mac_addr[1]; 3872 bssid[2] = priv->mac_addr[2]; 3873 3874 /* Last bytes are random */ 3875 get_random_bytes(&bssid[3], ETH_ALEN - 3); 3876 3877 bssid[0] &= 0xfe; /* clear multicast bit */ 3878 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */ 3879} 3880 3881static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) 3882{ 3883 struct ipw_station_entry entry; 3884 int i; 3885 3886 for (i = 0; i < priv->num_stations; i++) { 3887 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) { 3888 /* Another node is active in network */ 3889 priv->missed_adhoc_beacons = 0; 3890 if (!(priv->config & CFG_STATIC_CHANNEL)) 3891 /* when other nodes drop out, we drop out */ 3892 priv->config &= ~CFG_ADHOC_PERSIST; 3893 3894 return i; 3895 } 3896 } 3897 3898 if (i == MAX_STATIONS) 3899 return IPW_INVALID_STATION; 3900 3901 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid); 3902 3903 entry.reserved = 0; 3904 entry.support_mode = 0; 3905 memcpy(entry.mac_addr, bssid, ETH_ALEN); 3906 memcpy(priv->stations[i], bssid, ETH_ALEN); 3907 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry), 3908 &entry, sizeof(entry)); 3909 priv->num_stations++; 3910 3911 return i; 3912} 3913 3914static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid) 3915{ 3916 int i; 3917 3918 for (i = 0; i < priv->num_stations; i++) 3919 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) 3920 return i; 3921 3922 return IPW_INVALID_STATION; 3923} 3924 3925static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) 3926{ 3927 int err; 3928 3929 if (priv->status & STATUS_ASSOCIATING) { 3930 IPW_DEBUG_ASSOC("Disassociating while associating.\n"); 3931 queue_work(priv->workqueue, &priv->disassociate); 3932 return; 3933 } 3934 3935 if (!(priv->status & STATUS_ASSOCIATED)) { 3936 IPW_DEBUG_ASSOC("Disassociating while not associated.\n"); 3937 return; 3938 } 3939 3940 IPW_DEBUG_ASSOC("Disassocation attempt from %pM " 3941 "on channel %d.\n", 3942 priv->assoc_request.bssid, 3943 priv->assoc_request.channel); 3944 3945 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED); 3946 priv->status |= STATUS_DISASSOCIATING; 3947 3948 if (quiet) 3949 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET; 3950 else 3951 
priv->assoc_request.assoc_type = HC_DISASSOCIATE; 3952 3953 err = ipw_send_associate(priv, &priv->assoc_request); 3954 if (err) { 3955 IPW_DEBUG_HC("Attempt to send [dis]associate command " 3956 "failed.\n"); 3957 return; 3958 } 3959 3960} 3961 3962static int ipw_disassociate(void *data) 3963{ 3964 struct ipw_priv *priv = data; 3965 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) 3966 return 0; 3967 ipw_send_disassociate(data, 0); 3968 netif_carrier_off(priv->net_dev); 3969 return 1; 3970} 3971 3972static void ipw_bg_disassociate(struct work_struct *work) 3973{ 3974 struct ipw_priv *priv = 3975 container_of(work, struct ipw_priv, disassociate); 3976 mutex_lock(&priv->mutex); 3977 ipw_disassociate(priv); 3978 mutex_unlock(&priv->mutex); 3979} 3980 3981static void ipw_system_config(struct work_struct *work) 3982{ 3983 struct ipw_priv *priv = 3984 container_of(work, struct ipw_priv, system_config); 3985 3986#ifdef CONFIG_IPW2200_PROMISCUOUS 3987 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { 3988 priv->sys_config.accept_all_data_frames = 1; 3989 priv->sys_config.accept_non_directed_frames = 1; 3990 priv->sys_config.accept_all_mgmt_bcpr = 1; 3991 priv->sys_config.accept_all_mgmt_frames = 1; 3992 } 3993#endif 3994 3995 ipw_send_system_config(priv); 3996} 3997 3998struct ipw_status_code { 3999 u16 status; 4000 const char *reason; 4001}; 4002 4003static const struct ipw_status_code ipw_status_codes[] = { 4004 {0x00, "Successful"}, 4005 {0x01, "Unspecified failure"}, 4006 {0x0A, "Cannot support all requested capabilities in the " 4007 "Capability information field"}, 4008 {0x0B, "Reassociation denied due to inability to confirm that " 4009 "association exists"}, 4010 {0x0C, "Association denied due to reason outside the scope of this " 4011 "standard"}, 4012 {0x0D, 4013 "Responding station does not support the specified authentication " 4014 "algorithm"}, 4015 {0x0E, 4016 "Received an Authentication frame with authentication sequence " 4017 "transaction sequence number out of expected sequence"}, 4018 {0x0F, "Authentication rejected because of challenge failure"}, 4019 {0x10, "Authentication rejected due to timeout waiting for next " 4020 "frame in sequence"}, 4021 {0x11, "Association denied because AP is unable to handle additional " 4022 "associated stations"}, 4023 {0x12, 4024 "Association denied due to requesting station not supporting all " 4025 "of the datarates in the BSSBasicServiceSet Parameter"}, 4026 {0x13, 4027 "Association denied due to requesting station not supporting " 4028 "short preamble operation"}, 4029 {0x14, 4030 "Association denied due to requesting station not supporting " 4031 "PBCC encoding"}, 4032 {0x15, 4033 "Association denied due to requesting station not supporting " 4034 "channel agility"}, 4035 {0x19, 4036 "Association denied due to requesting station not supporting " 4037 "short slot operation"}, 4038 {0x1A, 4039 "Association denied due to requesting station not supporting " 4040 "DSSS-OFDM operation"}, 4041 {0x28, "Invalid Information Element"}, 4042 {0x29, "Group Cipher is not valid"}, 4043 {0x2A, "Pairwise Cipher is not valid"}, 4044 {0x2B, "AKMP is not valid"}, 4045 {0x2C, "Unsupported RSN IE version"}, 4046 {0x2D, "Invalid RSN IE Capabilities"}, 4047 {0x2E, "Cipher suite is rejected per security policy"}, 4048}; 4049 4050static const char *ipw_get_status_code(u16 status) 4051{ 4052 int i; 4053 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++) 4054 if (ipw_status_codes[i].status == (status & 0xff)) 4055 return 
ipw_status_codes[i].reason; 4056 return "Unknown status value."; 4057} 4058 4059static void inline average_init(struct average *avg) 4060{ 4061 memset(avg, 0, sizeof(*avg)); 4062} 4063 4064#define DEPTH_RSSI 8 4065#define DEPTH_NOISE 16 4066static s16 exponential_average(s16 prev_avg, s16 val, u8 depth) 4067{ 4068 return ((depth-1)*prev_avg + val)/depth; 4069} 4070 4071static void average_add(struct average *avg, s16 val) 4072{ 4073 avg->sum -= avg->entries[avg->pos]; 4074 avg->sum += val; 4075 avg->entries[avg->pos++] = val; 4076 if (unlikely(avg->pos == AVG_ENTRIES)) { 4077 avg->init = 1; 4078 avg->pos = 0; 4079 } 4080} 4081 4082static s16 average_value(struct average *avg) 4083{ 4084 if (!unlikely(avg->init)) { 4085 if (avg->pos) 4086 return avg->sum / avg->pos; 4087 return 0; 4088 } 4089 4090 return avg->sum / AVG_ENTRIES; 4091} 4092 4093static void ipw_reset_stats(struct ipw_priv *priv) 4094{ 4095 u32 len = sizeof(u32); 4096 4097 priv->quality = 0; 4098 4099 average_init(&priv->average_missed_beacons); 4100 priv->exp_avg_rssi = -60; 4101 priv->exp_avg_noise = -85 + 0x100; 4102 4103 priv->last_rate = 0; 4104 priv->last_missed_beacons = 0; 4105 priv->last_rx_packets = 0; 4106 priv->last_tx_packets = 0; 4107 priv->last_tx_failures = 0; 4108 4109 /* Firmware managed, reset only when NIC is restarted, so we have to 4110 * normalize on the current value */ 4111 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, 4112 &priv->last_rx_err, &len); 4113 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, 4114 &priv->last_tx_failures, &len); 4115 4116 /* Driver managed, reset with each association */ 4117 priv->missed_adhoc_beacons = 0; 4118 priv->missed_beacons = 0; 4119 priv->tx_packets = 0; 4120 priv->rx_packets = 0; 4121 4122} 4123 4124static u32 ipw_get_max_rate(struct ipw_priv *priv) 4125{ 4126 u32 i = 0x80000000; 4127 u32 mask = priv->rates_mask; 4128 /* If currently associated in B mode, restrict the maximum 4129 * rate match to B rates */ 4130 if (priv->assoc_request.ieee_mode == IPW_B_MODE) 4131 mask &= LIBIPW_CCK_RATES_MASK; 4132 4133 /* TODO: Verify that the rate is supported by the current rates 4134 * list. 
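 * The loop that follows scans the mask from the top bit (0x80000000) down
 * to the highest rate bit still set; the switch statement then maps that
 * single-bit value to a rate in bits per second.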
*/ 4135 4136 while (i && !(mask & i)) 4137 i >>= 1; 4138 switch (i) { 4139 case LIBIPW_CCK_RATE_1MB_MASK: 4140 return 1000000; 4141 case LIBIPW_CCK_RATE_2MB_MASK: 4142 return 2000000; 4143 case LIBIPW_CCK_RATE_5MB_MASK: 4144 return 5500000; 4145 case LIBIPW_OFDM_RATE_6MB_MASK: 4146 return 6000000; 4147 case LIBIPW_OFDM_RATE_9MB_MASK: 4148 return 9000000; 4149 case LIBIPW_CCK_RATE_11MB_MASK: 4150 return 11000000; 4151 case LIBIPW_OFDM_RATE_12MB_MASK: 4152 return 12000000; 4153 case LIBIPW_OFDM_RATE_18MB_MASK: 4154 return 18000000; 4155 case LIBIPW_OFDM_RATE_24MB_MASK: 4156 return 24000000; 4157 case LIBIPW_OFDM_RATE_36MB_MASK: 4158 return 36000000; 4159 case LIBIPW_OFDM_RATE_48MB_MASK: 4160 return 48000000; 4161 case LIBIPW_OFDM_RATE_54MB_MASK: 4162 return 54000000; 4163 } 4164 4165 if (priv->ieee->mode == IEEE_B) 4166 return 11000000; 4167 else 4168 return 54000000; 4169} 4170 4171static u32 ipw_get_current_rate(struct ipw_priv *priv) 4172{ 4173 u32 rate, len = sizeof(rate); 4174 int err; 4175 4176 if (!(priv->status & STATUS_ASSOCIATED)) 4177 return 0; 4178 4179 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) { 4180 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate, 4181 &len); 4182 if (err) { 4183 IPW_DEBUG_INFO("failed querying ordinals.\n"); 4184 return 0; 4185 } 4186 } else 4187 return ipw_get_max_rate(priv); 4188 4189 switch (rate) { 4190 case IPW_TX_RATE_1MB: 4191 return 1000000; 4192 case IPW_TX_RATE_2MB: 4193 return 2000000; 4194 case IPW_TX_RATE_5MB: 4195 return 5500000; 4196 case IPW_TX_RATE_6MB: 4197 return 6000000; 4198 case IPW_TX_RATE_9MB: 4199 return 9000000; 4200 case IPW_TX_RATE_11MB: 4201 return 11000000; 4202 case IPW_TX_RATE_12MB: 4203 return 12000000; 4204 case IPW_TX_RATE_18MB: 4205 return 18000000; 4206 case IPW_TX_RATE_24MB: 4207 return 24000000; 4208 case IPW_TX_RATE_36MB: 4209 return 36000000; 4210 case IPW_TX_RATE_48MB: 4211 return 48000000; 4212 case IPW_TX_RATE_54MB: 4213 return 54000000; 4214 } 4215 4216 return 0; 4217} 4218 4219#define IPW_STATS_INTERVAL (2 * HZ) 4220static void ipw_gather_stats(struct ipw_priv *priv) 4221{ 4222 u32 rx_err, rx_err_delta, rx_packets_delta; 4223 u32 tx_failures, tx_failures_delta, tx_packets_delta; 4224 u32 missed_beacons_percent, missed_beacons_delta; 4225 u32 quality = 0; 4226 u32 len = sizeof(u32); 4227 s16 rssi; 4228 u32 beacon_quality, signal_quality, tx_quality, rx_quality, 4229 rate_quality; 4230 u32 max_rate; 4231 4232 if (!(priv->status & STATUS_ASSOCIATED)) { 4233 priv->quality = 0; 4234 return; 4235 } 4236 4237 /* Update the statistics */ 4238 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS, 4239 &priv->missed_beacons, &len); 4240 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons; 4241 priv->last_missed_beacons = priv->missed_beacons; 4242 if (priv->assoc_request.beacon_interval) { 4243 missed_beacons_percent = missed_beacons_delta * 4244 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) / 4245 (IPW_STATS_INTERVAL * 10); 4246 } else { 4247 missed_beacons_percent = 0; 4248 } 4249 average_add(&priv->average_missed_beacons, missed_beacons_percent); 4250 4251 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len); 4252 rx_err_delta = rx_err - priv->last_rx_err; 4253 priv->last_rx_err = rx_err; 4254 4255 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len); 4256 tx_failures_delta = tx_failures - priv->last_tx_failures; 4257 priv->last_tx_failures = tx_failures; 4258 4259 rx_packets_delta = priv->rx_packets - priv->last_rx_packets; 4260 
priv->last_rx_packets = priv->rx_packets; 4261 4262 tx_packets_delta = priv->tx_packets - priv->last_tx_packets; 4263 priv->last_tx_packets = priv->tx_packets; 4264 4265 /* Calculate quality based on the following: 4266 * 4267 * Missed beacon: 100% = 0, 0% = 70% missed 4268 * Rate: 60% = 1Mbs, 100% = Max 4269 * Rx and Tx errors represent a straight % of total Rx/Tx 4270 * RSSI: 100% = > -50, 0% = < -80 4271 * Rx errors: 100% = 0, 0% = 50% missed 4272 * 4273 * The lowest computed quality is used. 4274 * 4275 */ 4276#define BEACON_THRESHOLD 5 4277 beacon_quality = 100 - missed_beacons_percent; 4278 if (beacon_quality < BEACON_THRESHOLD) 4279 beacon_quality = 0; 4280 else 4281 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 / 4282 (100 - BEACON_THRESHOLD); 4283 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n", 4284 beacon_quality, missed_beacons_percent); 4285 4286 priv->last_rate = ipw_get_current_rate(priv); 4287 max_rate = ipw_get_max_rate(priv); 4288 rate_quality = priv->last_rate * 40 / max_rate + 60; 4289 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n", 4290 rate_quality, priv->last_rate / 1000000); 4291 4292 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta) 4293 rx_quality = 100 - (rx_err_delta * 100) / 4294 (rx_packets_delta + rx_err_delta); 4295 else 4296 rx_quality = 100; 4297 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n", 4298 rx_quality, rx_err_delta, rx_packets_delta); 4299 4300 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta) 4301 tx_quality = 100 - (tx_failures_delta * 100) / 4302 (tx_packets_delta + tx_failures_delta); 4303 else 4304 tx_quality = 100; 4305 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n", 4306 tx_quality, tx_failures_delta, tx_packets_delta); 4307 4308 rssi = priv->exp_avg_rssi; 4309 signal_quality = 4310 (100 * 4311 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * 4312 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) - 4313 (priv->ieee->perfect_rssi - rssi) * 4314 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) + 4315 62 * (priv->ieee->perfect_rssi - rssi))) / 4316 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) * 4317 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi)); 4318 if (signal_quality > 100) 4319 signal_quality = 100; 4320 else if (signal_quality < 1) 4321 signal_quality = 0; 4322 4323 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n", 4324 signal_quality, rssi); 4325 4326 quality = min(rx_quality, signal_quality); 4327 quality = min(tx_quality, quality); 4328 quality = min(rate_quality, quality); 4329 quality = min(beacon_quality, quality); 4330 if (quality == beacon_quality) 4331 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n", 4332 quality); 4333 if (quality == rate_quality) 4334 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n", 4335 quality); 4336 if (quality == tx_quality) 4337 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n", 4338 quality); 4339 if (quality == rx_quality) 4340 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n", 4341 quality); 4342 if (quality == signal_quality) 4343 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n", 4344 quality); 4345 4346 priv->quality = quality; 4347 4348 queue_delayed_work(priv->workqueue, &priv->gather_stats, 4349 IPW_STATS_INTERVAL); 4350} 4351 4352static void ipw_bg_gather_stats(struct work_struct *work) 4353{ 4354 struct ipw_priv *priv = 4355 container_of(work, struct ipw_priv, gather_stats.work); 4356 mutex_lock(&priv->mutex); 4357 
ipw_gather_stats(priv); 4358 mutex_unlock(&priv->mutex); 4359} 4360 4361/* Missed beacon behavior: 4362 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam. 4363 * roaming_threshold -> disassociate_threshold, scan and roam for better signal. 4364 * Above disassociate threshold, give up and stop scanning. 4365 * Roaming is disabled if disassociate_threshold <= roaming_threshold */ 4366static void ipw_handle_missed_beacon(struct ipw_priv *priv, 4367 int missed_count) 4368{ 4369 priv->notif_missed_beacons = missed_count; 4370 4371 if (missed_count > priv->disassociate_threshold && 4372 priv->status & STATUS_ASSOCIATED) { 4373 /* If associated and we've hit the missed 4374 * beacon threshold, disassociate, turn 4375 * off roaming, and abort any active scans */ 4376 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 4377 IPW_DL_STATE | IPW_DL_ASSOC, 4378 "Missed beacon: %d - disassociate\n", missed_count); 4379 priv->status &= ~STATUS_ROAMING; 4380 if (priv->status & STATUS_SCANNING) { 4381 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 4382 IPW_DL_STATE, 4383 "Aborting scan with missed beacon.\n"); 4384 queue_work(priv->workqueue, &priv->abort_scan); 4385 } 4386 4387 queue_work(priv->workqueue, &priv->disassociate); 4388 return; 4389 } 4390 4391 if (priv->status & STATUS_ROAMING) { 4392 /* If we are currently roaming, then just 4393 * print a debug statement... */ 4394 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4395 "Missed beacon: %d - roam in progress\n", 4396 missed_count); 4397 return; 4398 } 4399 4400 if (roaming && 4401 (missed_count > priv->roaming_threshold && 4402 missed_count <= priv->disassociate_threshold)) { 4403 /* If we are not already roaming, set the ROAM 4404 * bit in the status and kick off a scan. 4405 * This can happen several times before we reach 4406 * disassociate_threshold. */ 4407 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4408 "Missed beacon: %d - initiate " 4409 "roaming\n", missed_count); 4410 if (!(priv->status & STATUS_ROAMING)) { 4411 priv->status |= STATUS_ROAMING; 4412 if (!(priv->status & STATUS_SCANNING)) 4413 queue_delayed_work(priv->workqueue, 4414 &priv->request_scan, 0); 4415 } 4416 return; 4417 } 4418 4419 if (priv->status & STATUS_SCANNING && 4420 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) { 4421 /* Stop scan to keep fw from getting 4422 * stuck (only if we aren't roaming -- 4423 * otherwise we'll never scan more than 2 or 3 4424 * channels..) 
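 * The abort below is only requested once missed_count has climbed past
 * IPW_MB_SCAN_CANCEL_THRESHOLD while a scan is still flagged as active.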
*/ 4425 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE, 4426 "Aborting scan with missed beacon.\n"); 4427 queue_work(priv->workqueue, &priv->abort_scan); 4428 } 4429 4430 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); 4431} 4432 4433static void ipw_scan_event(struct work_struct *work) 4434{ 4435 union iwreq_data wrqu; 4436 4437 struct ipw_priv *priv = 4438 container_of(work, struct ipw_priv, scan_event.work); 4439 4440 wrqu.data.length = 0; 4441 wrqu.data.flags = 0; 4442 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); 4443} 4444 4445static void handle_scan_event(struct ipw_priv *priv) 4446{ 4447 /* Only userspace-requested scan completion events go out immediately */ 4448 if (!priv->user_requested_scan) { 4449 if (!delayed_work_pending(&priv->scan_event)) 4450 queue_delayed_work(priv->workqueue, &priv->scan_event, 4451 round_jiffies_relative(msecs_to_jiffies(4000))); 4452 } else { 4453 union iwreq_data wrqu; 4454 4455 priv->user_requested_scan = 0; 4456 cancel_delayed_work(&priv->scan_event); 4457 4458 wrqu.data.length = 0; 4459 wrqu.data.flags = 0; 4460 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); 4461 } 4462} 4463 4464/** 4465 * Handle host notification packet. 4466 * Called from interrupt routine 4467 */ 4468static void ipw_rx_notification(struct ipw_priv *priv, 4469 struct ipw_rx_notification *notif) 4470{ 4471 DECLARE_SSID_BUF(ssid); 4472 u16 size = le16_to_cpu(notif->size); 4473 4474 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size); 4475 4476 switch (notif->subtype) { 4477 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{ 4478 struct notif_association *assoc = ¬if->u.assoc; 4479 4480 switch (assoc->state) { 4481 case CMAS_ASSOCIATED:{ 4482 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4483 IPW_DL_ASSOC, 4484 "associated: '%s' %pM \n", 4485 print_ssid(ssid, priv->essid, 4486 priv->essid_len), 4487 priv->bssid); 4488 4489 switch (priv->ieee->iw_mode) { 4490 case IW_MODE_INFRA: 4491 memcpy(priv->ieee->bssid, 4492 priv->bssid, ETH_ALEN); 4493 break; 4494 4495 case IW_MODE_ADHOC: 4496 memcpy(priv->ieee->bssid, 4497 priv->bssid, ETH_ALEN); 4498 4499 /* clear out the station table */ 4500 priv->num_stations = 0; 4501 4502 IPW_DEBUG_ASSOC 4503 ("queueing adhoc check\n"); 4504 queue_delayed_work(priv-> 4505 workqueue, 4506 &priv-> 4507 adhoc_check, 4508 le16_to_cpu(priv-> 4509 assoc_request. 
4510 beacon_interval)); 4511 break; 4512 } 4513 4514 priv->status &= ~STATUS_ASSOCIATING; 4515 priv->status |= STATUS_ASSOCIATED; 4516 queue_work(priv->workqueue, 4517 &priv->system_config); 4518 4519#ifdef CONFIG_IPW2200_QOS 4520#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ 4521 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control)) 4522 if ((priv->status & STATUS_AUTH) && 4523 (IPW_GET_PACKET_STYPE(¬if->u.raw) 4524 == IEEE80211_STYPE_ASSOC_RESP)) { 4525 if ((sizeof 4526 (struct 4527 libipw_assoc_response) 4528 <= size) 4529 && (size <= 2314)) { 4530 struct 4531 libipw_rx_stats 4532 stats = { 4533 .len = size - 1, 4534 }; 4535 4536 IPW_DEBUG_QOS 4537 ("QoS Associate " 4538 "size %d\n", size); 4539 libipw_rx_mgt(priv-> 4540 ieee, 4541 (struct 4542 libipw_hdr_4addr 4543 *) 4544 ¬if->u.raw, &stats); 4545 } 4546 } 4547#endif 4548 4549 schedule_work(&priv->link_up); 4550 4551 break; 4552 } 4553 4554 case CMAS_AUTHENTICATED:{ 4555 if (priv-> 4556 status & (STATUS_ASSOCIATED | 4557 STATUS_AUTH)) { 4558 struct notif_authenticate *auth 4559 = ¬if->u.auth; 4560 IPW_DEBUG(IPW_DL_NOTIF | 4561 IPW_DL_STATE | 4562 IPW_DL_ASSOC, 4563 "deauthenticated: '%s' " 4564 "%pM" 4565 ": (0x%04X) - %s \n", 4566 print_ssid(ssid, 4567 priv-> 4568 essid, 4569 priv-> 4570 essid_len), 4571 priv->bssid, 4572 le16_to_cpu(auth->status), 4573 ipw_get_status_code 4574 (le16_to_cpu 4575 (auth->status))); 4576 4577 priv->status &= 4578 ~(STATUS_ASSOCIATING | 4579 STATUS_AUTH | 4580 STATUS_ASSOCIATED); 4581 4582 schedule_work(&priv->link_down); 4583 break; 4584 } 4585 4586 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4587 IPW_DL_ASSOC, 4588 "authenticated: '%s' %pM\n", 4589 print_ssid(ssid, priv->essid, 4590 priv->essid_len), 4591 priv->bssid); 4592 break; 4593 } 4594 4595 case CMAS_INIT:{ 4596 if (priv->status & STATUS_AUTH) { 4597 struct 4598 libipw_assoc_response 4599 *resp; 4600 resp = 4601 (struct 4602 libipw_assoc_response 4603 *)¬if->u.raw; 4604 IPW_DEBUG(IPW_DL_NOTIF | 4605 IPW_DL_STATE | 4606 IPW_DL_ASSOC, 4607 "association failed (0x%04X): %s\n", 4608 le16_to_cpu(resp->status), 4609 ipw_get_status_code 4610 (le16_to_cpu 4611 (resp->status))); 4612 } 4613 4614 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4615 IPW_DL_ASSOC, 4616 "disassociated: '%s' %pM \n", 4617 print_ssid(ssid, priv->essid, 4618 priv->essid_len), 4619 priv->bssid); 4620 4621 priv->status &= 4622 ~(STATUS_DISASSOCIATING | 4623 STATUS_ASSOCIATING | 4624 STATUS_ASSOCIATED | STATUS_AUTH); 4625 if (priv->assoc_network 4626 && (priv->assoc_network-> 4627 capability & 4628 WLAN_CAPABILITY_IBSS)) 4629 ipw_remove_current_network 4630 (priv); 4631 4632 schedule_work(&priv->link_down); 4633 4634 break; 4635 } 4636 4637 case CMAS_RX_ASSOC_RESP: 4638 break; 4639 4640 default: 4641 IPW_ERROR("assoc: unknown (%d)\n", 4642 assoc->state); 4643 break; 4644 } 4645 4646 break; 4647 } 4648 4649 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{ 4650 struct notif_authenticate *auth = ¬if->u.auth; 4651 switch (auth->state) { 4652 case CMAS_AUTHENTICATED: 4653 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4654 "authenticated: '%s' %pM \n", 4655 print_ssid(ssid, priv->essid, 4656 priv->essid_len), 4657 priv->bssid); 4658 priv->status |= STATUS_AUTH; 4659 break; 4660 4661 case CMAS_INIT: 4662 if (priv->status & STATUS_AUTH) { 4663 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4664 IPW_DL_ASSOC, 4665 "authentication failed (0x%04X): %s\n", 4666 le16_to_cpu(auth->status), 4667 ipw_get_status_code(le16_to_cpu 4668 (auth-> 4669 status))); 4670 } 4671 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4672 
IPW_DL_ASSOC, 4673 "deauthenticated: '%s' %pM\n", 4674 print_ssid(ssid, priv->essid, 4675 priv->essid_len), 4676 priv->bssid); 4677 4678 priv->status &= ~(STATUS_ASSOCIATING | 4679 STATUS_AUTH | 4680 STATUS_ASSOCIATED); 4681 4682 schedule_work(&priv->link_down); 4683 break; 4684 4685 case CMAS_TX_AUTH_SEQ_1: 4686 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4687 IPW_DL_ASSOC, "AUTH_SEQ_1\n"); 4688 break; 4689 case CMAS_RX_AUTH_SEQ_2: 4690 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4691 IPW_DL_ASSOC, "AUTH_SEQ_2\n"); 4692 break; 4693 case CMAS_AUTH_SEQ_1_PASS: 4694 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4695 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n"); 4696 break; 4697 case CMAS_AUTH_SEQ_1_FAIL: 4698 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4699 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n"); 4700 break; 4701 case CMAS_TX_AUTH_SEQ_3: 4702 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4703 IPW_DL_ASSOC, "AUTH_SEQ_3\n"); 4704 break; 4705 case CMAS_RX_AUTH_SEQ_4: 4706 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4707 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n"); 4708 break; 4709 case CMAS_AUTH_SEQ_2_PASS: 4710 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4711 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n"); 4712 break; 4713 case CMAS_AUTH_SEQ_2_FAIL: 4714 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4715 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n"); 4716 break; 4717 case CMAS_TX_ASSOC: 4718 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4719 IPW_DL_ASSOC, "TX_ASSOC\n"); 4720 break; 4721 case CMAS_RX_ASSOC_RESP: 4722 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4723 IPW_DL_ASSOC, "RX_ASSOC_RESP\n"); 4724 4725 break; 4726 case CMAS_ASSOCIATED: 4727 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4728 IPW_DL_ASSOC, "ASSOCIATED\n"); 4729 break; 4730 default: 4731 IPW_DEBUG_NOTIF("auth: failure - %d\n", 4732 auth->state); 4733 break; 4734 } 4735 break; 4736 } 4737 4738 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{ 4739 struct notif_channel_result *x = 4740 ¬if->u.channel_result; 4741 4742 if (size == sizeof(*x)) { 4743 IPW_DEBUG_SCAN("Scan result for channel %d\n", 4744 x->channel_num); 4745 } else { 4746 IPW_DEBUG_SCAN("Scan result of wrong size %d " 4747 "(should be %zd)\n", 4748 size, sizeof(*x)); 4749 } 4750 break; 4751 } 4752 4753 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{ 4754 struct notif_scan_complete *x = ¬if->u.scan_complete; 4755 if (size == sizeof(*x)) { 4756 IPW_DEBUG_SCAN 4757 ("Scan completed: type %d, %d channels, " 4758 "%d status\n", x->scan_type, 4759 x->num_channels, x->status); 4760 } else { 4761 IPW_ERROR("Scan completed of wrong size %d " 4762 "(should be %zd)\n", 4763 size, sizeof(*x)); 4764 } 4765 4766 priv->status &= 4767 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING); 4768 4769 wake_up_interruptible(&priv->wait_state); 4770 cancel_delayed_work(&priv->scan_check); 4771 4772 if (priv->status & STATUS_EXIT_PENDING) 4773 break; 4774 4775 priv->ieee->scans++; 4776 4777#ifdef CONFIG_IPW2200_MONITOR 4778 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 4779 priv->status |= STATUS_SCAN_FORCED; 4780 queue_delayed_work(priv->workqueue, 4781 &priv->request_scan, 0); 4782 break; 4783 } 4784 priv->status &= ~STATUS_SCAN_FORCED; 4785#endif /* CONFIG_IPW2200_MONITOR */ 4786 4787 /* Do queued direct scans first */ 4788 if (priv->status & STATUS_DIRECT_SCAN_PENDING) { 4789 queue_delayed_work(priv->workqueue, 4790 &priv->request_direct_scan, 0); 4791 } 4792 4793 if (!(priv->status & (STATUS_ASSOCIATED | 4794 STATUS_ASSOCIATING | 4795 STATUS_ROAMING | 4796 STATUS_DISASSOCIATING))) 4797 queue_work(priv->workqueue, &priv->associate); 4798 else if (priv->status & STATUS_ROAMING) { 4799 if 
(x->status == SCAN_COMPLETED_STATUS_COMPLETE) 4800 /* If a scan completed and we are in roam mode, then 4801 * the scan that completed was the one requested as a 4802 * result of entering roam... so, schedule the 4803 * roam work */ 4804 queue_work(priv->workqueue, 4805 &priv->roam); 4806 else 4807 /* Don't schedule if we aborted the scan */ 4808 priv->status &= ~STATUS_ROAMING; 4809 } else if (priv->status & STATUS_SCAN_PENDING) 4810 queue_delayed_work(priv->workqueue, 4811 &priv->request_scan, 0); 4812 else if (priv->config & CFG_BACKGROUND_SCAN 4813 && priv->status & STATUS_ASSOCIATED) 4814 queue_delayed_work(priv->workqueue, 4815 &priv->request_scan, 4816 round_jiffies_relative(HZ)); 4817 4818 /* Send an empty event to user space. 4819 * We don't send the received data on the event because 4820 * it would require us to do complex transcoding, and 4821 * we want to minimise the work done in the irq handler 4822 * Use a request to extract the data. 4823 * Also, we generate this even for any scan, regardless 4824 * on how the scan was initiated. User space can just 4825 * sync on periodic scan to get fresh data... 4826 * Jean II */ 4827 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) 4828 handle_scan_event(priv); 4829 break; 4830 } 4831 4832 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{ 4833 struct notif_frag_length *x = ¬if->u.frag_len; 4834 4835 if (size == sizeof(*x)) 4836 IPW_ERROR("Frag length: %d\n", 4837 le16_to_cpu(x->frag_length)); 4838 else 4839 IPW_ERROR("Frag length of wrong size %d " 4840 "(should be %zd)\n", 4841 size, sizeof(*x)); 4842 break; 4843 } 4844 4845 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{ 4846 struct notif_link_deterioration *x = 4847 ¬if->u.link_deterioration; 4848 4849 if (size == sizeof(*x)) { 4850 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4851 "link deterioration: type %d, cnt %d\n", 4852 x->silence_notification_type, 4853 x->silence_count); 4854 memcpy(&priv->last_link_deterioration, x, 4855 sizeof(*x)); 4856 } else { 4857 IPW_ERROR("Link Deterioration of wrong size %d " 4858 "(should be %zd)\n", 4859 size, sizeof(*x)); 4860 } 4861 break; 4862 } 4863 4864 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{ 4865 IPW_ERROR("Dino config\n"); 4866 if (priv->hcmd 4867 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG) 4868 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n"); 4869 4870 break; 4871 } 4872 4873 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{ 4874 struct notif_beacon_state *x = ¬if->u.beacon_state; 4875 if (size != sizeof(*x)) { 4876 IPW_ERROR 4877 ("Beacon state of wrong size %d (should " 4878 "be %zd)\n", size, sizeof(*x)); 4879 break; 4880 } 4881 4882 if (le32_to_cpu(x->state) == 4883 HOST_NOTIFICATION_STATUS_BEACON_MISSING) 4884 ipw_handle_missed_beacon(priv, 4885 le32_to_cpu(x-> 4886 number)); 4887 4888 break; 4889 } 4890 4891 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{ 4892 struct notif_tgi_tx_key *x = ¬if->u.tgi_tx_key; 4893 if (size == sizeof(*x)) { 4894 IPW_ERROR("TGi Tx Key: state 0x%02x sec type " 4895 "0x%02x station %d\n", 4896 x->key_state, x->security_type, 4897 x->station_index); 4898 break; 4899 } 4900 4901 IPW_ERROR 4902 ("TGi Tx Key of wrong size %d (should be %zd)\n", 4903 size, sizeof(*x)); 4904 break; 4905 } 4906 4907 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{ 4908 struct notif_calibration *x = ¬if->u.calibration; 4909 4910 if (size == sizeof(*x)) { 4911 memcpy(&priv->calib, x, sizeof(*x)); 4912 IPW_DEBUG_INFO("TODO: Calibration\n"); 4913 break; 4914 } 4915 4916 IPW_ERROR 4917 ("Calibration of wrong size %d (should be %zd)\n", 4918 size, 
sizeof(*x)); 4919 break; 4920 } 4921 4922 case HOST_NOTIFICATION_NOISE_STATS:{ 4923 if (size == sizeof(u32)) { 4924 priv->exp_avg_noise = 4925 exponential_average(priv->exp_avg_noise, 4926 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff), 4927 DEPTH_NOISE); 4928 break; 4929 } 4930 4931 IPW_ERROR 4932 ("Noise stat is wrong size %d (should be %zd)\n", 4933 size, sizeof(u32)); 4934 break; 4935 } 4936 4937 default: 4938 IPW_DEBUG_NOTIF("Unknown notification: " 4939 "subtype=%d,flags=0x%2x,size=%d\n", 4940 notif->subtype, notif->flags, size); 4941 } 4942} 4943 4944/** 4945 * Destroys all DMA structures and initialises them again 4946 * 4947 * @param priv 4948 * @return error code 4949 */ 4950static int ipw_queue_reset(struct ipw_priv *priv) 4951{ 4952 int rc = 0; 4953 /** @todo customize queue sizes */ 4954 int nTx = 64, nTxCmd = 8; 4955 ipw_tx_queue_free(priv); 4956 /* Tx CMD queue */ 4957 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd, 4958 IPW_TX_CMD_QUEUE_READ_INDEX, 4959 IPW_TX_CMD_QUEUE_WRITE_INDEX, 4960 IPW_TX_CMD_QUEUE_BD_BASE, 4961 IPW_TX_CMD_QUEUE_BD_SIZE); 4962 if (rc) { 4963 IPW_ERROR("Tx Cmd queue init failed\n"); 4964 goto error; 4965 } 4966 /* Tx queue(s) */ 4967 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx, 4968 IPW_TX_QUEUE_0_READ_INDEX, 4969 IPW_TX_QUEUE_0_WRITE_INDEX, 4970 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE); 4971 if (rc) { 4972 IPW_ERROR("Tx 0 queue init failed\n"); 4973 goto error; 4974 } 4975 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx, 4976 IPW_TX_QUEUE_1_READ_INDEX, 4977 IPW_TX_QUEUE_1_WRITE_INDEX, 4978 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE); 4979 if (rc) { 4980 IPW_ERROR("Tx 1 queue init failed\n"); 4981 goto error; 4982 } 4983 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx, 4984 IPW_TX_QUEUE_2_READ_INDEX, 4985 IPW_TX_QUEUE_2_WRITE_INDEX, 4986 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE); 4987 if (rc) { 4988 IPW_ERROR("Tx 2 queue init failed\n"); 4989 goto error; 4990 } 4991 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx, 4992 IPW_TX_QUEUE_3_READ_INDEX, 4993 IPW_TX_QUEUE_3_WRITE_INDEX, 4994 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE); 4995 if (rc) { 4996 IPW_ERROR("Tx 3 queue init failed\n"); 4997 goto error; 4998 } 4999 /* statistics */ 5000 priv->rx_bufs_min = 0; 5001 priv->rx_pend_max = 0; 5002 return rc; 5003 5004 error: 5005 ipw_tx_queue_free(priv); 5006 return rc; 5007} 5008 5009/** 5010 * Reclaim Tx queue entries no longer used by the NIC. 5011 * 5012 * When FW advances 'R' index, all entries between old and 5013 * new 'R' index need to be reclaimed. As a result, some free space 5014 * forms. If there is enough free space (> low mark), wake Tx queue.
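 * The reclaim loop advances last_used toward the hardware read index,
 * freeing one TFD per step; the number of entries still in use is then
 * (first_empty - last_used), wrapped around the ring size.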
5015 * 5016 * @note Need to protect against garbage in 'R' index 5017 * @param priv 5018 * @param txq 5019 * @param qindex 5020 * @return Number of used entries remains in the queue 5021 */ 5022static int ipw_queue_tx_reclaim(struct ipw_priv *priv, 5023 struct clx2_tx_queue *txq, int qindex) 5024{ 5025 u32 hw_tail; 5026 int used; 5027 struct clx2_queue *q = &txq->q; 5028 5029 hw_tail = ipw_read32(priv, q->reg_r); 5030 if (hw_tail >= q->n_bd) { 5031 IPW_ERROR 5032 ("Read index for DMA queue (%d) is out of range [0-%d)\n", 5033 hw_tail, q->n_bd); 5034 goto done; 5035 } 5036 for (; q->last_used != hw_tail; 5037 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) { 5038 ipw_queue_tx_free_tfd(priv, txq); 5039 priv->tx_packets++; 5040 } 5041 done: 5042 if ((ipw_tx_queue_space(q) > q->low_mark) && 5043 (qindex >= 0)) 5044 netif_wake_queue(priv->net_dev); 5045 used = q->first_empty - q->last_used; 5046 if (used < 0) 5047 used += q->n_bd; 5048 5049 return used; 5050} 5051 5052static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf, 5053 int len, int sync) 5054{ 5055 struct clx2_tx_queue *txq = &priv->txq_cmd; 5056 struct clx2_queue *q = &txq->q; 5057 struct tfd_frame *tfd; 5058 5059 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) { 5060 IPW_ERROR("No space for Tx\n"); 5061 return -EBUSY; 5062 } 5063 5064 tfd = &txq->bd[q->first_empty]; 5065 txq->txb[q->first_empty] = NULL; 5066 5067 memset(tfd, 0, sizeof(*tfd)); 5068 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE; 5069 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; 5070 priv->hcmd_seq++; 5071 tfd->u.cmd.index = hcmd; 5072 tfd->u.cmd.length = len; 5073 memcpy(tfd->u.cmd.payload, buf, len); 5074 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); 5075 ipw_write32(priv, q->reg_w, q->first_empty); 5076 _ipw_read32(priv, 0x90); 5077 5078 return 0; 5079} 5080 5081/* 5082 * Rx theory of operation 5083 * 5084 * The host allocates 32 DMA target addresses and passes the host address 5085 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is 5086 * 0 to 31 5087 * 5088 * Rx Queue Indexes 5089 * The host/firmware share two index registers for managing the Rx buffers. 5090 * 5091 * The READ index maps to the first position that the firmware may be writing 5092 * to -- the driver can read up to (but not including) this position and get 5093 * good data. 5094 * The READ index is managed by the firmware once the card is enabled. 5095 * 5096 * The WRITE index maps to the last position the driver has read from -- the 5097 * position preceding WRITE is the last slot the firmware can place a packet. 5098 * 5099 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 5100 * WRITE = READ. 5101 * 5102 * During initialization the host sets up the READ queue position to the first 5103 * INDEX position, and WRITE to the last (READ - 1 wrapped) 5104 * 5105 * When the firmware places a packet in a buffer it will advance the READ index 5106 * and fire the RX interrupt. The driver can then query the READ index and 5107 * process as many packets as possible, moving the WRITE index forward as it 5108 * resets the Rx queue buffers with new memory. 5109 * 5110 * The management in the driver is as follows: 5111 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When 5112 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 5113 * to replensish the ipw->rxq->rx_free. 
5114 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the 5115 * ipw->rxq is replenished and the READ INDEX is updated (updating the 5116 * 'processed' and 'read' driver indexes as well) 5117 * + A received packet is processed and handed to the kernel network stack, 5118 * detached from the ipw->rxq. The driver 'processed' index is updated. 5119 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free 5120 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ 5121 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there 5122 * were enough free buffers and RX_STALLED is set it is cleared. 5123 * 5124 * 5125 * Driver sequence: 5126 * 5127 * ipw_rx_queue_alloc() Allocates rx_free 5128 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls 5129 * ipw_rx_queue_restock 5130 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx 5131 * queue, updates firmware pointers, and updates 5132 * the WRITE index. If insufficient rx_free buffers 5133 * are available, schedules ipw_rx_queue_replenish 5134 * 5135 * -- enable interrupts -- 5136 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the 5137 * READ INDEX, detaching the SKB from the pool. 5138 * Moves the packet buffer from queue to rx_used. 5139 * Calls ipw_rx_queue_restock to refill any empty 5140 * slots. 5141 * ... 5142 * 5143 */ 5144 5145/* 5146 * If there are slots in the RX queue that need to be restocked, 5147 * and we have free pre-allocated buffers, fill the ranks as much 5148 * as we can pulling from rx_free. 5149 * 5150 * This moves the 'write' index forward to catch up with 'processed', and 5151 * also updates the memory address in the firmware to reference the new 5152 * target buffer. 5153 */ 5154static void ipw_rx_queue_restock(struct ipw_priv *priv) 5155{ 5156 struct ipw_rx_queue *rxq = priv->rxq; 5157 struct list_head *element; 5158 struct ipw_rx_mem_buffer *rxb; 5159 unsigned long flags; 5160 int write; 5161 5162 spin_lock_irqsave(&rxq->lock, flags); 5163 write = rxq->write; 5164 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 5165 element = rxq->rx_free.next; 5166 rxb = list_entry(element, struct ipw_rx_mem_buffer, list); 5167 list_del(element); 5168 5169 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE, 5170 rxb->dma_addr); 5171 rxq->queue[rxq->write] = rxb; 5172 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE; 5173 rxq->free_count--; 5174 } 5175 spin_unlock_irqrestore(&rxq->lock, flags); 5176 5177 /* If the pre-allocated buffer pool is dropping low, schedule to 5178 * refill it */ 5179 if (rxq->free_count <= RX_LOW_WATERMARK) 5180 queue_work(priv->workqueue, &priv->rx_replenish); 5181 5182 /* If we've added more space for the firmware to place data, tell it */ 5183 if (write != rxq->write) 5184 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write); 5185} 5186 5187/* 5188 * Move all used packet from rx_used to rx_free, allocating a new SKB for each. 5189 * Also restock the Rx queue via ipw_rx_queue_restock. 
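 * Fresh buffers are allocated with GFP_ATOMIC and DMA-mapped before being
 * placed on rx_free; if an allocation fails the loop simply stops, and the
 * restock path will schedule another replenish once the pool runs low.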
5190 * 5191 * This is called as a scheduled work item (except for during intialization) 5192 */ 5193static void ipw_rx_queue_replenish(void *data) 5194{ 5195 struct ipw_priv *priv = data; 5196 struct ipw_rx_queue *rxq = priv->rxq; 5197 struct list_head *element; 5198 struct ipw_rx_mem_buffer *rxb; 5199 unsigned long flags; 5200 5201 spin_lock_irqsave(&rxq->lock, flags); 5202 while (!list_empty(&rxq->rx_used)) { 5203 element = rxq->rx_used.next; 5204 rxb = list_entry(element, struct ipw_rx_mem_buffer, list); 5205 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC); 5206 if (!rxb->skb) { 5207 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n", 5208 priv->net_dev->name); 5209 /* We don't reschedule replenish work here -- we will 5210 * call the restock method and if it still needs 5211 * more buffers it will schedule replenish */ 5212 break; 5213 } 5214 list_del(element); 5215 5216 rxb->dma_addr = 5217 pci_map_single(priv->pci_dev, rxb->skb->data, 5218 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 5219 5220 list_add_tail(&rxb->list, &rxq->rx_free); 5221 rxq->free_count++; 5222 } 5223 spin_unlock_irqrestore(&rxq->lock, flags); 5224 5225 ipw_rx_queue_restock(priv); 5226} 5227 5228static void ipw_bg_rx_queue_replenish(struct work_struct *work) 5229{ 5230 struct ipw_priv *priv = 5231 container_of(work, struct ipw_priv, rx_replenish); 5232 mutex_lock(&priv->mutex); 5233 ipw_rx_queue_replenish(priv); 5234 mutex_unlock(&priv->mutex); 5235} 5236 5237/* Assumes that the skb field of the buffers in 'pool' is kept accurate. 5238 * If an SKB has been detached, the POOL needs to have its SKB set to NULL 5239 * This free routine walks the list of POOL entries and if SKB is set to 5240 * non NULL it is unmapped and freed 5241 */ 5242static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq) 5243{ 5244 int i; 5245 5246 if (!rxq) 5247 return; 5248 5249 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 5250 if (rxq->pool[i].skb != NULL) { 5251 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 5252 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 5253 dev_kfree_skb(rxq->pool[i].skb); 5254 } 5255 } 5256 5257 kfree(rxq); 5258} 5259 5260static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv) 5261{ 5262 struct ipw_rx_queue *rxq; 5263 int i; 5264 5265 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL); 5266 if (unlikely(!rxq)) { 5267 IPW_ERROR("memory allocation failed\n"); 5268 return NULL; 5269 } 5270 spin_lock_init(&rxq->lock); 5271 INIT_LIST_HEAD(&rxq->rx_free); 5272 INIT_LIST_HEAD(&rxq->rx_used); 5273 5274 /* Fill the rx_used queue with _all_ of the Rx buffers */ 5275 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 5276 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 5277 5278 /* Set us so that we have processed and used all buffers, but have 5279 * not restocked the Rx queue with fresh buffers */ 5280 rxq->read = rxq->write = 0; 5281 rxq->free_count = 0; 5282 5283 return rxq; 5284} 5285 5286static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate) 5287{ 5288 rate &= ~LIBIPW_BASIC_RATE_MASK; 5289 if (ieee_mode == IEEE_A) { 5290 switch (rate) { 5291 case LIBIPW_OFDM_RATE_6MB: 5292 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 5293 1 : 0; 5294 case LIBIPW_OFDM_RATE_9MB: 5295 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 5296 1 : 0; 5297 case LIBIPW_OFDM_RATE_12MB: 5298 return priv-> 5299 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0; 5300 case LIBIPW_OFDM_RATE_18MB: 5301 return priv-> 5302 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 
1 : 0; 5303 case LIBIPW_OFDM_RATE_24MB: 5304 return priv-> 5305 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0; 5306 case LIBIPW_OFDM_RATE_36MB: 5307 return priv-> 5308 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0; 5309 case LIBIPW_OFDM_RATE_48MB: 5310 return priv-> 5311 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0; 5312 case LIBIPW_OFDM_RATE_54MB: 5313 return priv-> 5314 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0; 5315 default: 5316 return 0; 5317 } 5318 } 5319 5320 /* B and G mixed */ 5321 switch (rate) { 5322 case LIBIPW_CCK_RATE_1MB: 5323 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0; 5324 case LIBIPW_CCK_RATE_2MB: 5325 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0; 5326 case LIBIPW_CCK_RATE_5MB: 5327 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0; 5328 case LIBIPW_CCK_RATE_11MB: 5329 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0; 5330 } 5331 5332 /* If we are limited to B modulations, bail at this point */ 5333 if (ieee_mode == IEEE_B) 5334 return 0; 5335 5336 /* G */ 5337 switch (rate) { 5338 case LIBIPW_OFDM_RATE_6MB: 5339 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0; 5340 case LIBIPW_OFDM_RATE_9MB: 5341 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0; 5342 case LIBIPW_OFDM_RATE_12MB: 5343 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0; 5344 case LIBIPW_OFDM_RATE_18MB: 5345 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0; 5346 case LIBIPW_OFDM_RATE_24MB: 5347 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0; 5348 case LIBIPW_OFDM_RATE_36MB: 5349 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0; 5350 case LIBIPW_OFDM_RATE_48MB: 5351 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0; 5352 case LIBIPW_OFDM_RATE_54MB: 5353 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 
1 : 0; 5354 } 5355 5356 return 0; 5357} 5358 5359static int ipw_compatible_rates(struct ipw_priv *priv, 5360 const struct libipw_network *network, 5361 struct ipw_supported_rates *rates) 5362{ 5363 int num_rates, i; 5364 5365 memset(rates, 0, sizeof(*rates)); 5366 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES); 5367 rates->num_rates = 0; 5368 for (i = 0; i < num_rates; i++) { 5369 if (!ipw_is_rate_in_mask(priv, network->mode, 5370 network->rates[i])) { 5371 5372 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) { 5373 IPW_DEBUG_SCAN("Adding masked mandatory " 5374 "rate %02X\n", 5375 network->rates[i]); 5376 rates->supported_rates[rates->num_rates++] = 5377 network->rates[i]; 5378 continue; 5379 } 5380 5381 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", 5382 network->rates[i], priv->rates_mask); 5383 continue; 5384 } 5385 5386 rates->supported_rates[rates->num_rates++] = network->rates[i]; 5387 } 5388 5389 num_rates = min(network->rates_ex_len, 5390 (u8) (IPW_MAX_RATES - num_rates)); 5391 for (i = 0; i < num_rates; i++) { 5392 if (!ipw_is_rate_in_mask(priv, network->mode, 5393 network->rates_ex[i])) { 5394 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) { 5395 IPW_DEBUG_SCAN("Adding masked mandatory " 5396 "rate %02X\n", 5397 network->rates_ex[i]); 5398 rates->supported_rates[rates->num_rates++] = 5399 network->rates[i]; 5400 continue; 5401 } 5402 5403 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", 5404 network->rates_ex[i], priv->rates_mask); 5405 continue; 5406 } 5407 5408 rates->supported_rates[rates->num_rates++] = 5409 network->rates_ex[i]; 5410 } 5411 5412 return 1; 5413} 5414 5415static void ipw_copy_rates(struct ipw_supported_rates *dest, 5416 const struct ipw_supported_rates *src) 5417{ 5418 u8 i; 5419 for (i = 0; i < src->num_rates; i++) 5420 dest->supported_rates[i] = src->supported_rates[i]; 5421 dest->num_rates = src->num_rates; 5422} 5423 5424/* TODO: Look at sniffed packets in the air to determine if the basic rate 5425 * mask should ever be used -- right now all callers to add the scan rates are 5426 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */ 5427static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates, 5428 u8 modulation, u32 rate_mask) 5429{ 5430 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ? 5431 LIBIPW_BASIC_RATE_MASK : 0; 5432 5433 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK) 5434 rates->supported_rates[rates->num_rates++] = 5435 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB; 5436 5437 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK) 5438 rates->supported_rates[rates->num_rates++] = 5439 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB; 5440 5441 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK) 5442 rates->supported_rates[rates->num_rates++] = basic_mask | 5443 LIBIPW_CCK_RATE_5MB; 5444 5445 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK) 5446 rates->supported_rates[rates->num_rates++] = basic_mask | 5447 LIBIPW_CCK_RATE_11MB; 5448} 5449 5450static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates, 5451 u8 modulation, u32 rate_mask) 5452{ 5453 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ? 
5454 LIBIPW_BASIC_RATE_MASK : 0; 5455 5456 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK) 5457 rates->supported_rates[rates->num_rates++] = basic_mask | 5458 LIBIPW_OFDM_RATE_6MB; 5459 5460 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK) 5461 rates->supported_rates[rates->num_rates++] = 5462 LIBIPW_OFDM_RATE_9MB; 5463 5464 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK) 5465 rates->supported_rates[rates->num_rates++] = basic_mask | 5466 LIBIPW_OFDM_RATE_12MB; 5467 5468 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK) 5469 rates->supported_rates[rates->num_rates++] = 5470 LIBIPW_OFDM_RATE_18MB; 5471 5472 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK) 5473 rates->supported_rates[rates->num_rates++] = basic_mask | 5474 LIBIPW_OFDM_RATE_24MB; 5475 5476 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK) 5477 rates->supported_rates[rates->num_rates++] = 5478 LIBIPW_OFDM_RATE_36MB; 5479 5480 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK) 5481 rates->supported_rates[rates->num_rates++] = 5482 LIBIPW_OFDM_RATE_48MB; 5483 5484 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK) 5485 rates->supported_rates[rates->num_rates++] = 5486 LIBIPW_OFDM_RATE_54MB; 5487} 5488 5489struct ipw_network_match { 5490 struct libipw_network *network; 5491 struct ipw_supported_rates rates; 5492}; 5493 5494static int ipw_find_adhoc_network(struct ipw_priv *priv, 5495 struct ipw_network_match *match, 5496 struct libipw_network *network, 5497 int roaming) 5498{ 5499 struct ipw_supported_rates rates; 5500 DECLARE_SSID_BUF(ssid); 5501 5502 /* Verify that this network's capability is compatible with the 5503 * current mode (AdHoc or Infrastructure) */ 5504 if ((priv->ieee->iw_mode == IW_MODE_ADHOC && 5505 !(network->capability & WLAN_CAPABILITY_IBSS))) { 5506 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to " 5507 "capability mismatch.\n", 5508 print_ssid(ssid, network->ssid, 5509 network->ssid_len), 5510 network->bssid); 5511 return 0; 5512 } 5513 5514 if (unlikely(roaming)) { 5515 /* If we are roaming, then ensure check if this is a valid 5516 * network to try and roam to */ 5517 if ((network->ssid_len != match->network->ssid_len) || 5518 memcmp(network->ssid, match->network->ssid, 5519 network->ssid_len)) { 5520 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5521 "because of non-network ESSID.\n", 5522 print_ssid(ssid, network->ssid, 5523 network->ssid_len), 5524 network->bssid); 5525 return 0; 5526 } 5527 } else { 5528 /* If an ESSID has been configured then compare the broadcast 5529 * ESSID to ours */ 5530 if ((priv->config & CFG_STATIC_ESSID) && 5531 ((network->ssid_len != priv->essid_len) || 5532 memcmp(network->ssid, priv->essid, 5533 min(network->ssid_len, priv->essid_len)))) { 5534 char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 5535 5536 strncpy(escaped, 5537 print_ssid(ssid, network->ssid, 5538 network->ssid_len), 5539 sizeof(escaped)); 5540 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5541 "because of ESSID mismatch: '%s'.\n", 5542 escaped, network->bssid, 5543 print_ssid(ssid, priv->essid, 5544 priv->essid_len)); 5545 return 0; 5546 } 5547 } 5548 5549 /* If the old network rate is better than this one, don't bother 5550 * testing everything else. 
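 * In this adhoc-merge path the comparison below is made on the networks'
 * cached beacon time_stamp words rather than on an actual rate figure.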
*/ 5551 5552 if (network->time_stamp[0] < match->network->time_stamp[0]) { 5553 IPW_DEBUG_MERGE("Network '%s excluded because newer than " 5554 "current network.\n", 5555 print_ssid(ssid, match->network->ssid, 5556 match->network->ssid_len)); 5557 return 0; 5558 } else if (network->time_stamp[1] < match->network->time_stamp[1]) { 5559 IPW_DEBUG_MERGE("Network '%s excluded because newer than " 5560 "current network.\n", 5561 print_ssid(ssid, match->network->ssid, 5562 match->network->ssid_len)); 5563 return 0; 5564 } 5565 5566 /* Now go through and see if the requested network is valid... */ 5567 if (priv->ieee->scan_age != 0 && 5568 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5569 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5570 "because of age: %ums.\n", 5571 print_ssid(ssid, network->ssid, 5572 network->ssid_len), 5573 network->bssid, 5574 jiffies_to_msecs(jiffies - 5575 network->last_scanned)); 5576 return 0; 5577 } 5578 5579 if ((priv->config & CFG_STATIC_CHANNEL) && 5580 (network->channel != priv->channel)) { 5581 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5582 "because of channel mismatch: %d != %d.\n", 5583 print_ssid(ssid, network->ssid, 5584 network->ssid_len), 5585 network->bssid, 5586 network->channel, priv->channel); 5587 return 0; 5588 } 5589 5590 /* Verify privacy compatability */ 5591 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != 5592 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { 5593 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5594 "because of privacy mismatch: %s != %s.\n", 5595 print_ssid(ssid, network->ssid, 5596 network->ssid_len), 5597 network->bssid, 5598 priv-> 5599 capability & CAP_PRIVACY_ON ? "on" : "off", 5600 network-> 5601 capability & WLAN_CAPABILITY_PRIVACY ? "on" : 5602 "off"); 5603 return 0; 5604 } 5605 5606 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) { 5607 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5608 "because of the same BSSID match: %pM" 5609 ".\n", print_ssid(ssid, network->ssid, 5610 network->ssid_len), 5611 network->bssid, 5612 priv->bssid); 5613 return 0; 5614 } 5615 5616 /* Filter out any incompatible freq / mode combinations */ 5617 if (!libipw_is_valid_mode(priv->ieee, network->mode)) { 5618 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5619 "because of invalid frequency/mode " 5620 "combination.\n", 5621 print_ssid(ssid, network->ssid, 5622 network->ssid_len), 5623 network->bssid); 5624 return 0; 5625 } 5626 5627 /* Ensure that the rates supported by the driver are compatible with 5628 * this AP, including verification of basic rates (mandatory) */ 5629 if (!ipw_compatible_rates(priv, network, &rates)) { 5630 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5631 "because configured rate mask excludes " 5632 "AP mandatory rate.\n", 5633 print_ssid(ssid, network->ssid, 5634 network->ssid_len), 5635 network->bssid); 5636 return 0; 5637 } 5638 5639 if (rates.num_rates == 0) { 5640 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded " 5641 "because of no compatible rates.\n", 5642 print_ssid(ssid, network->ssid, 5643 network->ssid_len), 5644 network->bssid); 5645 return 0; 5646 } 5647 5648 /* TODO: Perform any further minimal comparititive tests. We do not 5649 * want to put too much policy logic here; intelligent scan selection 5650 * should occur within a generic IEEE 802.11 user space tool. 
*/ 5651 5652 /* Set up 'new' AP to this network */ 5653 ipw_copy_rates(&match->rates, &rates); 5654 match->network = network; 5655 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n", 5656 print_ssid(ssid, network->ssid, network->ssid_len), 5657 network->bssid); 5658 5659 return 1; 5660} 5661 5662static void ipw_merge_adhoc_network(struct work_struct *work) 5663{ 5664 DECLARE_SSID_BUF(ssid); 5665 struct ipw_priv *priv = 5666 container_of(work, struct ipw_priv, merge_networks); 5667 struct libipw_network *network = NULL; 5668 struct ipw_network_match match = { 5669 .network = priv->assoc_network 5670 }; 5671 5672 if ((priv->status & STATUS_ASSOCIATED) && 5673 (priv->ieee->iw_mode == IW_MODE_ADHOC)) { 5674 /* First pass through ROAM process -- look for a better 5675 * network */ 5676 unsigned long flags; 5677 5678 spin_lock_irqsave(&priv->ieee->lock, flags); 5679 list_for_each_entry(network, &priv->ieee->network_list, list) { 5680 if (network != priv->assoc_network) 5681 ipw_find_adhoc_network(priv, &match, network, 5682 1); 5683 } 5684 spin_unlock_irqrestore(&priv->ieee->lock, flags); 5685 5686 if (match.network == priv->assoc_network) { 5687 IPW_DEBUG_MERGE("No better ADHOC in this network to " 5688 "merge to.\n"); 5689 return; 5690 } 5691 5692 mutex_lock(&priv->mutex); 5693 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) { 5694 IPW_DEBUG_MERGE("remove network %s\n", 5695 print_ssid(ssid, priv->essid, 5696 priv->essid_len)); 5697 ipw_remove_current_network(priv); 5698 } 5699 5700 ipw_disassociate(priv); 5701 priv->assoc_network = match.network; 5702 mutex_unlock(&priv->mutex); 5703 return; 5704 } 5705} 5706 5707static int ipw_best_network(struct ipw_priv *priv, 5708 struct ipw_network_match *match, 5709 struct libipw_network *network, int roaming) 5710{ 5711 struct ipw_supported_rates rates; 5712 DECLARE_SSID_BUF(ssid); 5713 5714 /* Verify that this network's capability is compatible with the 5715 * current mode (AdHoc or Infrastructure) */ 5716 if ((priv->ieee->iw_mode == IW_MODE_INFRA && 5717 !(network->capability & WLAN_CAPABILITY_ESS)) || 5718 (priv->ieee->iw_mode == IW_MODE_ADHOC && 5719 !(network->capability & WLAN_CAPABILITY_IBSS))) { 5720 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to " 5721 "capability mismatch.\n", 5722 print_ssid(ssid, network->ssid, 5723 network->ssid_len), 5724 network->bssid); 5725 return 0; 5726 } 5727 5728 if (unlikely(roaming)) { 5729 /* If we are roaming, then ensure check if this is a valid 5730 * network to try and roam to */ 5731 if ((network->ssid_len != match->network->ssid_len) || 5732 memcmp(network->ssid, match->network->ssid, 5733 network->ssid_len)) { 5734 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5735 "because of non-network ESSID.\n", 5736 print_ssid(ssid, network->ssid, 5737 network->ssid_len), 5738 network->bssid); 5739 return 0; 5740 } 5741 } else { 5742 /* If an ESSID has been configured then compare the broadcast 5743 * ESSID to ours */ 5744 if ((priv->config & CFG_STATIC_ESSID) && 5745 ((network->ssid_len != priv->essid_len) || 5746 memcmp(network->ssid, priv->essid, 5747 min(network->ssid_len, priv->essid_len)))) { 5748 char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 5749 strncpy(escaped, 5750 print_ssid(ssid, network->ssid, 5751 network->ssid_len), 5752 sizeof(escaped)); 5753 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5754 "because of ESSID mismatch: '%s'.\n", 5755 escaped, network->bssid, 5756 print_ssid(ssid, priv->essid, 5757 priv->essid_len)); 5758 return 0; 5759 } 5760 } 5761 5762 /* If the old network rate is better than 
this one, don't bother 5763 * testing everything else. */ 5764 if (match->network && match->network->stats.rssi > network->stats.rssi) { 5765 char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 5766 strncpy(escaped, 5767 print_ssid(ssid, network->ssid, network->ssid_len), 5768 sizeof(escaped)); 5769 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because " 5770 "'%s (%pM)' has a stronger signal.\n", 5771 escaped, network->bssid, 5772 print_ssid(ssid, match->network->ssid, 5773 match->network->ssid_len), 5774 match->network->bssid); 5775 return 0; 5776 } 5777 5778 /* If this network has already had an association attempt within the 5779 * last 3 seconds, do not try and associate again... */ 5780 if (network->last_associate && 5781 time_after(network->last_associate + (HZ * 3UL), jiffies)) { 5782 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5783 "because of storming (%ums since last " 5784 "assoc attempt).\n", 5785 print_ssid(ssid, network->ssid, 5786 network->ssid_len), 5787 network->bssid, 5788 jiffies_to_msecs(jiffies - 5789 network->last_associate)); 5790 return 0; 5791 } 5792 5793 /* Now go through and see if the requested network is valid... */ 5794 if (priv->ieee->scan_age != 0 && 5795 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5796 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5797 "because of age: %ums.\n", 5798 print_ssid(ssid, network->ssid, 5799 network->ssid_len), 5800 network->bssid, 5801 jiffies_to_msecs(jiffies - 5802 network->last_scanned)); 5803 return 0; 5804 } 5805 5806 if ((priv->config & CFG_STATIC_CHANNEL) && 5807 (network->channel != priv->channel)) { 5808 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5809 "because of channel mismatch: %d != %d.\n", 5810 print_ssid(ssid, network->ssid, 5811 network->ssid_len), 5812 network->bssid, 5813 network->channel, priv->channel); 5814 return 0; 5815 } 5816 5817 /* Verify privacy compatibility */ 5818 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != 5819 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { 5820 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5821 "because of privacy mismatch: %s != %s.\n", 5822 print_ssid(ssid, network->ssid, 5823 network->ssid_len), 5824 network->bssid, 5825 priv->capability & CAP_PRIVACY_ON ? "on" : 5826 "off", 5827 network->capability & 5828 WLAN_CAPABILITY_PRIVACY ?
"on" : "off"); 5829 return 0; 5830 } 5831 5832 if ((priv->config & CFG_STATIC_BSSID) && 5833 memcmp(network->bssid, priv->bssid, ETH_ALEN)) { 5834 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5835 "because of BSSID mismatch: %pM.\n", 5836 print_ssid(ssid, network->ssid, 5837 network->ssid_len), 5838 network->bssid, priv->bssid); 5839 return 0; 5840 } 5841 5842 /* Filter out any incompatible freq / mode combinations */ 5843 if (!libipw_is_valid_mode(priv->ieee, network->mode)) { 5844 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5845 "because of invalid frequency/mode " 5846 "combination.\n", 5847 print_ssid(ssid, network->ssid, 5848 network->ssid_len), 5849 network->bssid); 5850 return 0; 5851 } 5852 5853 /* Filter out invalid channel in current GEO */ 5854 if (!libipw_is_valid_channel(priv->ieee, network->channel)) { 5855 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5856 "because of invalid channel in current GEO\n", 5857 print_ssid(ssid, network->ssid, 5858 network->ssid_len), 5859 network->bssid); 5860 return 0; 5861 } 5862 5863 /* Ensure that the rates supported by the driver are compatible with 5864 * this AP, including verification of basic rates (mandatory) */ 5865 if (!ipw_compatible_rates(priv, network, &rates)) { 5866 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5867 "because configured rate mask excludes " 5868 "AP mandatory rate.\n", 5869 print_ssid(ssid, network->ssid, 5870 network->ssid_len), 5871 network->bssid); 5872 return 0; 5873 } 5874 5875 if (rates.num_rates == 0) { 5876 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded " 5877 "because of no compatible rates.\n", 5878 print_ssid(ssid, network->ssid, 5879 network->ssid_len), 5880 network->bssid); 5881 return 0; 5882 } 5883 5884 /* TODO: Perform any further minimal comparative tests. We do not 5885 * want to put too much policy logic here; intelligent scan selection 5886 * should occur within a generic IEEE 802.11 user space tool. */ 5887 5888 /* Set up 'new' AP to this network */ 5889 ipw_copy_rates(&match->rates, &rates); 5890 match->network = network; 5891 5892 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n", 5893 print_ssid(ssid, network->ssid, network->ssid_len), 5894 network->bssid); 5895 5896 return 1; 5897} 5898 5899static void ipw_adhoc_create(struct ipw_priv *priv, 5900 struct libipw_network *network) 5901{ 5902 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 5903 int i; 5904 5905 /* 5906 * For the purposes of scanning, we can set our wireless mode 5907 * to trigger scans across combinations of bands, but when it 5908 * comes to creating a new ad-hoc network, we have to tell the FW 5909 * exactly which band to use. 5910 * 5911 * We also have the possibility of an invalid channel for the 5912 * chosen band. Attempting to create a new ad-hoc network 5913 * with an invalid channel for wireless mode will trigger a 5914 * FW fatal error.
5915 * 5916 */ 5917 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 5918 case LIBIPW_52GHZ_BAND: 5919 network->mode = IEEE_A; 5920 i = libipw_channel_to_index(priv->ieee, priv->channel); 5921 BUG_ON(i == -1); 5922 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) { 5923 IPW_WARNING("Overriding invalid channel\n"); 5924 priv->channel = geo->a[0].channel; 5925 } 5926 break; 5927 5928 case LIBIPW_24GHZ_BAND: 5929 if (priv->ieee->mode & IEEE_G) 5930 network->mode = IEEE_G; 5931 else 5932 network->mode = IEEE_B; 5933 i = libipw_channel_to_index(priv->ieee, priv->channel); 5934 BUG_ON(i == -1); 5935 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) { 5936 IPW_WARNING("Overriding invalid channel\n"); 5937 priv->channel = geo->bg[0].channel; 5938 } 5939 break; 5940 5941 default: 5942 IPW_WARNING("Overriding invalid channel\n"); 5943 if (priv->ieee->mode & IEEE_A) { 5944 network->mode = IEEE_A; 5945 priv->channel = geo->a[0].channel; 5946 } else if (priv->ieee->mode & IEEE_G) { 5947 network->mode = IEEE_G; 5948 priv->channel = geo->bg[0].channel; 5949 } else { 5950 network->mode = IEEE_B; 5951 priv->channel = geo->bg[0].channel; 5952 } 5953 break; 5954 } 5955 5956 network->channel = priv->channel; 5957 priv->config |= CFG_ADHOC_PERSIST; 5958 ipw_create_bssid(priv, network->bssid); 5959 network->ssid_len = priv->essid_len; 5960 memcpy(network->ssid, priv->essid, priv->essid_len); 5961 memset(&network->stats, 0, sizeof(network->stats)); 5962 network->capability = WLAN_CAPABILITY_IBSS; 5963 if (!(priv->config & CFG_PREAMBLE_LONG)) 5964 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE; 5965 if (priv->capability & CAP_PRIVACY_ON) 5966 network->capability |= WLAN_CAPABILITY_PRIVACY; 5967 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH); 5968 memcpy(network->rates, priv->rates.supported_rates, network->rates_len); 5969 network->rates_ex_len = priv->rates.num_rates - network->rates_len; 5970 memcpy(network->rates_ex, 5971 &priv->rates.supported_rates[network->rates_len], 5972 network->rates_ex_len); 5973 network->last_scanned = 0; 5974 network->flags = 0; 5975 network->last_associate = 0; 5976 network->time_stamp[0] = 0; 5977 network->time_stamp[1] = 0; 5978 network->beacon_interval = 100; /* Default */ 5979 network->listen_interval = 10; /* Default */ 5980 network->atim_window = 0; /* Default */ 5981 network->wpa_ie_len = 0; 5982 network->rsn_ie_len = 0; 5983} 5984 5985static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index) 5986{ 5987 struct ipw_tgi_tx_key key; 5988 5989 if (!(priv->ieee->sec.flags & (1 << index))) 5990 return; 5991 5992 key.key_id = index; 5993 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH); 5994 key.security_type = type; 5995 key.station_index = 0; /* always 0 for BSS */ 5996 key.flags = 0; 5997 /* 0 for new key; previous value of counter (after fatal error) */ 5998 key.tx_counter[0] = cpu_to_le32(0); 5999 key.tx_counter[1] = cpu_to_le32(0); 6000 6001 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key); 6002} 6003 6004static void ipw_send_wep_keys(struct ipw_priv *priv, int type) 6005{ 6006 struct ipw_wep_key key; 6007 int i; 6008 6009 key.cmd_id = DINO_CMD_WEP_KEY; 6010 key.seq_num = 0; 6011 6012 /* Note: AES keys cannot be set for multiple times. 6013 * Only set it at the first time. 
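 * Slots whose SEC flag is not set are simply skipped by the loop
 * below; no command is sent for them, so the corresponding
 * firmware key slots are left untouched.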
*/ 6014 for (i = 0; i < 4; i++) { 6015 key.key_index = i | type; 6016 if (!(priv->ieee->sec.flags & (1 << i))) { 6017 key.key_size = 0; 6018 continue; 6019 } 6020 6021 key.key_size = priv->ieee->sec.key_sizes[i]; 6022 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size); 6023 6024 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key); 6025 } 6026} 6027 6028static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level) 6029{ 6030 if (priv->ieee->host_encrypt) 6031 return; 6032 6033 switch (level) { 6034 case SEC_LEVEL_3: 6035 priv->sys_config.disable_unicast_decryption = 0; 6036 priv->ieee->host_decrypt = 0; 6037 break; 6038 case SEC_LEVEL_2: 6039 priv->sys_config.disable_unicast_decryption = 1; 6040 priv->ieee->host_decrypt = 1; 6041 break; 6042 case SEC_LEVEL_1: 6043 priv->sys_config.disable_unicast_decryption = 0; 6044 priv->ieee->host_decrypt = 0; 6045 break; 6046 case SEC_LEVEL_0: 6047 priv->sys_config.disable_unicast_decryption = 1; 6048 break; 6049 default: 6050 break; 6051 } 6052} 6053 6054static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level) 6055{ 6056 if (priv->ieee->host_encrypt) 6057 return; 6058 6059 switch (level) { 6060 case SEC_LEVEL_3: 6061 priv->sys_config.disable_multicast_decryption = 0; 6062 break; 6063 case SEC_LEVEL_2: 6064 priv->sys_config.disable_multicast_decryption = 1; 6065 break; 6066 case SEC_LEVEL_1: 6067 priv->sys_config.disable_multicast_decryption = 0; 6068 break; 6069 case SEC_LEVEL_0: 6070 priv->sys_config.disable_multicast_decryption = 1; 6071 break; 6072 default: 6073 break; 6074 } 6075} 6076 6077static void ipw_set_hwcrypto_keys(struct ipw_priv *priv) 6078{ 6079 switch (priv->ieee->sec.level) { 6080 case SEC_LEVEL_3: 6081 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY) 6082 ipw_send_tgi_tx_key(priv, 6083 DCT_FLAG_EXT_SECURITY_CCM, 6084 priv->ieee->sec.active_key); 6085 6086 if (!priv->ieee->host_mc_decrypt) 6087 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM); 6088 break; 6089 case SEC_LEVEL_2: 6090 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY) 6091 ipw_send_tgi_tx_key(priv, 6092 DCT_FLAG_EXT_SECURITY_TKIP, 6093 priv->ieee->sec.active_key); 6094 break; 6095 case SEC_LEVEL_1: 6096 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); 6097 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level); 6098 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level); 6099 break; 6100 case SEC_LEVEL_0: 6101 default: 6102 break; 6103 } 6104} 6105 6106static void ipw_adhoc_check(void *data) 6107{ 6108 struct ipw_priv *priv = data; 6109 6110 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold && 6111 !(priv->config & CFG_ADHOC_PERSIST)) { 6112 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 6113 IPW_DL_STATE | IPW_DL_ASSOC, 6114 "Missed beacon: %d - disassociate\n", 6115 priv->missed_adhoc_beacons); 6116 ipw_remove_current_network(priv); 6117 ipw_disassociate(priv); 6118 return; 6119 } 6120 6121 queue_delayed_work(priv->workqueue, &priv->adhoc_check, 6122 le16_to_cpu(priv->assoc_request.beacon_interval)); 6123} 6124 6125static void ipw_bg_adhoc_check(struct work_struct *work) 6126{ 6127 struct ipw_priv *priv = 6128 container_of(work, struct ipw_priv, adhoc_check.work); 6129 mutex_lock(&priv->mutex); 6130 ipw_adhoc_check(priv); 6131 mutex_unlock(&priv->mutex); 6132} 6133 6134static void ipw_debug_config(struct ipw_priv *priv) 6135{ 6136 DECLARE_SSID_BUF(ssid); 6137 IPW_DEBUG_INFO("Scan completed, no valid APs matched " 6138 "[CFG 0x%08X]\n", priv->config); 6139 if (priv->config & CFG_STATIC_CHANNEL) 6140 IPW_DEBUG_INFO("Channel locked 
to %d\n", priv->channel); 6141 else 6142 IPW_DEBUG_INFO("Channel unlocked.\n"); 6143 if (priv->config & CFG_STATIC_ESSID) 6144 IPW_DEBUG_INFO("ESSID locked to '%s'\n", 6145 print_ssid(ssid, priv->essid, priv->essid_len)); 6146 else 6147 IPW_DEBUG_INFO("ESSID unlocked.\n"); 6148 if (priv->config & CFG_STATIC_BSSID) 6149 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid); 6150 else 6151 IPW_DEBUG_INFO("BSSID unlocked.\n"); 6152 if (priv->capability & CAP_PRIVACY_ON) 6153 IPW_DEBUG_INFO("PRIVACY on\n"); 6154 else 6155 IPW_DEBUG_INFO("PRIVACY off\n"); 6156 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask); 6157} 6158 6159static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode) 6160{ 6161 /* TODO: Verify that this works... */ 6162 struct ipw_fixed_rate fr; 6163 u32 reg; 6164 u16 mask = 0; 6165 u16 new_tx_rates = priv->rates_mask; 6166 6167 /* Identify 'current FW band' and match it with the fixed 6168 * Tx rates */ 6169 6170 switch (priv->ieee->freq_band) { 6171 case LIBIPW_52GHZ_BAND: /* A only */ 6172 /* IEEE_A */ 6173 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) { 6174 /* Invalid fixed rate mask */ 6175 IPW_DEBUG_WX 6176 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6177 new_tx_rates = 0; 6178 break; 6179 } 6180 6181 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A; 6182 break; 6183 6184 default: /* 2.4Ghz or Mixed */ 6185 /* IEEE_B */ 6186 if (mode == IEEE_B) { 6187 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) { 6188 /* Invalid fixed rate mask */ 6189 IPW_DEBUG_WX 6190 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6191 new_tx_rates = 0; 6192 } 6193 break; 6194 } 6195 6196 /* IEEE_G */ 6197 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK | 6198 LIBIPW_OFDM_RATES_MASK)) { 6199 /* Invalid fixed rate mask */ 6200 IPW_DEBUG_WX 6201 ("invalid fixed rate mask in ipw_set_fixed_rate\n"); 6202 new_tx_rates = 0; 6203 break; 6204 } 6205 6206 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) { 6207 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1); 6208 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK; 6209 } 6210 6211 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) { 6212 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1); 6213 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK; 6214 } 6215 6216 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) { 6217 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1); 6218 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK; 6219 } 6220 6221 new_tx_rates |= mask; 6222 break; 6223 } 6224 6225 fr.tx_rates = cpu_to_le16(new_tx_rates); 6226 6227 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE); 6228 ipw_write_reg32(priv, reg, *(u32 *) & fr); 6229} 6230 6231static void ipw_abort_scan(struct ipw_priv *priv) 6232{ 6233 int err; 6234 6235 if (priv->status & STATUS_SCAN_ABORTING) { 6236 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n"); 6237 return; 6238 } 6239 priv->status |= STATUS_SCAN_ABORTING; 6240 6241 err = ipw_send_scan_abort(priv); 6242 if (err) 6243 IPW_DEBUG_HC("Request to abort scan failed.\n"); 6244} 6245 6246static void ipw_add_scan_channels(struct ipw_priv *priv, 6247 struct ipw_scan_request_ext *scan, 6248 int scan_type) 6249{ 6250 int channel_index = 0; 6251 const struct libipw_geo *geo; 6252 int i; 6253 6254 geo = libipw_get_geo(priv->ieee); 6255 6256 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) { 6257 int start = channel_index; 6258 for (i = 0; i < geo->a_channels; i++) { 6259 if ((priv->status & STATUS_ASSOCIATED) && 6260 geo->a[i].channel == priv->channel) 6261 continue; 6262 channel_index++; 6263 scan->channels_list[channel_index] = geo->a[i].channel; 6264 ipw_set_scan_type(scan, 
channel_index, 6265 geo->a[i]. 6266 flags & LIBIPW_CH_PASSIVE_ONLY ? 6267 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN : 6268 scan_type); 6269 } 6270 6271 if (start != channel_index) { 6272 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) | 6273 (channel_index - start); 6274 channel_index++; 6275 } 6276 } 6277 6278 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) { 6279 int start = channel_index; 6280 if (priv->config & CFG_SPEED_SCAN) { 6281 int index; 6282 u8 channels[LIBIPW_24GHZ_CHANNELS] = { 6283 /* nop out the list */ 6284 [0] = 0 6285 }; 6286 6287 u8 channel; 6288 while (channel_index < IPW_SCAN_CHANNELS - 1) { 6289 channel = 6290 priv->speed_scan[priv->speed_scan_pos]; 6291 if (channel == 0) { 6292 priv->speed_scan_pos = 0; 6293 channel = priv->speed_scan[0]; 6294 } 6295 if ((priv->status & STATUS_ASSOCIATED) && 6296 channel == priv->channel) { 6297 priv->speed_scan_pos++; 6298 continue; 6299 } 6300 6301 /* If this channel has already been 6302 * added in scan, break from loop 6303 * and this will be the first channel 6304 * in the next scan. 6305 */ 6306 if (channels[channel - 1] != 0) 6307 break; 6308 6309 channels[channel - 1] = 1; 6310 priv->speed_scan_pos++; 6311 channel_index++; 6312 scan->channels_list[channel_index] = channel; 6313 index = 6314 libipw_channel_to_index(priv->ieee, channel); 6315 ipw_set_scan_type(scan, channel_index, 6316 geo->bg[index]. 6317 flags & 6318 LIBIPW_CH_PASSIVE_ONLY ? 6319 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN 6320 : scan_type); 6321 } 6322 } else { 6323 for (i = 0; i < geo->bg_channels; i++) { 6324 if ((priv->status & STATUS_ASSOCIATED) && 6325 geo->bg[i].channel == priv->channel) 6326 continue; 6327 channel_index++; 6328 scan->channels_list[channel_index] = 6329 geo->bg[i].channel; 6330 ipw_set_scan_type(scan, channel_index, 6331 geo->bg[i]. 6332 flags & 6333 LIBIPW_CH_PASSIVE_ONLY ? 6334 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN 6335 : scan_type); 6336 } 6337 } 6338 6339 if (start != channel_index) { 6340 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) | 6341 (channel_index - start); 6342 } 6343 } 6344} 6345 6346static int ipw_passive_dwell_time(struct ipw_priv *priv) 6347{ 6348 /* staying on passive channels longer than the DTIM interval during a 6349 * scan, while associated, causes the firmware to cancel the scan 6350 * without notification. Hence, don't stay on passive channels longer 6351 * than the beacon interval. 6352 */ 6353 if (priv->status & STATUS_ASSOCIATED 6354 && priv->assoc_network->beacon_interval > 10) 6355 return priv->assoc_network->beacon_interval - 10; 6356 else 6357 return 120; 6358} 6359 6360static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct) 6361{ 6362 struct ipw_scan_request_ext scan; 6363 int err = 0, scan_type; 6364 6365 if (!(priv->status & STATUS_INIT) || 6366 (priv->status & STATUS_EXIT_PENDING)) 6367 return 0; 6368 6369 mutex_lock(&priv->mutex); 6370 6371 if (direct && (priv->direct_scan_ssid_len == 0)) { 6372 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n"); 6373 priv->status &= ~STATUS_DIRECT_SCAN_PENDING; 6374 goto done; 6375 } 6376 6377 if (priv->status & STATUS_SCANNING) { 6378 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n"); 6379 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : 6380 STATUS_SCAN_PENDING; 6381 goto done; 6382 } 6383 6384 if (!(priv->status & STATUS_SCAN_FORCED) && 6385 priv->status & STATUS_SCAN_ABORTING) { 6386 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n"); 6387 priv->status |= direct ? 
STATUS_DIRECT_SCAN_PENDING : 6388 STATUS_SCAN_PENDING; 6389 goto done; 6390 } 6391 6392 if (priv->status & STATUS_RF_KILL_MASK) { 6393 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n"); 6394 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING : 6395 STATUS_SCAN_PENDING; 6396 goto done; 6397 } 6398 6399 memset(&scan, 0, sizeof(scan)); 6400 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee)); 6401 6402 if (type == IW_SCAN_TYPE_PASSIVE) { 6403 IPW_DEBUG_WX("use passive scanning\n"); 6404 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN; 6405 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6406 cpu_to_le16(ipw_passive_dwell_time(priv)); 6407 ipw_add_scan_channels(priv, &scan, scan_type); 6408 goto send_request; 6409 } 6410 6411 /* Use active scan by default. */ 6412 if (priv->config & CFG_SPEED_SCAN) 6413 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 6414 cpu_to_le16(30); 6415 else 6416 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 6417 cpu_to_le16(20); 6418 6419 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 6420 cpu_to_le16(20); 6421 6422 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6423 cpu_to_le16(ipw_passive_dwell_time(priv)); 6424 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); 6425 6426#ifdef CONFIG_IPW2200_MONITOR 6427 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 6428 u8 channel; 6429 u8 band = 0; 6430 6431 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 6432 case LIBIPW_52GHZ_BAND: 6433 band = (u8) (IPW_A_MODE << 6) | 1; 6434 channel = priv->channel; 6435 break; 6436 6437 case LIBIPW_24GHZ_BAND: 6438 band = (u8) (IPW_B_MODE << 6) | 1; 6439 channel = priv->channel; 6440 break; 6441 6442 default: 6443 band = (u8) (IPW_B_MODE << 6) | 1; 6444 channel = 9; 6445 break; 6446 } 6447 6448 scan.channels_list[0] = band; 6449 scan.channels_list[1] = channel; 6450 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN); 6451 6452 /* NOTE: The card will sit on this channel for this time 6453 * period. Scan aborts are timing sensitive and frequently 6454 * result in firmware restarts. As such, it is best to 6455 * set a small dwell_time here and just keep re-issuing 6456 * scans. Otherwise fast channel hopping will not actually 6457 * hop channels. 6458 * 6459 * TODO: Move SPEED SCAN support to all modes and bands */ 6460 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6461 cpu_to_le16(2000); 6462 } else { 6463#endif /* CONFIG_IPW2200_MONITOR */ 6464 /* Honor direct scans first, otherwise if we are roaming make 6465 * this a direct scan for the current network. 
Finally, 6466 * ensure that every other scan is a fast channel hop scan */ 6467 if (direct) { 6468 err = ipw_send_ssid(priv, priv->direct_scan_ssid, 6469 priv->direct_scan_ssid_len); 6470 if (err) { 6471 IPW_DEBUG_HC("Attempt to send SSID command " 6472 "failed\n"); 6473 goto done; 6474 } 6475 6476 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; 6477 } else if ((priv->status & STATUS_ROAMING) 6478 || (!(priv->status & STATUS_ASSOCIATED) 6479 && (priv->config & CFG_STATIC_ESSID) 6480 && (le32_to_cpu(scan.full_scan_index) % 2))) { 6481 err = ipw_send_ssid(priv, priv->essid, priv->essid_len); 6482 if (err) { 6483 IPW_DEBUG_HC("Attempt to send SSID command " 6484 "failed.\n"); 6485 goto done; 6486 } 6487 6488 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN; 6489 } else 6490 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN; 6491 6492 ipw_add_scan_channels(priv, &scan, scan_type); 6493#ifdef CONFIG_IPW2200_MONITOR 6494 } 6495#endif 6496 6497send_request: 6498 err = ipw_send_scan_request_ext(priv, &scan); 6499 if (err) { 6500 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err); 6501 goto done; 6502 } 6503 6504 priv->status |= STATUS_SCANNING; 6505 if (direct) { 6506 priv->status &= ~STATUS_DIRECT_SCAN_PENDING; 6507 priv->direct_scan_ssid_len = 0; 6508 } else 6509 priv->status &= ~STATUS_SCAN_PENDING; 6510 6511 queue_delayed_work(priv->workqueue, &priv->scan_check, 6512 IPW_SCAN_CHECK_WATCHDOG); 6513done: 6514 mutex_unlock(&priv->mutex); 6515 return err; 6516} 6517 6518static void ipw_request_passive_scan(struct work_struct *work) 6519{ 6520 struct ipw_priv *priv = 6521 container_of(work, struct ipw_priv, request_passive_scan.work); 6522 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0); 6523} 6524 6525static void ipw_request_scan(struct work_struct *work) 6526{ 6527 struct ipw_priv *priv = 6528 container_of(work, struct ipw_priv, request_scan.work); 6529 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0); 6530} 6531 6532static void ipw_request_direct_scan(struct work_struct *work) 6533{ 6534 struct ipw_priv *priv = 6535 container_of(work, struct ipw_priv, request_direct_scan.work); 6536 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1); 6537} 6538 6539static void ipw_bg_abort_scan(struct work_struct *work) 6540{ 6541 struct ipw_priv *priv = 6542 container_of(work, struct ipw_priv, abort_scan); 6543 mutex_lock(&priv->mutex); 6544 ipw_abort_scan(priv); 6545 mutex_unlock(&priv->mutex); 6546} 6547 6548static int ipw_wpa_enable(struct ipw_priv *priv, int value) 6549{ 6550 /* This is called when wpa_supplicant loads and closes the driver 6551 * interface. 
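 * It is also called from ipw_wpa_assoc_frame() and from the
 * IW_AUTH_WPA_ENABLED handler in ipw_wx_set_auth() below.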
*/ 6552 priv->ieee->wpa_enabled = value; 6553 return 0; 6554} 6555 6556static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value) 6557{ 6558 struct libipw_device *ieee = priv->ieee; 6559 struct libipw_security sec = { 6560 .flags = SEC_AUTH_MODE, 6561 }; 6562 int ret = 0; 6563 6564 if (value & IW_AUTH_ALG_SHARED_KEY) { 6565 sec.auth_mode = WLAN_AUTH_SHARED_KEY; 6566 ieee->open_wep = 0; 6567 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { 6568 sec.auth_mode = WLAN_AUTH_OPEN; 6569 ieee->open_wep = 1; 6570 } else if (value & IW_AUTH_ALG_LEAP) { 6571 sec.auth_mode = WLAN_AUTH_LEAP; 6572 ieee->open_wep = 1; 6573 } else 6574 return -EINVAL; 6575 6576 if (ieee->set_security) 6577 ieee->set_security(ieee->dev, &sec); 6578 else 6579 ret = -EOPNOTSUPP; 6580 6581 return ret; 6582} 6583 6584static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie, 6585 int wpa_ie_len) 6586{ 6587 /* make sure WPA is enabled */ 6588 ipw_wpa_enable(priv, 1); 6589} 6590 6591static int ipw_set_rsn_capa(struct ipw_priv *priv, 6592 char *capabilities, int length) 6593{ 6594 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n"); 6595 6596 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length, 6597 capabilities); 6598} 6599 6600/* 6601 * WE-18 support 6602 */ 6603 6604/* SIOCSIWGENIE */ 6605static int ipw_wx_set_genie(struct net_device *dev, 6606 struct iw_request_info *info, 6607 union iwreq_data *wrqu, char *extra) 6608{ 6609 struct ipw_priv *priv = libipw_priv(dev); 6610 struct libipw_device *ieee = priv->ieee; 6611 u8 *buf; 6612 int err = 0; 6613 6614 if (wrqu->data.length > MAX_WPA_IE_LEN || 6615 (wrqu->data.length && extra == NULL)) 6616 return -EINVAL; 6617 6618 if (wrqu->data.length) { 6619 buf = kmalloc(wrqu->data.length, GFP_KERNEL); 6620 if (buf == NULL) { 6621 err = -ENOMEM; 6622 goto out; 6623 } 6624 6625 memcpy(buf, extra, wrqu->data.length); 6626 kfree(ieee->wpa_ie); 6627 ieee->wpa_ie = buf; 6628 ieee->wpa_ie_len = wrqu->data.length; 6629 } else { 6630 kfree(ieee->wpa_ie); 6631 ieee->wpa_ie = NULL; 6632 ieee->wpa_ie_len = 0; 6633 } 6634 6635 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); 6636 out: 6637 return err; 6638} 6639 6640/* SIOCGIWGENIE */ 6641static int ipw_wx_get_genie(struct net_device *dev, 6642 struct iw_request_info *info, 6643 union iwreq_data *wrqu, char *extra) 6644{ 6645 struct ipw_priv *priv = libipw_priv(dev); 6646 struct libipw_device *ieee = priv->ieee; 6647 int err = 0; 6648 6649 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) { 6650 wrqu->data.length = 0; 6651 goto out; 6652 } 6653 6654 if (wrqu->data.length < ieee->wpa_ie_len) { 6655 err = -E2BIG; 6656 goto out; 6657 } 6658 6659 wrqu->data.length = ieee->wpa_ie_len; 6660 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); 6661 6662 out: 6663 return err; 6664} 6665 6666static int wext_cipher2level(int cipher) 6667{ 6668 switch (cipher) { 6669 case IW_AUTH_CIPHER_NONE: 6670 return SEC_LEVEL_0; 6671 case IW_AUTH_CIPHER_WEP40: 6672 case IW_AUTH_CIPHER_WEP104: 6673 return SEC_LEVEL_1; 6674 case IW_AUTH_CIPHER_TKIP: 6675 return SEC_LEVEL_2; 6676 case IW_AUTH_CIPHER_CCMP: 6677 return SEC_LEVEL_3; 6678 default: 6679 return -1; 6680 } 6681} 6682 6683/* SIOCSIWAUTH */ 6684static int ipw_wx_set_auth(struct net_device *dev, 6685 struct iw_request_info *info, 6686 union iwreq_data *wrqu, char *extra) 6687{ 6688 struct ipw_priv *priv = libipw_priv(dev); 6689 struct libipw_device *ieee = priv->ieee; 6690 struct iw_param *param = &wrqu->param; 6691 struct lib80211_crypt_data *crypt; 6692 unsigned long flags; 6693 int ret = 0; 
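	/*
	 * Dispatch on which WE-18 auth parameter is being set.  Most cases
	 * simply mirror the value into libipw state; the pairwise/group
	 * cipher cases additionally adjust how much decryption is left to
	 * the firmware via ipw_set_hw_decrypt_unicast()/_multicast().
	 */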
6694 6695 switch (param->flags & IW_AUTH_INDEX) { 6696 case IW_AUTH_WPA_VERSION: 6697 break; 6698 case IW_AUTH_CIPHER_PAIRWISE: 6699 ipw_set_hw_decrypt_unicast(priv, 6700 wext_cipher2level(param->value)); 6701 break; 6702 case IW_AUTH_CIPHER_GROUP: 6703 ipw_set_hw_decrypt_multicast(priv, 6704 wext_cipher2level(param->value)); 6705 break; 6706 case IW_AUTH_KEY_MGMT: 6707 /* 6708 * ipw2200 does not use these parameters 6709 */ 6710 break; 6711 6712 case IW_AUTH_TKIP_COUNTERMEASURES: 6713 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; 6714 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags) 6715 break; 6716 6717 flags = crypt->ops->get_flags(crypt->priv); 6718 6719 if (param->value) 6720 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; 6721 else 6722 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; 6723 6724 crypt->ops->set_flags(flags, crypt->priv); 6725 6726 break; 6727 6728 case IW_AUTH_DROP_UNENCRYPTED:{ 6729 /* HACK: 6730 * 6731 * wpa_supplicant calls set_wpa_enabled when the driver 6732 * is loaded and unloaded, regardless of if WPA is being 6733 * used. No other calls are made which can be used to 6734 * determine if encryption will be used or not prior to 6735 * association being expected. If encryption is not being 6736 * used, drop_unencrypted is set to false, else true -- we 6737 * can use this to determine if the CAP_PRIVACY_ON bit should 6738 * be set. 6739 */ 6740 struct libipw_security sec = { 6741 .flags = SEC_ENABLED, 6742 .enabled = param->value, 6743 }; 6744 priv->ieee->drop_unencrypted = param->value; 6745 /* We only change SEC_LEVEL for open mode. Others 6746 * are set by ipw_wpa_set_encryption. 6747 */ 6748 if (!param->value) { 6749 sec.flags |= SEC_LEVEL; 6750 sec.level = SEC_LEVEL_0; 6751 } else { 6752 sec.flags |= SEC_LEVEL; 6753 sec.level = SEC_LEVEL_1; 6754 } 6755 if (priv->ieee->set_security) 6756 priv->ieee->set_security(priv->ieee->dev, &sec); 6757 break; 6758 } 6759 6760 case IW_AUTH_80211_AUTH_ALG: 6761 ret = ipw_wpa_set_auth_algs(priv, param->value); 6762 break; 6763 6764 case IW_AUTH_WPA_ENABLED: 6765 ret = ipw_wpa_enable(priv, param->value); 6766 ipw_disassociate(priv); 6767 break; 6768 6769 case IW_AUTH_RX_UNENCRYPTED_EAPOL: 6770 ieee->ieee802_1x = param->value; 6771 break; 6772 6773 case IW_AUTH_PRIVACY_INVOKED: 6774 ieee->privacy_invoked = param->value; 6775 break; 6776 6777 default: 6778 return -EOPNOTSUPP; 6779 } 6780 return ret; 6781} 6782 6783/* SIOCGIWAUTH */ 6784static int ipw_wx_get_auth(struct net_device *dev, 6785 struct iw_request_info *info, 6786 union iwreq_data *wrqu, char *extra) 6787{ 6788 struct ipw_priv *priv = libipw_priv(dev); 6789 struct libipw_device *ieee = priv->ieee; 6790 struct lib80211_crypt_data *crypt; 6791 struct iw_param *param = &wrqu->param; 6792 int ret = 0; 6793 6794 switch (param->flags & IW_AUTH_INDEX) { 6795 case IW_AUTH_WPA_VERSION: 6796 case IW_AUTH_CIPHER_PAIRWISE: 6797 case IW_AUTH_CIPHER_GROUP: 6798 case IW_AUTH_KEY_MGMT: 6799 /* 6800 * wpa_supplicant will control these internally 6801 */ 6802 ret = -EOPNOTSUPP; 6803 break; 6804 6805 case IW_AUTH_TKIP_COUNTERMEASURES: 6806 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; 6807 if (!crypt || !crypt->ops->get_flags) 6808 break; 6809 6810 param->value = (crypt->ops->get_flags(crypt->priv) & 6811 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 
1 : 0; 6812 6813 break; 6814 6815 case IW_AUTH_DROP_UNENCRYPTED: 6816 param->value = ieee->drop_unencrypted; 6817 break; 6818 6819 case IW_AUTH_80211_AUTH_ALG: 6820 param->value = ieee->sec.auth_mode; 6821 break; 6822 6823 case IW_AUTH_WPA_ENABLED: 6824 param->value = ieee->wpa_enabled; 6825 break; 6826 6827 case IW_AUTH_RX_UNENCRYPTED_EAPOL: 6828 param->value = ieee->ieee802_1x; 6829 break; 6830 6831 case IW_AUTH_ROAMING_CONTROL: 6832 case IW_AUTH_PRIVACY_INVOKED: 6833 param->value = ieee->privacy_invoked; 6834 break; 6835 6836 default: 6837 return -EOPNOTSUPP; 6838 } 6839 return 0; 6840} 6841 6842/* SIOCSIWENCODEEXT */ 6843static int ipw_wx_set_encodeext(struct net_device *dev, 6844 struct iw_request_info *info, 6845 union iwreq_data *wrqu, char *extra) 6846{ 6847 struct ipw_priv *priv = libipw_priv(dev); 6848 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 6849 6850 if (hwcrypto) { 6851 if (ext->alg == IW_ENCODE_ALG_TKIP) { 6852 /* IPW HW can't build TKIP MIC, 6853 host decryption still needed */ 6854 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) 6855 priv->ieee->host_mc_decrypt = 1; 6856 else { 6857 priv->ieee->host_encrypt = 0; 6858 priv->ieee->host_encrypt_msdu = 1; 6859 priv->ieee->host_decrypt = 1; 6860 } 6861 } else { 6862 priv->ieee->host_encrypt = 0; 6863 priv->ieee->host_encrypt_msdu = 0; 6864 priv->ieee->host_decrypt = 0; 6865 priv->ieee->host_mc_decrypt = 0; 6866 } 6867 } 6868 6869 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra); 6870} 6871 6872/* SIOCGIWENCODEEXT */ 6873static int ipw_wx_get_encodeext(struct net_device *dev, 6874 struct iw_request_info *info, 6875 union iwreq_data *wrqu, char *extra) 6876{ 6877 struct ipw_priv *priv = libipw_priv(dev); 6878 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra); 6879} 6880 6881/* SIOCSIWMLME */ 6882static int ipw_wx_set_mlme(struct net_device *dev, 6883 struct iw_request_info *info, 6884 union iwreq_data *wrqu, char *extra) 6885{ 6886 struct ipw_priv *priv = libipw_priv(dev); 6887 struct iw_mlme *mlme = (struct iw_mlme *)extra; 6888 __le16 reason; 6889 6890 reason = cpu_to_le16(mlme->reason_code); 6891 6892 switch (mlme->cmd) { 6893 case IW_MLME_DEAUTH: 6894 /* silently ignore */ 6895 break; 6896 6897 case IW_MLME_DISASSOC: 6898 ipw_disassociate(priv); 6899 break; 6900 6901 default: 6902 return -EOPNOTSUPP; 6903 } 6904 return 0; 6905} 6906 6907#ifdef CONFIG_IPW2200_QOS 6908 6909/* QoS */ 6910/* 6911* get the modulation type of the current network or 6912* the card current mode 6913*/ 6914static u8 ipw_qos_current_mode(struct ipw_priv * priv) 6915{ 6916 u8 mode = 0; 6917 6918 if (priv->status & STATUS_ASSOCIATED) { 6919 unsigned long flags; 6920 6921 spin_lock_irqsave(&priv->ieee->lock, flags); 6922 mode = priv->assoc_network->mode; 6923 spin_unlock_irqrestore(&priv->ieee->lock, flags); 6924 } else { 6925 mode = priv->ieee->mode; 6926 } 6927 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode); 6928 return mode; 6929} 6930 6931/* 6932* Handle management frame beacon and probe response 6933*/ 6934static int ipw_qos_handle_probe_response(struct ipw_priv *priv, 6935 int active_network, 6936 struct libipw_network *network) 6937{ 6938 u32 size = sizeof(struct libipw_qos_parameters); 6939 6940 if (network->capability & WLAN_CAPABILITY_IBSS) 6941 network->qos_data.active = network->qos_data.supported; 6942 6943 if (network->flags & NETWORK_HAS_QOS_MASK) { 6944 if (active_network && 6945 (network->flags & NETWORK_HAS_QOS_PARAMETERS)) 6946 network->qos_data.active = network->qos_data.supported; 6947 6948 
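		/* When the network's QoS parameter set count changes, the
		 * updated parameters must be sent to the firmware again;
		 * the check below schedules qos_activate to do so. */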
if ((network->qos_data.active == 1) && (active_network == 1) && 6949 (network->flags & NETWORK_HAS_QOS_PARAMETERS) && 6950 (network->qos_data.old_param_count != 6951 network->qos_data.param_count)) { 6952 network->qos_data.old_param_count = 6953 network->qos_data.param_count; 6954 schedule_work(&priv->qos_activate); 6955 IPW_DEBUG_QOS("QoS parameters change call " 6956 "qos_activate\n"); 6957 } 6958 } else { 6959 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B)) 6960 memcpy(&network->qos_data.parameters, 6961 &def_parameters_CCK, size); 6962 else 6963 memcpy(&network->qos_data.parameters, 6964 &def_parameters_OFDM, size); 6965 6966 if ((network->qos_data.active == 1) && (active_network == 1)) { 6967 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n"); 6968 schedule_work(&priv->qos_activate); 6969 } 6970 6971 network->qos_data.active = 0; 6972 network->qos_data.supported = 0; 6973 } 6974 if ((priv->status & STATUS_ASSOCIATED) && 6975 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) { 6976 if (memcmp(network->bssid, priv->bssid, ETH_ALEN)) 6977 if (network->capability & WLAN_CAPABILITY_IBSS) 6978 if ((network->ssid_len == 6979 priv->assoc_network->ssid_len) && 6980 !memcmp(network->ssid, 6981 priv->assoc_network->ssid, 6982 network->ssid_len)) { 6983 queue_work(priv->workqueue, 6984 &priv->merge_networks); 6985 } 6986 } 6987 6988 return 0; 6989} 6990 6991/* 6992* This function set up the firmware to support QoS. It sends 6993* IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO 6994*/ 6995static int ipw_qos_activate(struct ipw_priv *priv, 6996 struct libipw_qos_data *qos_network_data) 6997{ 6998 int err; 6999 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS]; 7000 struct libipw_qos_parameters *active_one = NULL; 7001 u32 size = sizeof(struct libipw_qos_parameters); 7002 u32 burst_duration; 7003 int i; 7004 u8 type; 7005 7006 type = ipw_qos_current_mode(priv); 7007 7008 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]); 7009 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size); 7010 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]); 7011 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size); 7012 7013 if (qos_network_data == NULL) { 7014 if (type == IEEE_B) { 7015 IPW_DEBUG_QOS("QoS activate network mode %d\n", type); 7016 active_one = &def_parameters_CCK; 7017 } else 7018 active_one = &def_parameters_OFDM; 7019 7020 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 7021 burst_duration = ipw_qos_get_burst_duration(priv); 7022 for (i = 0; i < QOS_QUEUE_NUM; i++) 7023 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] = 7024 cpu_to_le16(burst_duration); 7025 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7026 if (type == IEEE_B) { 7027 IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n", 7028 type); 7029 if (priv->qos_data.qos_enable == 0) 7030 active_one = &def_parameters_CCK; 7031 else 7032 active_one = priv->qos_data.def_qos_parm_CCK; 7033 } else { 7034 if (priv->qos_data.qos_enable == 0) 7035 active_one = &def_parameters_OFDM; 7036 else 7037 active_one = priv->qos_data.def_qos_parm_OFDM; 7038 } 7039 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 7040 } else { 7041 unsigned long flags; 7042 int active; 7043 7044 spin_lock_irqsave(&priv->ieee->lock, flags); 7045 active_one = &(qos_network_data->parameters); 7046 qos_network_data->old_param_count = 7047 qos_network_data->param_count; 7048 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size); 7049 active = qos_network_data->supported; 7050 
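		/* 'active' (whether the network advertises QoS support) is
		 * sampled under the lock; the burst-duration fallback below
		 * runs after the lock is released. */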
spin_unlock_irqrestore(&priv->ieee->lock, flags); 7051 7052 if (active == 0) { 7053 burst_duration = ipw_qos_get_burst_duration(priv); 7054 for (i = 0; i < QOS_QUEUE_NUM; i++) 7055 qos_parameters[QOS_PARAM_SET_ACTIVE]. 7056 tx_op_limit[i] = cpu_to_le16(burst_duration); 7057 } 7058 } 7059 7060 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n"); 7061 err = ipw_send_qos_params_command(priv, 7062 (struct libipw_qos_parameters *) 7063 &(qos_parameters[0])); 7064 if (err) 7065 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n"); 7066 7067 return err; 7068} 7069 7070/* 7071* send IPW_CMD_WME_INFO to the firmware 7072*/ 7073static int ipw_qos_set_info_element(struct ipw_priv *priv) 7074{ 7075 int ret = 0; 7076 struct libipw_qos_information_element qos_info; 7077 7078 if (priv == NULL) 7079 return -1; 7080 7081 qos_info.elementID = QOS_ELEMENT_ID; 7082 qos_info.length = sizeof(struct libipw_qos_information_element) - 2; 7083 7084 qos_info.version = QOS_VERSION_1; 7085 qos_info.ac_info = 0; 7086 7087 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN); 7088 qos_info.qui_type = QOS_OUI_TYPE; 7089 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE; 7090 7091 ret = ipw_send_qos_info_command(priv, &qos_info); 7092 if (ret != 0) { 7093 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n"); 7094 } 7095 return ret; 7096} 7097 7098/* 7099* Set the QoS parameter with the association request structure 7100*/ 7101static int ipw_qos_association(struct ipw_priv *priv, 7102 struct libipw_network *network) 7103{ 7104 int err = 0; 7105 struct libipw_qos_data *qos_data = NULL; 7106 struct libipw_qos_data ibss_data = { 7107 .supported = 1, 7108 .active = 1, 7109 }; 7110 7111 switch (priv->ieee->iw_mode) { 7112 case IW_MODE_ADHOC: 7113 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS)); 7114 7115 qos_data = &ibss_data; 7116 break; 7117 7118 case IW_MODE_INFRA: 7119 qos_data = &network->qos_data; 7120 break; 7121 7122 default: 7123 BUG(); 7124 break; 7125 } 7126 7127 err = ipw_qos_activate(priv, qos_data); 7128 if (err) { 7129 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC; 7130 return err; 7131 } 7132 7133 if (priv->qos_data.qos_enable && qos_data->supported) { 7134 IPW_DEBUG_QOS("QoS will be enabled for this association\n"); 7135 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC; 7136 return ipw_qos_set_info_element(priv); 7137 } 7138 7139 return 0; 7140} 7141 7142/* 7143* handling the beaconing responses. 
if we get different QoS setting 7144* off the network from the associated setting, adjust the QoS 7145* setting 7146*/ 7147static int ipw_qos_association_resp(struct ipw_priv *priv, 7148 struct libipw_network *network) 7149{ 7150 int ret = 0; 7151 unsigned long flags; 7152 u32 size = sizeof(struct libipw_qos_parameters); 7153 int set_qos_param = 0; 7154 7155 if ((priv == NULL) || (network == NULL) || 7156 (priv->assoc_network == NULL)) 7157 return ret; 7158 7159 if (!(priv->status & STATUS_ASSOCIATED)) 7160 return ret; 7161 7162 if ((priv->ieee->iw_mode != IW_MODE_INFRA)) 7163 return ret; 7164 7165 spin_lock_irqsave(&priv->ieee->lock, flags); 7166 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) { 7167 memcpy(&priv->assoc_network->qos_data, &network->qos_data, 7168 sizeof(struct libipw_qos_data)); 7169 priv->assoc_network->qos_data.active = 1; 7170 if ((network->qos_data.old_param_count != 7171 network->qos_data.param_count)) { 7172 set_qos_param = 1; 7173 network->qos_data.old_param_count = 7174 network->qos_data.param_count; 7175 } 7176 7177 } else { 7178 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B)) 7179 memcpy(&priv->assoc_network->qos_data.parameters, 7180 &def_parameters_CCK, size); 7181 else 7182 memcpy(&priv->assoc_network->qos_data.parameters, 7183 &def_parameters_OFDM, size); 7184 priv->assoc_network->qos_data.active = 0; 7185 priv->assoc_network->qos_data.supported = 0; 7186 set_qos_param = 1; 7187 } 7188 7189 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7190 7191 if (set_qos_param == 1) 7192 schedule_work(&priv->qos_activate); 7193 7194 return ret; 7195} 7196 7197static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv) 7198{ 7199 u32 ret = 0; 7200 7201 if ((priv == NULL)) 7202 return 0; 7203 7204 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION)) 7205 ret = priv->qos_data.burst_duration_CCK; 7206 else 7207 ret = priv->qos_data.burst_duration_OFDM; 7208 7209 return ret; 7210} 7211 7212/* 7213* Initialize the setting of QoS global 7214*/ 7215static void ipw_qos_init(struct ipw_priv *priv, int enable, 7216 int burst_enable, u32 burst_duration_CCK, 7217 u32 burst_duration_OFDM) 7218{ 7219 priv->qos_data.qos_enable = enable; 7220 7221 if (priv->qos_data.qos_enable) { 7222 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK; 7223 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM; 7224 IPW_DEBUG_QOS("QoS is enabled\n"); 7225 } else { 7226 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK; 7227 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM; 7228 IPW_DEBUG_QOS("QoS is not enabled\n"); 7229 } 7230 7231 priv->qos_data.burst_enable = burst_enable; 7232 7233 if (burst_enable) { 7234 priv->qos_data.burst_duration_CCK = burst_duration_CCK; 7235 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM; 7236 } else { 7237 priv->qos_data.burst_duration_CCK = 0; 7238 priv->qos_data.burst_duration_OFDM = 0; 7239 } 7240} 7241 7242/* 7243* map the packet priority to the right TX Queue 7244*/ 7245static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority) 7246{ 7247 if (priority > 7 || !priv->qos_data.qos_enable) 7248 priority = 0; 7249 7250 return from_priority_to_tx_queue[priority] - 1; 7251} 7252 7253static int ipw_is_qos_active(struct net_device *dev, 7254 struct sk_buff *skb) 7255{ 7256 struct ipw_priv *priv = libipw_priv(dev); 7257 struct libipw_qos_data *qos_data = NULL; 7258 int active, supported; 7259 u8 *daddr = skb->data + ETH_ALEN; 7260 int unicast = !is_multicast_ether_addr(daddr); 7261 7262 if (!(priv->status & 
STATUS_ASSOCIATED)) 7263 return 0; 7264 7265 qos_data = &priv->assoc_network->qos_data; 7266 7267 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7268 if (unicast == 0) 7269 qos_data->active = 0; 7270 else 7271 qos_data->active = qos_data->supported; 7272 } 7273 active = qos_data->active; 7274 supported = qos_data->supported; 7275 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d " 7276 "unicast %d\n", 7277 priv->qos_data.qos_enable, active, supported, unicast); 7278 if (active && priv->qos_data.qos_enable) 7279 return 1; 7280 7281 return 0; 7282 7283} 7284/* 7285* add QoS parameter to the TX command 7286*/ 7287static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv, 7288 u16 priority, 7289 struct tfd_data *tfd) 7290{ 7291 int tx_queue_id = 0; 7292 7293 7294 tx_queue_id = from_priority_to_tx_queue[priority] - 1; 7295 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED; 7296 7297 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) { 7298 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD; 7299 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK); 7300 } 7301 return 0; 7302} 7303 7304/* 7305* background support to run QoS activate functionality 7306*/ 7307static void ipw_bg_qos_activate(struct work_struct *work) 7308{ 7309 struct ipw_priv *priv = 7310 container_of(work, struct ipw_priv, qos_activate); 7311 7312 mutex_lock(&priv->mutex); 7313 7314 if (priv->status & STATUS_ASSOCIATED) 7315 ipw_qos_activate(priv, &(priv->assoc_network->qos_data)); 7316 7317 mutex_unlock(&priv->mutex); 7318} 7319 7320static int ipw_handle_probe_response(struct net_device *dev, 7321 struct libipw_probe_response *resp, 7322 struct libipw_network *network) 7323{ 7324 struct ipw_priv *priv = libipw_priv(dev); 7325 int active_network = ((priv->status & STATUS_ASSOCIATED) && 7326 (network == priv->assoc_network)); 7327 7328 ipw_qos_handle_probe_response(priv, active_network, network); 7329 7330 return 0; 7331} 7332 7333static int ipw_handle_beacon(struct net_device *dev, 7334 struct libipw_beacon *resp, 7335 struct libipw_network *network) 7336{ 7337 struct ipw_priv *priv = libipw_priv(dev); 7338 int active_network = ((priv->status & STATUS_ASSOCIATED) && 7339 (network == priv->assoc_network)); 7340 7341 ipw_qos_handle_probe_response(priv, active_network, network); 7342 7343 return 0; 7344} 7345 7346static int ipw_handle_assoc_response(struct net_device *dev, 7347 struct libipw_assoc_response *resp, 7348 struct libipw_network *network) 7349{ 7350 struct ipw_priv *priv = libipw_priv(dev); 7351 ipw_qos_association_resp(priv, network); 7352 return 0; 7353} 7354 7355static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters 7356 *qos_param) 7357{ 7358 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS, 7359 sizeof(*qos_param) * 3, qos_param); 7360} 7361 7362static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element 7363 *qos_param) 7364{ 7365 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param), 7366 qos_param); 7367} 7368 7369#endif /* CONFIG_IPW2200_QOS */ 7370 7371static int ipw_associate_network(struct ipw_priv *priv, 7372 struct libipw_network *network, 7373 struct ipw_supported_rates *rates, int roaming) 7374{ 7375 int err; 7376 DECLARE_SSID_BUF(ssid); 7377 7378 if (priv->config & CFG_FIXED_RATE) 7379 ipw_set_fixed_rate(priv, network->mode); 7380 7381 if (!(priv->config & CFG_STATIC_ESSID)) { 7382 priv->essid_len = min(network->ssid_len, 7383 (u8) IW_ESSID_MAX_SIZE); 7384 memcpy(priv->essid, network->ssid, 
priv->essid_len); 7385 } 7386 7387 network->last_associate = jiffies; 7388 7389 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request)); 7390 priv->assoc_request.channel = network->channel; 7391 priv->assoc_request.auth_key = 0; 7392 7393 if ((priv->capability & CAP_PRIVACY_ON) && 7394 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) { 7395 priv->assoc_request.auth_type = AUTH_SHARED_KEY; 7396 priv->assoc_request.auth_key = priv->ieee->sec.active_key; 7397 7398 if (priv->ieee->sec.level == SEC_LEVEL_1) 7399 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP); 7400 7401 } else if ((priv->capability & CAP_PRIVACY_ON) && 7402 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP)) 7403 priv->assoc_request.auth_type = AUTH_LEAP; 7404 else 7405 priv->assoc_request.auth_type = AUTH_OPEN; 7406 7407 if (priv->ieee->wpa_ie_len) { 7408 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */ 7409 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie, 7410 priv->ieee->wpa_ie_len); 7411 } 7412 7413 /* 7414 * It is valid for our ieee device to support multiple modes, but 7415 * when it comes to associating to a given network we have to choose 7416 * just one mode. 7417 */ 7418 if (network->mode & priv->ieee->mode & IEEE_A) 7419 priv->assoc_request.ieee_mode = IPW_A_MODE; 7420 else if (network->mode & priv->ieee->mode & IEEE_G) 7421 priv->assoc_request.ieee_mode = IPW_G_MODE; 7422 else if (network->mode & priv->ieee->mode & IEEE_B) 7423 priv->assoc_request.ieee_mode = IPW_B_MODE; 7424 7425 priv->assoc_request.capability = cpu_to_le16(network->capability); 7426 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 7427 && !(priv->config & CFG_PREAMBLE_LONG)) { 7428 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE; 7429 } else { 7430 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE; 7431 7432 /* Clear the short preamble if we won't be supporting it */ 7433 priv->assoc_request.capability &= 7434 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE); 7435 } 7436 7437 /* Clear capability bits that aren't used in Ad Hoc */ 7438 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 7439 priv->assoc_request.capability &= 7440 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME); 7441 7442 IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, " 7443 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n", 7444 roaming ? "Rea" : "A", 7445 print_ssid(ssid, priv->essid, priv->essid_len), 7446 network->channel, 7447 ipw_modes[priv->assoc_request.ieee_mode], 7448 rates->num_rates, 7449 (priv->assoc_request.preamble_length == 7450 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short", 7451 network->capability & 7452 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long", 7453 priv->capability & CAP_PRIVACY_ON ? "on " : "off", 7454 priv->capability & CAP_PRIVACY_ON ? 7455 (priv->capability & CAP_SHARED_KEY ? "(shared)" : 7456 "(open)") : "", 7457 priv->capability & CAP_PRIVACY_ON ? " key=" : "", 7458 priv->capability & CAP_PRIVACY_ON ? 7459 '1' + priv->ieee->sec.active_key : '.', 7460 priv->capability & CAP_PRIVACY_ON ? '.' 
: ' '); 7461 7462 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval); 7463 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 7464 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) { 7465 priv->assoc_request.assoc_type = HC_IBSS_START; 7466 priv->assoc_request.assoc_tsf_msw = 0; 7467 priv->assoc_request.assoc_tsf_lsw = 0; 7468 } else { 7469 if (unlikely(roaming)) 7470 priv->assoc_request.assoc_type = HC_REASSOCIATE; 7471 else 7472 priv->assoc_request.assoc_type = HC_ASSOCIATE; 7473 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]); 7474 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]); 7475 } 7476 7477 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN); 7478 7479 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7480 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN); 7481 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window); 7482 } else { 7483 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN); 7484 priv->assoc_request.atim_window = 0; 7485 } 7486 7487 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval); 7488 7489 err = ipw_send_ssid(priv, priv->essid, priv->essid_len); 7490 if (err) { 7491 IPW_DEBUG_HC("Attempt to send SSID command failed.\n"); 7492 return err; 7493 } 7494 7495 rates->ieee_mode = priv->assoc_request.ieee_mode; 7496 rates->purpose = IPW_RATE_CONNECT; 7497 ipw_send_supported_rates(priv, rates); 7498 7499 if (priv->assoc_request.ieee_mode == IPW_G_MODE) 7500 priv->sys_config.dot11g_auto_detection = 1; 7501 else 7502 priv->sys_config.dot11g_auto_detection = 0; 7503 7504 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 7505 priv->sys_config.answer_broadcast_ssid_probe = 1; 7506 else 7507 priv->sys_config.answer_broadcast_ssid_probe = 0; 7508 7509 err = ipw_send_system_config(priv); 7510 if (err) { 7511 IPW_DEBUG_HC("Attempt to send sys config command failed.\n"); 7512 return err; 7513 } 7514 7515 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi); 7516 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM); 7517 if (err) { 7518 IPW_DEBUG_HC("Attempt to send associate command failed.\n"); 7519 return err; 7520 } 7521 7522 /* 7523 * If preemption is enabled, it is possible for the association 7524 * to complete before we return from ipw_send_associate. Therefore 7525 * we have to be sure and update our priviate data first. 7526 */ 7527 priv->channel = network->channel; 7528 memcpy(priv->bssid, network->bssid, ETH_ALEN); 7529 priv->status |= STATUS_ASSOCIATING; 7530 priv->status &= ~STATUS_SECURITY_UPDATED; 7531 7532 priv->assoc_network = network; 7533 7534#ifdef CONFIG_IPW2200_QOS 7535 ipw_qos_association(priv, network); 7536#endif 7537 7538 err = ipw_send_associate(priv, &priv->assoc_request); 7539 if (err) { 7540 IPW_DEBUG_HC("Attempt to send associate command failed.\n"); 7541 return err; 7542 } 7543 7544 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n", 7545 print_ssid(ssid, priv->essid, priv->essid_len), 7546 priv->bssid); 7547 7548 return 0; 7549} 7550 7551static void ipw_roam(void *data) 7552{ 7553 struct ipw_priv *priv = data; 7554 struct libipw_network *network = NULL; 7555 struct ipw_network_match match = { 7556 .network = priv->assoc_network 7557 }; 7558 7559 /* The roaming process is as follows: 7560 * 7561 * 1. Missed beacon threshold triggers the roaming process by 7562 * setting the status ROAM bit and requesting a scan. 7563 * 2. When the scan completes, it schedules the ROAM work 7564 * 3. 
The ROAM work looks at all of the known networks for one that 7565 * is a better network than the currently associated. If none 7566 * found, the ROAM process is over (ROAM bit cleared) 7567 * 4. If a better network is found, a disassociation request is 7568 * sent. 7569 * 5. When the disassociation completes, the roam work is again 7570 * scheduled. The second time through, the driver is no longer 7571 * associated, and the newly selected network is sent an 7572 * association request. 7573 * 6. At this point ,the roaming process is complete and the ROAM 7574 * status bit is cleared. 7575 */ 7576 7577 /* If we are no longer associated, and the roaming bit is no longer 7578 * set, then we are not actively roaming, so just return */ 7579 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING))) 7580 return; 7581 7582 if (priv->status & STATUS_ASSOCIATED) { 7583 /* First pass through ROAM process -- look for a better 7584 * network */ 7585 unsigned long flags; 7586 u8 rssi = priv->assoc_network->stats.rssi; 7587 priv->assoc_network->stats.rssi = -128; 7588 spin_lock_irqsave(&priv->ieee->lock, flags); 7589 list_for_each_entry(network, &priv->ieee->network_list, list) { 7590 if (network != priv->assoc_network) 7591 ipw_best_network(priv, &match, network, 1); 7592 } 7593 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7594 priv->assoc_network->stats.rssi = rssi; 7595 7596 if (match.network == priv->assoc_network) { 7597 IPW_DEBUG_ASSOC("No better APs in this network to " 7598 "roam to.\n"); 7599 priv->status &= ~STATUS_ROAMING; 7600 ipw_debug_config(priv); 7601 return; 7602 } 7603 7604 ipw_send_disassociate(priv, 1); 7605 priv->assoc_network = match.network; 7606 7607 return; 7608 } 7609 7610 /* Second pass through ROAM process -- request association */ 7611 ipw_compatible_rates(priv, priv->assoc_network, &match.rates); 7612 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1); 7613 priv->status &= ~STATUS_ROAMING; 7614} 7615 7616static void ipw_bg_roam(struct work_struct *work) 7617{ 7618 struct ipw_priv *priv = 7619 container_of(work, struct ipw_priv, roam); 7620 mutex_lock(&priv->mutex); 7621 ipw_roam(priv); 7622 mutex_unlock(&priv->mutex); 7623} 7624 7625static int ipw_associate(void *data) 7626{ 7627 struct ipw_priv *priv = data; 7628 7629 struct libipw_network *network = NULL; 7630 struct ipw_network_match match = { 7631 .network = NULL 7632 }; 7633 struct ipw_supported_rates *rates; 7634 struct list_head *element; 7635 unsigned long flags; 7636 DECLARE_SSID_BUF(ssid); 7637 7638 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 7639 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n"); 7640 return 0; 7641 } 7642 7643 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 7644 IPW_DEBUG_ASSOC("Not attempting association (already in " 7645 "progress)\n"); 7646 return 0; 7647 } 7648 7649 if (priv->status & STATUS_DISASSOCIATING) { 7650 IPW_DEBUG_ASSOC("Not attempting association (in " 7651 "disassociating)\n "); 7652 queue_work(priv->workqueue, &priv->associate); 7653 return 0; 7654 } 7655 7656 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) { 7657 IPW_DEBUG_ASSOC("Not attempting association (scanning or not " 7658 "initialized)\n"); 7659 return 0; 7660 } 7661 7662 if (!(priv->config & CFG_ASSOCIATE) && 7663 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) { 7664 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n"); 7665 return 0; 7666 } 7667 7668 /* Protect our use of the network_list */ 7669 spin_lock_irqsave(&priv->ieee->lock, 
flags); 7670 list_for_each_entry(network, &priv->ieee->network_list, list) 7671 ipw_best_network(priv, &match, network, 0); 7672 7673 network = match.network; 7674 rates = &match.rates; 7675 7676 if (network == NULL && 7677 priv->ieee->iw_mode == IW_MODE_ADHOC && 7678 priv->config & CFG_ADHOC_CREATE && 7679 priv->config & CFG_STATIC_ESSID && 7680 priv->config & CFG_STATIC_CHANNEL) { 7681 /* Use oldest network if the free list is empty */ 7682 if (list_empty(&priv->ieee->network_free_list)) { 7683 struct libipw_network *oldest = NULL; 7684 struct libipw_network *target; 7685 7686 list_for_each_entry(target, &priv->ieee->network_list, list) { 7687 if ((oldest == NULL) || 7688 (target->last_scanned < oldest->last_scanned)) 7689 oldest = target; 7690 } 7691 7692 /* If there are no more slots, expire the oldest */ 7693 list_del(&oldest->list); 7694 target = oldest; 7695 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from " 7696 "network list.\n", 7697 print_ssid(ssid, target->ssid, 7698 target->ssid_len), 7699 target->bssid); 7700 list_add_tail(&target->list, 7701 &priv->ieee->network_free_list); 7702 } 7703 7704 element = priv->ieee->network_free_list.next; 7705 network = list_entry(element, struct libipw_network, list); 7706 ipw_adhoc_create(priv, network); 7707 rates = &priv->rates; 7708 list_del(element); 7709 list_add_tail(&network->list, &priv->ieee->network_list); 7710 } 7711 spin_unlock_irqrestore(&priv->ieee->lock, flags); 7712 7713 /* If we reached the end of the list, then we don't have any valid 7714 * matching APs */ 7715 if (!network) { 7716 ipw_debug_config(priv); 7717 7718 if (!(priv->status & STATUS_SCANNING)) { 7719 if (!(priv->config & CFG_SPEED_SCAN)) 7720 queue_delayed_work(priv->workqueue, 7721 &priv->request_scan, 7722 SCAN_INTERVAL); 7723 else 7724 queue_delayed_work(priv->workqueue, 7725 &priv->request_scan, 0); 7726 } 7727 7728 return 0; 7729 } 7730 7731 ipw_associate_network(priv, network, rates, 0); 7732 7733 return 1; 7734} 7735 7736static void ipw_bg_associate(struct work_struct *work) 7737{ 7738 struct ipw_priv *priv = 7739 container_of(work, struct ipw_priv, associate); 7740 mutex_lock(&priv->mutex); 7741 ipw_associate(priv); 7742 mutex_unlock(&priv->mutex); 7743} 7744 7745static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv, 7746 struct sk_buff *skb) 7747{ 7748 struct ieee80211_hdr *hdr; 7749 u16 fc; 7750 7751 hdr = (struct ieee80211_hdr *)skb->data; 7752 fc = le16_to_cpu(hdr->frame_control); 7753 if (!(fc & IEEE80211_FCTL_PROTECTED)) 7754 return; 7755 7756 fc &= ~IEEE80211_FCTL_PROTECTED; 7757 hdr->frame_control = cpu_to_le16(fc); 7758 switch (priv->ieee->sec.level) { 7759 case SEC_LEVEL_3: 7760 /* Remove CCMP HDR */ 7761 memmove(skb->data + LIBIPW_3ADDR_LEN, 7762 skb->data + LIBIPW_3ADDR_LEN + 8, 7763 skb->len - LIBIPW_3ADDR_LEN - 8); 7764 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */ 7765 break; 7766 case SEC_LEVEL_2: 7767 break; 7768 case SEC_LEVEL_1: 7769 /* Remove IV */ 7770 memmove(skb->data + LIBIPW_3ADDR_LEN, 7771 skb->data + LIBIPW_3ADDR_LEN + 4, 7772 skb->len - LIBIPW_3ADDR_LEN - 4); 7773 skb_trim(skb, skb->len - 8); /* IV + ICV */ 7774 break; 7775 case SEC_LEVEL_0: 7776 break; 7777 default: 7778 printk(KERN_ERR "Unknown security level %d\n", 7779 priv->ieee->sec.level); 7780 break; 7781 } 7782} 7783 7784static void ipw_handle_data_packet(struct ipw_priv *priv, 7785 struct ipw_rx_mem_buffer *rxb, 7786 struct libipw_rx_stats *stats) 7787{ 7788 struct net_device *dev = priv->net_dev; 7789 struct libipw_hdr_4addr *hdr; 7790 struct 
ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 7791 7792 /* We received data from the HW, so stop the watchdog */ 7793 dev->trans_start = jiffies; 7794 7795 /* We only process data packets if the 7796 * interface is open */ 7797 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) > 7798 skb_tailroom(rxb->skb))) { 7799 dev->stats.rx_errors++; 7800 priv->wstats.discard.misc++; 7801 IPW_DEBUG_DROP("Corruption detected! Oh no!\n"); 7802 return; 7803 } else if (unlikely(!netif_running(priv->net_dev))) { 7804 dev->stats.rx_dropped++; 7805 priv->wstats.discard.misc++; 7806 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 7807 return; 7808 } 7809 7810 /* Advance skb->data to the start of the actual payload */ 7811 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data)); 7812 7813 /* Set the size of the skb to the size of the frame */ 7814 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length)); 7815 7816 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); 7817 7818 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */ 7819 hdr = (struct libipw_hdr_4addr *)rxb->skb->data; 7820 if (priv->ieee->iw_mode != IW_MODE_MONITOR && 7821 (is_multicast_ether_addr(hdr->addr1) ? 7822 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt)) 7823 ipw_rebuild_decrypted_skb(priv, rxb->skb); 7824 7825 if (!libipw_rx(priv->ieee, rxb->skb, stats)) 7826 dev->stats.rx_errors++; 7827 else { /* libipw_rx succeeded, so it now owns the SKB */ 7828 rxb->skb = NULL; 7829 __ipw_led_activity_on(priv); 7830 } 7831} 7832 7833#ifdef CONFIG_IPW2200_RADIOTAP 7834static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, 7835 struct ipw_rx_mem_buffer *rxb, 7836 struct libipw_rx_stats *stats) 7837{ 7838 struct net_device *dev = priv->net_dev; 7839 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 7840 struct ipw_rx_frame *frame = &pkt->u.frame; 7841 7842 /* initial pull of some data */ 7843 u16 received_channel = frame->received_channel; 7844 u8 antennaAndPhy = frame->antennaAndPhy; 7845 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */ 7846 u16 pktrate = frame->rate; 7847 7848 /* Magic struct that slots into the radiotap header -- no reason 7849 * to build this manually element by element, we can write it much 7850 * more efficiently than we can parse it. ORDER MATTERS HERE */ 7851 struct ipw_rt_hdr *ipw_rt; 7852 7853 short len = le16_to_cpu(pkt->u.frame.length); 7854 7855 /* We received data from the HW, so stop the watchdog */ 7856 dev->trans_start = jiffies; 7857 7858 /* We only process data packets if the 7859 * interface is open */ 7860 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) > 7861 skb_tailroom(rxb->skb))) { 7862 dev->stats.rx_errors++; 7863 priv->wstats.discard.misc++; 7864 IPW_DEBUG_DROP("Corruption detected! 
Oh no!\n"); 7865 return; 7866 } else if (unlikely(!netif_running(priv->net_dev))) { 7867 dev->stats.rx_dropped++; 7868 priv->wstats.discard.misc++; 7869 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 7870 return; 7871 } 7872 7873 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use 7874 * that now */ 7875 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) { 7876 /* FIXME: Should alloc bigger skb instead */ 7877 dev->stats.rx_dropped++; 7878 priv->wstats.discard.misc++; 7879 IPW_DEBUG_DROP("Dropping too large packet in monitor\n"); 7880 return; 7881 } 7882 7883 /* copy the frame itself */ 7884 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr), 7885 rxb->skb->data + IPW_RX_FRAME_SIZE, len); 7886 7887 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data; 7888 7889 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; 7890 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ 7891 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */ 7892 7893 /* Big bitfield of all the fields we provide in radiotap */ 7894 ipw_rt->rt_hdr.it_present = cpu_to_le32( 7895 (1 << IEEE80211_RADIOTAP_TSFT) | 7896 (1 << IEEE80211_RADIOTAP_FLAGS) | 7897 (1 << IEEE80211_RADIOTAP_RATE) | 7898 (1 << IEEE80211_RADIOTAP_CHANNEL) | 7899 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | 7900 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | 7901 (1 << IEEE80211_RADIOTAP_ANTENNA)); 7902 7903 /* Zero the flags, we'll add to them as we go */ 7904 ipw_rt->rt_flags = 0; 7905 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 | 7906 frame->parent_tsf[2] << 16 | 7907 frame->parent_tsf[1] << 8 | 7908 frame->parent_tsf[0]); 7909 7910 /* Convert signal to DBM */ 7911 ipw_rt->rt_dbmsignal = antsignal; 7912 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise); 7913 7914 /* Convert the channel data and set the flags */ 7915 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel)); 7916 if (received_channel > 14) { /* 802.11a */ 7917 ipw_rt->rt_chbitmask = 7918 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); 7919 } else if (antennaAndPhy & 32) { /* 802.11b */ 7920 ipw_rt->rt_chbitmask = 7921 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); 7922 } else { /* 802.11g */ 7923 ipw_rt->rt_chbitmask = 7924 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); 7925 } 7926 7927 /* set the rate in multiples of 500k/s */ 7928 switch (pktrate) { 7929 case IPW_TX_RATE_1MB: 7930 ipw_rt->rt_rate = 2; 7931 break; 7932 case IPW_TX_RATE_2MB: 7933 ipw_rt->rt_rate = 4; 7934 break; 7935 case IPW_TX_RATE_5MB: 7936 ipw_rt->rt_rate = 10; 7937 break; 7938 case IPW_TX_RATE_6MB: 7939 ipw_rt->rt_rate = 12; 7940 break; 7941 case IPW_TX_RATE_9MB: 7942 ipw_rt->rt_rate = 18; 7943 break; 7944 case IPW_TX_RATE_11MB: 7945 ipw_rt->rt_rate = 22; 7946 break; 7947 case IPW_TX_RATE_12MB: 7948 ipw_rt->rt_rate = 24; 7949 break; 7950 case IPW_TX_RATE_18MB: 7951 ipw_rt->rt_rate = 36; 7952 break; 7953 case IPW_TX_RATE_24MB: 7954 ipw_rt->rt_rate = 48; 7955 break; 7956 case IPW_TX_RATE_36MB: 7957 ipw_rt->rt_rate = 72; 7958 break; 7959 case IPW_TX_RATE_48MB: 7960 ipw_rt->rt_rate = 96; 7961 break; 7962 case IPW_TX_RATE_54MB: 7963 ipw_rt->rt_rate = 108; 7964 break; 7965 default: 7966 ipw_rt->rt_rate = 0; 7967 break; 7968 } 7969 7970 /* antenna number */ 7971 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? 
*/ 7972 7973 /* set the preamble flag if we have it */ 7974 if ((antennaAndPhy & 64)) 7975 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 7976 7977 /* Set the size of the skb to the size of the frame */ 7978 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr)); 7979 7980 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); 7981 7982 if (!libipw_rx(priv->ieee, rxb->skb, stats)) 7983 dev->stats.rx_errors++; 7984 else { /* libipw_rx succeeded, so it now owns the SKB */ 7985 rxb->skb = NULL; 7986 /* no LED during capture */ 7987 } 7988} 7989#endif 7990 7991#ifdef CONFIG_IPW2200_PROMISCUOUS 7992#define libipw_is_probe_response(fc) \ 7993 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \ 7994 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP ) 7995 7996#define libipw_is_management(fc) \ 7997 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) 7998 7999#define libipw_is_control(fc) \ 8000 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) 8001 8002#define libipw_is_data(fc) \ 8003 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) 8004 8005#define libipw_is_assoc_request(fc) \ 8006 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ) 8007 8008#define libipw_is_reassoc_request(fc) \ 8009 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) 8010 8011static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, 8012 struct ipw_rx_mem_buffer *rxb, 8013 struct libipw_rx_stats *stats) 8014{ 8015 struct net_device *dev = priv->prom_net_dev; 8016 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 8017 struct ipw_rx_frame *frame = &pkt->u.frame; 8018 struct ipw_rt_hdr *ipw_rt; 8019 8020 /* First cache any information we need before we overwrite 8021 * the information provided in the skb from the hardware */ 8022 struct ieee80211_hdr *hdr; 8023 u16 channel = frame->received_channel; 8024 u8 phy_flags = frame->antennaAndPhy; 8025 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM; 8026 s8 noise = (s8) le16_to_cpu(frame->noise); 8027 u8 rate = frame->rate; 8028 short len = le16_to_cpu(pkt->u.frame.length); 8029 struct sk_buff *skb; 8030 int hdr_only = 0; 8031 u16 filter = priv->prom_priv->filter; 8032 8033 /* If the filter is set to not include Rx frames then return */ 8034 if (filter & IPW_PROM_NO_RX) 8035 return; 8036 8037 /* We received data from the HW, so stop the watchdog */ 8038 dev->trans_start = jiffies; 8039 8040 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { 8041 dev->stats.rx_errors++; 8042 IPW_DEBUG_DROP("Corruption detected! 
Oh no!\n"); 8043 return; 8044 } 8045 8046 /* We only process data packets if the interface is open */ 8047 if (unlikely(!netif_running(dev))) { 8048 dev->stats.rx_dropped++; 8049 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); 8050 return; 8051 } 8052 8053 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use 8054 * that now */ 8055 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) { 8056 /* FIXME: Should alloc bigger skb instead */ 8057 dev->stats.rx_dropped++; 8058 IPW_DEBUG_DROP("Dropping too large packet in monitor\n"); 8059 return; 8060 } 8061 8062 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE; 8063 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) { 8064 if (filter & IPW_PROM_NO_MGMT) 8065 return; 8066 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 8067 hdr_only = 1; 8068 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) { 8069 if (filter & IPW_PROM_NO_CTL) 8070 return; 8071 if (filter & IPW_PROM_CTL_HEADER_ONLY) 8072 hdr_only = 1; 8073 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) { 8074 if (filter & IPW_PROM_NO_DATA) 8075 return; 8076 if (filter & IPW_PROM_DATA_HEADER_ONLY) 8077 hdr_only = 1; 8078 } 8079 8080 /* Copy the SKB since this is for the promiscuous side */ 8081 skb = skb_copy(rxb->skb, GFP_ATOMIC); 8082 if (skb == NULL) { 8083 IPW_ERROR("skb_clone failed for promiscuous copy.\n"); 8084 return; 8085 } 8086 8087 /* copy the frame data to write after where the radiotap header goes */ 8088 ipw_rt = (void *)skb->data; 8089 8090 if (hdr_only) 8091 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control)); 8092 8093 memcpy(ipw_rt->payload, hdr, len); 8094 8095 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; 8096 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ 8097 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */ 8098 8099 /* Set the size of the skb to the size of the frame */ 8100 skb_put(skb, sizeof(*ipw_rt) + len); 8101 8102 /* Big bitfield of all the fields we provide in radiotap */ 8103 ipw_rt->rt_hdr.it_present = cpu_to_le32( 8104 (1 << IEEE80211_RADIOTAP_TSFT) | 8105 (1 << IEEE80211_RADIOTAP_FLAGS) | 8106 (1 << IEEE80211_RADIOTAP_RATE) | 8107 (1 << IEEE80211_RADIOTAP_CHANNEL) | 8108 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | 8109 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | 8110 (1 << IEEE80211_RADIOTAP_ANTENNA)); 8111 8112 /* Zero the flags, we'll add to them as we go */ 8113 ipw_rt->rt_flags = 0; 8114 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 | 8115 frame->parent_tsf[2] << 16 | 8116 frame->parent_tsf[1] << 8 | 8117 frame->parent_tsf[0]); 8118 8119 /* Convert to DBM */ 8120 ipw_rt->rt_dbmsignal = signal; 8121 ipw_rt->rt_dbmnoise = noise; 8122 8123 /* Convert the channel data and set the flags */ 8124 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel)); 8125 if (channel > 14) { /* 802.11a */ 8126 ipw_rt->rt_chbitmask = 8127 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); 8128 } else if (phy_flags & (1 << 5)) { /* 802.11b */ 8129 ipw_rt->rt_chbitmask = 8130 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); 8131 } else { /* 802.11g */ 8132 ipw_rt->rt_chbitmask = 8133 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ); 8134 } 8135 8136 /* set the rate in multiples of 500k/s */ 8137 switch (rate) { 8138 case IPW_TX_RATE_1MB: 8139 ipw_rt->rt_rate = 2; 8140 break; 8141 case IPW_TX_RATE_2MB: 8142 ipw_rt->rt_rate = 4; 8143 break; 8144 case IPW_TX_RATE_5MB: 8145 ipw_rt->rt_rate = 10; 8146 break; 8147 case IPW_TX_RATE_6MB: 8148 ipw_rt->rt_rate = 
12; 8149 break; 8150 case IPW_TX_RATE_9MB: 8151 ipw_rt->rt_rate = 18; 8152 break; 8153 case IPW_TX_RATE_11MB: 8154 ipw_rt->rt_rate = 22; 8155 break; 8156 case IPW_TX_RATE_12MB: 8157 ipw_rt->rt_rate = 24; 8158 break; 8159 case IPW_TX_RATE_18MB: 8160 ipw_rt->rt_rate = 36; 8161 break; 8162 case IPW_TX_RATE_24MB: 8163 ipw_rt->rt_rate = 48; 8164 break; 8165 case IPW_TX_RATE_36MB: 8166 ipw_rt->rt_rate = 72; 8167 break; 8168 case IPW_TX_RATE_48MB: 8169 ipw_rt->rt_rate = 96; 8170 break; 8171 case IPW_TX_RATE_54MB: 8172 ipw_rt->rt_rate = 108; 8173 break; 8174 default: 8175 ipw_rt->rt_rate = 0; 8176 break; 8177 } 8178 8179 /* antenna number */ 8180 ipw_rt->rt_antenna = (phy_flags & 3); 8181 8182 /* set the preamble flag if we have it */ 8183 if (phy_flags & (1 << 6)) 8184 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 8185 8186 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len); 8187 8188 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) { 8189 dev->stats.rx_errors++; 8190 dev_kfree_skb_any(skb); 8191 } 8192} 8193#endif 8194 8195static int is_network_packet(struct ipw_priv *priv, 8196 struct libipw_hdr_4addr *header) 8197{ 8198 /* Filter incoming packets to determine if they are targeted toward 8199 * this network, discarding packets coming from ourselves */ 8200 switch (priv->ieee->iw_mode) { 8201 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */ 8202 /* packets from our adapter are dropped (echo) */ 8203 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN)) 8204 return 0; 8205 8206 /* {broad,multi}cast packets to our BSSID go through */ 8207 if (is_multicast_ether_addr(header->addr1)) 8208 return !memcmp(header->addr3, priv->bssid, ETH_ALEN); 8209 8210 /* packets to our adapter go through */ 8211 return !memcmp(header->addr1, priv->net_dev->dev_addr, 8212 ETH_ALEN); 8213 8214 case IW_MODE_INFRA: /* Header: Dest.
| BSSID | Source */ 8215 /* packets from our adapter are dropped (echo) */ 8216 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN)) 8217 return 0; 8218 8219 /* {broad,multi}cast packets to our BSS go through */ 8220 if (is_multicast_ether_addr(header->addr1)) 8221 return !memcmp(header->addr2, priv->bssid, ETH_ALEN); 8222 8223 /* packets to our adapter go through */ 8224 return !memcmp(header->addr1, priv->net_dev->dev_addr, 8225 ETH_ALEN); 8226 } 8227 8228 return 1; 8229} 8230 8231#define IPW_PACKET_RETRY_TIME HZ 8232 8233static int is_duplicate_packet(struct ipw_priv *priv, 8234 struct libipw_hdr_4addr *header) 8235{ 8236 u16 sc = le16_to_cpu(header->seq_ctl); 8237 u16 seq = WLAN_GET_SEQ_SEQ(sc); 8238 u16 frag = WLAN_GET_SEQ_FRAG(sc); 8239 u16 *last_seq, *last_frag; 8240 unsigned long *last_time; 8241 8242 switch (priv->ieee->iw_mode) { 8243 case IW_MODE_ADHOC: 8244 { 8245 struct list_head *p; 8246 struct ipw_ibss_seq *entry = NULL; 8247 u8 *mac = header->addr2; 8248 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE; 8249 8250 __list_for_each(p, &priv->ibss_mac_hash[index]) { 8251 entry = 8252 list_entry(p, struct ipw_ibss_seq, list); 8253 if (!memcmp(entry->mac, mac, ETH_ALEN)) 8254 break; 8255 } 8256 if (p == &priv->ibss_mac_hash[index]) { 8257 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 8258 if (!entry) { 8259 IPW_ERROR 8260 ("Cannot malloc new mac entry\n"); 8261 return 0; 8262 } 8263 memcpy(entry->mac, mac, ETH_ALEN); 8264 entry->seq_num = seq; 8265 entry->frag_num = frag; 8266 entry->packet_time = jiffies; 8267 list_add(&entry->list, 8268 &priv->ibss_mac_hash[index]); 8269 return 0; 8270 } 8271 last_seq = &entry->seq_num; 8272 last_frag = &entry->frag_num; 8273 last_time = &entry->packet_time; 8274 break; 8275 } 8276 case IW_MODE_INFRA: 8277 last_seq = &priv->last_seq_num; 8278 last_frag = &priv->last_frag_num; 8279 last_time = &priv->last_packet_time; 8280 break; 8281 default: 8282 return 0; 8283 } 8284 if ((*last_seq == seq) && 8285 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) { 8286 if (*last_frag == frag) 8287 goto drop; 8288 if (*last_frag + 1 != frag) 8289 /* out-of-order fragment */ 8290 goto drop; 8291 } else 8292 *last_seq = seq; 8293 8294 *last_frag = frag; 8295 *last_time = jiffies; 8296 return 0; 8297 8298 drop: 8299 /* Comment this line now since we observed the card receives 8300 * duplicate packets but the FCTL_RETRY bit is not set in the 8301 * IBSS mode with fragmentation enabled. 
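* (For reference, a frame reaches this drop path when it repeats the last seen sequence number within IPW_PACKET_RETRY_TIME and its fragment number is anything other than the next expected fragment.)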
8302 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */ 8303 return 1; 8304} 8305 8306static void ipw_handle_mgmt_packet(struct ipw_priv *priv, 8307 struct ipw_rx_mem_buffer *rxb, 8308 struct libipw_rx_stats *stats) 8309{ 8310 struct sk_buff *skb = rxb->skb; 8311 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data; 8312 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *) 8313 (skb->data + IPW_RX_FRAME_SIZE); 8314 8315 libipw_rx_mgt(priv->ieee, header, stats); 8316 8317 if (priv->ieee->iw_mode == IW_MODE_ADHOC && 8318 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) == 8319 IEEE80211_STYPE_PROBE_RESP) || 8320 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) == 8321 IEEE80211_STYPE_BEACON))) { 8322 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN)) 8323 ipw_add_station(priv, header->addr2); 8324 } 8325 8326 if (priv->config & CFG_NET_STATS) { 8327 IPW_DEBUG_HC("sending stat packet\n"); 8328 8329 /* Set the size of the skb to the size of the full 8330 * ipw header and 802.11 frame */ 8331 skb_put(skb, le16_to_cpu(pkt->u.frame.length) + 8332 IPW_RX_FRAME_SIZE); 8333 8334 /* Advance past the ipw packet header to the 802.11 frame */ 8335 skb_pull(skb, IPW_RX_FRAME_SIZE); 8336 8337 /* Push the libipw_rx_stats before the 802.11 frame */ 8338 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats)); 8339 8340 skb->dev = priv->ieee->dev; 8341 8342 /* Point raw at the libipw_stats */ 8343 skb_reset_mac_header(skb); 8344 8345 skb->pkt_type = PACKET_OTHERHOST; 8346 skb->protocol = cpu_to_be16(ETH_P_80211_STATS); 8347 memset(skb->cb, 0, sizeof(rxb->skb->cb)); 8348 netif_rx(skb); 8349 rxb->skb = NULL; 8350 } 8351} 8352 8353/* 8354 * Main entry function for receiving a packet with 802.11 headers. This 8355 * should be called whenever the FW has notified us that there is a new 8356 * skb in the receive queue. 8357 */ 8358static void ipw_rx(struct ipw_priv *priv) 8359{ 8360 struct ipw_rx_mem_buffer *rxb; 8361 struct ipw_rx_packet *pkt; 8362 struct libipw_hdr_4addr *header; 8363 u32 r, w, i; 8364 u8 network_packet; 8365 u8 fill_rx = 0; 8366 8367 r = ipw_read32(priv, IPW_RX_READ_INDEX); 8368 w = ipw_read32(priv, IPW_RX_WRITE_INDEX); 8369 i = priv->rxq->read; 8370 8371 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2)) 8372 fill_rx = 1; 8373 8374 while (i != r) { 8375 rxb = priv->rxq->queue[i]; 8376 if (unlikely(rxb == NULL)) { 8377 printk(KERN_CRIT "Queue not allocated!\n"); 8378 break; 8379 } 8380 priv->rxq->queue[i] = NULL; 8381 8382 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 8383 IPW_RX_BUF_SIZE, 8384 PCI_DMA_FROMDEVICE); 8385 8386 pkt = (struct ipw_rx_packet *)rxb->skb->data; 8387 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n", 8388 pkt->header.message_type, 8389 pkt->header.rx_seq_num, pkt->header.control_bits); 8390 8391 switch (pkt->header.message_type) { 8392 case RX_FRAME_TYPE: /* 802.11 frame */ { 8393 struct libipw_rx_stats stats = { 8394 .rssi = pkt->u.frame.rssi_dbm - 8395 IPW_RSSI_TO_DBM, 8396 .signal = 8397 pkt->u.frame.rssi_dbm - 8398 IPW_RSSI_TO_DBM + 0x100, 8399 .noise = 8400 le16_to_cpu(pkt->u.frame.noise), 8401 .rate = pkt->u.frame.rate, 8402 .mac_time = jiffies, 8403 .received_channel = 8404 pkt->u.frame.received_channel, 8405 .freq = 8406 (pkt->u.frame. 8407 control & (1 << 0)) ?
8408 LIBIPW_24GHZ_BAND : 8409 LIBIPW_52GHZ_BAND, 8410 .len = le16_to_cpu(pkt->u.frame.length), 8411 }; 8412 8413 if (stats.rssi != 0) 8414 stats.mask |= LIBIPW_STATMASK_RSSI; 8415 if (stats.signal != 0) 8416 stats.mask |= LIBIPW_STATMASK_SIGNAL; 8417 if (stats.noise != 0) 8418 stats.mask |= LIBIPW_STATMASK_NOISE; 8419 if (stats.rate != 0) 8420 stats.mask |= LIBIPW_STATMASK_RATE; 8421 8422 priv->rx_packets++; 8423 8424#ifdef CONFIG_IPW2200_PROMISCUOUS 8425 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) 8426 ipw_handle_promiscuous_rx(priv, rxb, &stats); 8427#endif 8428 8429#ifdef CONFIG_IPW2200_MONITOR 8430 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 8431#ifdef CONFIG_IPW2200_RADIOTAP 8432 8433 ipw_handle_data_packet_monitor(priv, 8434 rxb, 8435 &stats); 8436#else 8437 ipw_handle_data_packet(priv, rxb, 8438 &stats); 8439#endif 8440 break; 8441 } 8442#endif 8443 8444 header = 8445 (struct libipw_hdr_4addr *)(rxb->skb-> 8446 data + 8447 IPW_RX_FRAME_SIZE); 8448 /* TODO: Check Ad-Hoc dest/source and make sure 8449 * that we are actually parsing these packets 8450 * correctly -- we should probably use the 8451 * frame control of the packet and disregard 8452 * the current iw_mode */ 8453 8454 network_packet = 8455 is_network_packet(priv, header); 8456 if (network_packet && priv->assoc_network) { 8457 priv->assoc_network->stats.rssi = 8458 stats.rssi; 8459 priv->exp_avg_rssi = 8460 exponential_average(priv->exp_avg_rssi, 8461 stats.rssi, DEPTH_RSSI); 8462 } 8463 8464 IPW_DEBUG_RX("Frame: len=%u\n", 8465 le16_to_cpu(pkt->u.frame.length)); 8466 8467 if (le16_to_cpu(pkt->u.frame.length) < 8468 libipw_get_hdrlen(le16_to_cpu( 8469 header->frame_ctl))) { 8470 IPW_DEBUG_DROP 8471 ("Received packet is too small. " 8472 "Dropping.\n"); 8473 priv->net_dev->stats.rx_errors++; 8474 priv->wstats.discard.misc++; 8475 break; 8476 } 8477 8478 switch (WLAN_FC_GET_TYPE 8479 (le16_to_cpu(header->frame_ctl))) { 8480 8481 case IEEE80211_FTYPE_MGMT: 8482 ipw_handle_mgmt_packet(priv, rxb, 8483 &stats); 8484 break; 8485 8486 case IEEE80211_FTYPE_CTL: 8487 break; 8488 8489 case IEEE80211_FTYPE_DATA: 8490 if (unlikely(!network_packet || 8491 is_duplicate_packet(priv, 8492 header))) 8493 { 8494 IPW_DEBUG_DROP("Dropping: " 8495 "%pM, " 8496 "%pM, " 8497 "%pM\n", 8498 header->addr1, 8499 header->addr2, 8500 header->addr3); 8501 break; 8502 } 8503 8504 ipw_handle_data_packet(priv, rxb, 8505 &stats); 8506 8507 break; 8508 } 8509 break; 8510 } 8511 8512 case RX_HOST_NOTIFICATION_TYPE:{ 8513 IPW_DEBUG_RX 8514 ("Notification: subtype=%02X flags=%02X size=%d\n", 8515 pkt->u.notification.subtype, 8516 pkt->u.notification.flags, 8517 le16_to_cpu(pkt->u.notification.size)); 8518 ipw_rx_notification(priv, &pkt->u.notification); 8519 break; 8520 } 8521 8522 default: 8523 IPW_DEBUG_RX("Bad Rx packet of type %d\n", 8524 pkt->header.message_type); 8525 break; 8526 } 8527 8528 /* For now we just don't re-use anything. 
We can tweak this 8529 * later to try and re-use notification packets and SKBs that 8530 * fail to Rx correctly */ 8531 if (rxb->skb != NULL) { 8532 dev_kfree_skb_any(rxb->skb); 8533 rxb->skb = NULL; 8534 } 8535 8536 pci_unmap_single(priv->pci_dev, rxb->dma_addr, 8537 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 8538 list_add_tail(&rxb->list, &priv->rxq->rx_used); 8539 8540 i = (i + 1) % RX_QUEUE_SIZE; 8541 8542 /* If there are a lot of unused frames, restock the Rx queue 8543 * so the ucode won't assert */ 8544 if (fill_rx) { 8545 priv->rxq->read = i; 8546 ipw_rx_queue_replenish(priv); 8547 } 8548 } 8549 8550 /* Backtrack one entry */ 8551 priv->rxq->read = i; 8552 ipw_rx_queue_restock(priv); 8553} 8554 8555#define DEFAULT_RTS_THRESHOLD 2304U 8556#define MIN_RTS_THRESHOLD 1U 8557#define MAX_RTS_THRESHOLD 2304U 8558#define DEFAULT_BEACON_INTERVAL 100U 8559#define DEFAULT_SHORT_RETRY_LIMIT 7U 8560#define DEFAULT_LONG_RETRY_LIMIT 4U 8561 8562/** 8563 * ipw_sw_reset 8564 * @option: options to control different reset behaviour 8565 * 0 = reset everything except the 'disable' module_param 8566 * 1 = reset everything and print out driver info (for probe only) 8567 * 2 = reset everything 8568 */ 8569static int ipw_sw_reset(struct ipw_priv *priv, int option) 8570{ 8571 int band, modulation; 8572 int old_mode = priv->ieee->iw_mode; 8573 8574 /* Initialize module parameter values here */ 8575 priv->config = 0; 8576 8577 /* We default to disabling the LED code as right now it causes 8578 * too many systems to lock up... */ 8579 if (!led_support) 8580 priv->config |= CFG_NO_LED; 8581 8582 if (associate) 8583 priv->config |= CFG_ASSOCIATE; 8584 else 8585 IPW_DEBUG_INFO("Auto associate disabled.\n"); 8586 8587 if (auto_create) 8588 priv->config |= CFG_ADHOC_CREATE; 8589 else 8590 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n"); 8591 8592 priv->config &= ~CFG_STATIC_ESSID; 8593 priv->essid_len = 0; 8594 memset(priv->essid, 0, IW_ESSID_MAX_SIZE); 8595 8596 if (disable && option) { 8597 priv->status |= STATUS_RF_KILL_SW; 8598 IPW_DEBUG_INFO("Radio disabled.\n"); 8599 } 8600 8601 if (default_channel != 0) { 8602 priv->config |= CFG_STATIC_CHANNEL; 8603 priv->channel = default_channel; 8604 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel); 8605 /* TODO: Validate that provided channel is in range */ 8606 } 8607#ifdef CONFIG_IPW2200_QOS 8608 ipw_qos_init(priv, qos_enable, qos_burst_enable, 8609 burst_duration_CCK, burst_duration_OFDM); 8610#endif /* CONFIG_IPW2200_QOS */ 8611 8612 switch (network_mode) { 8613 case 1: 8614 priv->ieee->iw_mode = IW_MODE_ADHOC; 8615 priv->net_dev->type = ARPHRD_ETHER; 8616 8617 break; 8618#ifdef CONFIG_IPW2200_MONITOR 8619 case 2: 8620 priv->ieee->iw_mode = IW_MODE_MONITOR; 8621#ifdef CONFIG_IPW2200_RADIOTAP 8622 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 8623#else 8624 priv->net_dev->type = ARPHRD_IEEE80211; 8625#endif 8626 break; 8627#endif 8628 default: 8629 case 0: 8630 priv->net_dev->type = ARPHRD_ETHER; 8631 priv->ieee->iw_mode = IW_MODE_INFRA; 8632 break; 8633 } 8634 8635 if (hwcrypto) { 8636 priv->ieee->host_encrypt = 0; 8637 priv->ieee->host_encrypt_msdu = 0; 8638 priv->ieee->host_decrypt = 0; 8639 priv->ieee->host_mc_decrypt = 0; 8640 } 8641 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off"); 8642 8643 /* IPW2200/2915 is able to do hardware fragmentation.
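Fragmentation is therefore left to the firmware; host_open_frag is cleared just below rather than having the host stack split frames itself.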
*/ 8644 priv->ieee->host_open_frag = 0; 8645 8646 if ((priv->pci_dev->device == 0x4223) || 8647 (priv->pci_dev->device == 0x4224)) { 8648 if (option == 1) 8649 printk(KERN_INFO DRV_NAME 8650 ": Detected Intel PRO/Wireless 2915ABG Network " 8651 "Connection\n"); 8652 priv->ieee->abg_true = 1; 8653 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND; 8654 modulation = LIBIPW_OFDM_MODULATION | 8655 LIBIPW_CCK_MODULATION; 8656 priv->adapter = IPW_2915ABG; 8657 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B; 8658 } else { 8659 if (option == 1) 8660 printk(KERN_INFO DRV_NAME 8661 ": Detected Intel PRO/Wireless 2200BG Network " 8662 "Connection\n"); 8663 8664 priv->ieee->abg_true = 0; 8665 band = LIBIPW_24GHZ_BAND; 8666 modulation = LIBIPW_OFDM_MODULATION | 8667 LIBIPW_CCK_MODULATION; 8668 priv->adapter = IPW_2200BG; 8669 priv->ieee->mode = IEEE_G | IEEE_B; 8670 } 8671 8672 priv->ieee->freq_band = band; 8673 priv->ieee->modulation = modulation; 8674 8675 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK; 8676 8677 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; 8678 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; 8679 8680 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 8681 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT; 8682 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT; 8683 8684 /* If power management is turned on, default to AC mode */ 8685 priv->power_mode = IPW_POWER_AC; 8686 priv->tx_power = IPW_TX_POWER_DEFAULT; 8687 8688 return old_mode == priv->ieee->iw_mode; 8689} 8690 8691/* 8692 * This file defines the Wireless Extension handlers. It does not 8693 * define any methods of hardware manipulation and relies on the 8694 * functions defined in ipw_main to provide the HW interaction. 8695 * 8696 * The exception to this is the use of the ipw_get_ordinal() 8697 * function used to poll the hardware vs. making unecessary calls. 
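* The handlers below generally take priv->mutex, validate the iwreq_data they are passed, update the matching fields in struct ipw_priv and, when the change affects the current association, force a [re]association.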
8698 * 8699 */ 8700 8701static int ipw_set_channel(struct ipw_priv *priv, u8 channel) 8702{ 8703 if (channel == 0) { 8704 IPW_DEBUG_INFO("Setting channel to ANY (0)\n"); 8705 priv->config &= ~CFG_STATIC_CHANNEL; 8706 IPW_DEBUG_ASSOC("Attempting to associate with new " 8707 "parameters.\n"); 8708 ipw_associate(priv); 8709 return 0; 8710 } 8711 8712 priv->config |= CFG_STATIC_CHANNEL; 8713 8714 if (priv->channel == channel) { 8715 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n", 8716 channel); 8717 return 0; 8718 } 8719 8720 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel); 8721 priv->channel = channel; 8722 8723#ifdef CONFIG_IPW2200_MONITOR 8724 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 8725 int i; 8726 if (priv->status & STATUS_SCANNING) { 8727 IPW_DEBUG_SCAN("Scan abort triggered due to " 8728 "channel change.\n"); 8729 ipw_abort_scan(priv); 8730 } 8731 8732 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--) 8733 udelay(10); 8734 8735 if (priv->status & STATUS_SCANNING) 8736 IPW_DEBUG_SCAN("Still scanning...\n"); 8737 else 8738 IPW_DEBUG_SCAN("Took %dms to abort current scan\n", 8739 1000 - i); 8740 8741 return 0; 8742 } 8743#endif /* CONFIG_IPW2200_MONITOR */ 8744 8745 /* Network configuration changed -- force [re]association */ 8746 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n"); 8747 if (!ipw_disassociate(priv)) 8748 ipw_associate(priv); 8749 8750 return 0; 8751} 8752 8753static int ipw_wx_set_freq(struct net_device *dev, 8754 struct iw_request_info *info, 8755 union iwreq_data *wrqu, char *extra) 8756{ 8757 struct ipw_priv *priv = libipw_priv(dev); 8758 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 8759 struct iw_freq *fwrq = &wrqu->freq; 8760 int ret = 0, i; 8761 u8 channel, flags; 8762 int band; 8763 8764 if (fwrq->m == 0) { 8765 IPW_DEBUG_WX("SET Freq/Channel -> any\n"); 8766 mutex_lock(&priv->mutex); 8767 ret = ipw_set_channel(priv, 0); 8768 mutex_unlock(&priv->mutex); 8769 return ret; 8770 } 8771 /* if setting by freq convert to channel */ 8772 if (fwrq->e == 1) { 8773 channel = libipw_freq_to_channel(priv->ieee, fwrq->m); 8774 if (channel == 0) 8775 return -EINVAL; 8776 } else 8777 channel = fwrq->m; 8778 8779 if (!(band = libipw_is_valid_channel(priv->ieee, channel))) 8780 return -EINVAL; 8781 8782 if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 8783 i = libipw_channel_to_index(priv->ieee, channel); 8784 if (i == -1) 8785 return -EINVAL; 8786 8787 flags = (band == LIBIPW_24GHZ_BAND) ? 
8788 geo->bg[i].flags : geo->a[i].flags; 8789 if (flags & LIBIPW_CH_PASSIVE_ONLY) { 8790 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n"); 8791 return -EINVAL; 8792 } 8793 } 8794 8795 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); 8796 mutex_lock(&priv->mutex); 8797 ret = ipw_set_channel(priv, channel); 8798 mutex_unlock(&priv->mutex); 8799 return ret; 8800} 8801 8802static int ipw_wx_get_freq(struct net_device *dev, 8803 struct iw_request_info *info, 8804 union iwreq_data *wrqu, char *extra) 8805{ 8806 struct ipw_priv *priv = libipw_priv(dev); 8807 8808 wrqu->freq.e = 0; 8809 8810 /* If we are associated, trying to associate, or have a statically 8811 * configured CHANNEL then return that; otherwise return ANY */ 8812 mutex_lock(&priv->mutex); 8813 if (priv->config & CFG_STATIC_CHANNEL || 8814 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) { 8815 int i; 8816 8817 i = libipw_channel_to_index(priv->ieee, priv->channel); 8818 BUG_ON(i == -1); 8819 wrqu->freq.e = 1; 8820 8821 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) { 8822 case LIBIPW_52GHZ_BAND: 8823 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000; 8824 break; 8825 8826 case LIBIPW_24GHZ_BAND: 8827 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000; 8828 break; 8829 8830 default: 8831 BUG(); 8832 } 8833 } else 8834 wrqu->freq.m = 0; 8835 8836 mutex_unlock(&priv->mutex); 8837 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); 8838 return 0; 8839} 8840 8841static int ipw_wx_set_mode(struct net_device *dev, 8842 struct iw_request_info *info, 8843 union iwreq_data *wrqu, char *extra) 8844{ 8845 struct ipw_priv *priv = libipw_priv(dev); 8846 int err = 0; 8847 8848 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode); 8849 8850 switch (wrqu->mode) { 8851#ifdef CONFIG_IPW2200_MONITOR 8852 case IW_MODE_MONITOR: 8853#endif 8854 case IW_MODE_ADHOC: 8855 case IW_MODE_INFRA: 8856 break; 8857 case IW_MODE_AUTO: 8858 wrqu->mode = IW_MODE_INFRA; 8859 break; 8860 default: 8861 return -EINVAL; 8862 } 8863 if (wrqu->mode == priv->ieee->iw_mode) 8864 return 0; 8865 8866 mutex_lock(&priv->mutex); 8867 8868 ipw_sw_reset(priv, 0); 8869 8870#ifdef CONFIG_IPW2200_MONITOR 8871 if (priv->ieee->iw_mode == IW_MODE_MONITOR) 8872 priv->net_dev->type = ARPHRD_ETHER; 8873 8874 if (wrqu->mode == IW_MODE_MONITOR) 8875#ifdef CONFIG_IPW2200_RADIOTAP 8876 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 8877#else 8878 priv->net_dev->type = ARPHRD_IEEE80211; 8879#endif 8880#endif /* CONFIG_IPW2200_MONITOR */ 8881 8882 /* Free the existing firmware and reset the fw_loaded 8883 * flag so ipw_load() will bring in the new firmware */ 8884 free_firmware(); 8885 8886 priv->ieee->iw_mode = wrqu->mode; 8887 8888 queue_work(priv->workqueue, &priv->adapter_restart); 8889 mutex_unlock(&priv->mutex); 8890 return err; 8891} 8892 8893static int ipw_wx_get_mode(struct net_device *dev, 8894 struct iw_request_info *info, 8895 union iwreq_data *wrqu, char *extra) 8896{ 8897 struct ipw_priv *priv = libipw_priv(dev); 8898 mutex_lock(&priv->mutex); 8899 wrqu->mode = priv->ieee->iw_mode; 8900 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode); 8901 mutex_unlock(&priv->mutex); 8902 return 0; 8903} 8904 8905/* Values are in microsecond */ 8906static const s32 timeout_duration[] = { 8907 350000, 8908 250000, 8909 75000, 8910 37000, 8911 25000, 8912}; 8913 8914static const s32 period_duration[] = { 8915 400000, 8916 700000, 8917 1000000, 8918 1000000, 8919 1000000 8920}; 8921 8922static int ipw_wx_get_range(struct net_device *dev, 8923 struct iw_request_info *info, 8924 
union iwreq_data *wrqu, char *extra) 8925{ 8926 struct ipw_priv *priv = libipw_priv(dev); 8927 struct iw_range *range = (struct iw_range *)extra; 8928 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 8929 int i = 0, j; 8930 8931 wrqu->data.length = sizeof(*range); 8932 memset(range, 0, sizeof(*range)); 8933 8934 /* 54Mbs == ~27 Mb/s real (802.11g) */ 8935 range->throughput = 27 * 1000 * 1000; 8936 8937 range->max_qual.qual = 100; 8938 /* TODO: Find real max RSSI and stick here */ 8939 range->max_qual.level = 0; 8940 range->max_qual.noise = 0; 8941 range->max_qual.updated = 7; /* Updated all three */ 8942 8943 range->avg_qual.qual = 70; 8944 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ 8945 range->avg_qual.level = 0; /* FIXME to real average level */ 8946 range->avg_qual.noise = 0; 8947 range->avg_qual.updated = 7; /* Updated all three */ 8948 mutex_lock(&priv->mutex); 8949 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES); 8950 8951 for (i = 0; i < range->num_bitrates; i++) 8952 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) * 8953 500000; 8954 8955 range->max_rts = DEFAULT_RTS_THRESHOLD; 8956 range->min_frag = MIN_FRAG_THRESHOLD; 8957 range->max_frag = MAX_FRAG_THRESHOLD; 8958 8959 range->encoding_size[0] = 5; 8960 range->encoding_size[1] = 13; 8961 range->num_encoding_sizes = 2; 8962 range->max_encoding_tokens = WEP_KEYS; 8963 8964 /* Set the Wireless Extension versions */ 8965 range->we_version_compiled = WIRELESS_EXT; 8966 range->we_version_source = 18; 8967 8968 i = 0; 8969 if (priv->ieee->mode & (IEEE_B | IEEE_G)) { 8970 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) { 8971 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 8972 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY)) 8973 continue; 8974 8975 range->freq[i].i = geo->bg[j].channel; 8976 range->freq[i].m = geo->bg[j].freq * 100000; 8977 range->freq[i].e = 1; 8978 i++; 8979 } 8980 } 8981 8982 if (priv->ieee->mode & IEEE_A) { 8983 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) { 8984 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 8985 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY)) 8986 continue; 8987 8988 range->freq[i].i = geo->a[j].channel; 8989 range->freq[i].m = geo->a[j].freq * 100000; 8990 range->freq[i].e = 1; 8991 i++; 8992 } 8993 } 8994 8995 range->num_channels = i; 8996 range->num_frequency = i; 8997 8998 mutex_unlock(&priv->mutex); 8999 9000 /* Event capability (kernel + driver) */ 9001 range->event_capa[0] = (IW_EVENT_CAPA_K_0 | 9002 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | 9003 IW_EVENT_CAPA_MASK(SIOCGIWAP) | 9004 IW_EVENT_CAPA_MASK(SIOCGIWSCAN)); 9005 range->event_capa[1] = IW_EVENT_CAPA_K_1; 9006 9007 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | 9008 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; 9009 9010 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE; 9011 9012 IPW_DEBUG_WX("GET Range\n"); 9013 return 0; 9014} 9015 9016static int ipw_wx_set_wap(struct net_device *dev, 9017 struct iw_request_info *info, 9018 union iwreq_data *wrqu, char *extra) 9019{ 9020 struct ipw_priv *priv = libipw_priv(dev); 9021 9022 static const unsigned char any[] = { 9023 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 9024 }; 9025 static const unsigned char off[] = { 9026 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 9027 }; 9028 9029 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 9030 return -EINVAL; 9031 mutex_lock(&priv->mutex); 9032 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) || 9033 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) { 9034 /* we disable 
mandatory BSSID association */ 9035 IPW_DEBUG_WX("Setting AP BSSID to ANY\n"); 9036 priv->config &= ~CFG_STATIC_BSSID; 9037 IPW_DEBUG_ASSOC("Attempting to associate with new " 9038 "parameters.\n"); 9039 ipw_associate(priv); 9040 mutex_unlock(&priv->mutex); 9041 return 0; 9042 } 9043 9044 priv->config |= CFG_STATIC_BSSID; 9045 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) { 9046 IPW_DEBUG_WX("BSSID set to current BSSID.\n"); 9047 mutex_unlock(&priv->mutex); 9048 return 0; 9049 } 9050 9051 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n", 9052 wrqu->ap_addr.sa_data); 9053 9054 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); 9055 9056 /* Network configuration changed -- force [re]association */ 9057 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n"); 9058 if (!ipw_disassociate(priv)) 9059 ipw_associate(priv); 9060 9061 mutex_unlock(&priv->mutex); 9062 return 0; 9063} 9064 9065static int ipw_wx_get_wap(struct net_device *dev, 9066 struct iw_request_info *info, 9067 union iwreq_data *wrqu, char *extra) 9068{ 9069 struct ipw_priv *priv = libipw_priv(dev); 9070 9071 /* If we are associated, trying to associate, or have a statically 9072 * configured BSSID then return that; otherwise return ANY */ 9073 mutex_lock(&priv->mutex); 9074 if (priv->config & CFG_STATIC_BSSID || 9075 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 9076 wrqu->ap_addr.sa_family = ARPHRD_ETHER; 9077 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); 9078 } else 9079 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); 9080 9081 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", 9082 wrqu->ap_addr.sa_data); 9083 mutex_unlock(&priv->mutex); 9084 return 0; 9085} 9086 9087static int ipw_wx_set_essid(struct net_device *dev, 9088 struct iw_request_info *info, 9089 union iwreq_data *wrqu, char *extra) 9090{ 9091 struct ipw_priv *priv = libipw_priv(dev); 9092 int length; 9093 DECLARE_SSID_BUF(ssid); 9094 9095 mutex_lock(&priv->mutex); 9096 9097 if (!wrqu->essid.flags) 9098 { 9099 IPW_DEBUG_WX("Setting ESSID to ANY\n"); 9100 ipw_disassociate(priv); 9101 priv->config &= ~CFG_STATIC_ESSID; 9102 ipw_associate(priv); 9103 mutex_unlock(&priv->mutex); 9104 return 0; 9105 } 9106 9107 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE); 9108 9109 priv->config |= CFG_STATIC_ESSID; 9110 9111 if (priv->essid_len == length && !memcmp(priv->essid, extra, length) 9112 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) { 9113 IPW_DEBUG_WX("ESSID set to current ESSID.\n"); 9114 mutex_unlock(&priv->mutex); 9115 return 0; 9116 } 9117 9118 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", 9119 print_ssid(ssid, extra, length), length); 9120 9121 priv->essid_len = length; 9122 memcpy(priv->essid, extra, priv->essid_len); 9123 9124 /* Network configuration changed -- force [re]association */ 9125 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n"); 9126 if (!ipw_disassociate(priv)) 9127 ipw_associate(priv); 9128 9129 mutex_unlock(&priv->mutex); 9130 return 0; 9131} 9132 9133static int ipw_wx_get_essid(struct net_device *dev, 9134 struct iw_request_info *info, 9135 union iwreq_data *wrqu, char *extra) 9136{ 9137 struct ipw_priv *priv = libipw_priv(dev); 9138 DECLARE_SSID_BUF(ssid); 9139 9140 /* If we are associated, trying to associate, or have a statically 9141 * configured ESSID then return that; otherwise return ANY */ 9142 mutex_lock(&priv->mutex); 9143 if (priv->config & CFG_STATIC_ESSID || 9144 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 9145 IPW_DEBUG_WX("Getting essid: '%s'\n", 
9146 print_ssid(ssid, priv->essid, priv->essid_len)); 9147 memcpy(extra, priv->essid, priv->essid_len); 9148 wrqu->essid.length = priv->essid_len; 9149 wrqu->essid.flags = 1; /* active */ 9150 } else { 9151 IPW_DEBUG_WX("Getting essid: ANY\n"); 9152 wrqu->essid.length = 0; 9153 wrqu->essid.flags = 0; /* active */ 9154 } 9155 mutex_unlock(&priv->mutex); 9156 return 0; 9157} 9158 9159static int ipw_wx_set_nick(struct net_device *dev, 9160 struct iw_request_info *info, 9161 union iwreq_data *wrqu, char *extra) 9162{ 9163 struct ipw_priv *priv = libipw_priv(dev); 9164 9165 IPW_DEBUG_WX("Setting nick to '%s'\n", extra); 9166 if (wrqu->data.length > IW_ESSID_MAX_SIZE) 9167 return -E2BIG; 9168 mutex_lock(&priv->mutex); 9169 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); 9170 memset(priv->nick, 0, sizeof(priv->nick)); 9171 memcpy(priv->nick, extra, wrqu->data.length); 9172 IPW_DEBUG_TRACE("<<\n"); 9173 mutex_unlock(&priv->mutex); 9174 return 0; 9175 9176} 9177 9178static int ipw_wx_get_nick(struct net_device *dev, 9179 struct iw_request_info *info, 9180 union iwreq_data *wrqu, char *extra) 9181{ 9182 struct ipw_priv *priv = libipw_priv(dev); 9183 IPW_DEBUG_WX("Getting nick\n"); 9184 mutex_lock(&priv->mutex); 9185 wrqu->data.length = strlen(priv->nick); 9186 memcpy(extra, priv->nick, wrqu->data.length); 9187 wrqu->data.flags = 1; /* active */ 9188 mutex_unlock(&priv->mutex); 9189 return 0; 9190} 9191 9192static int ipw_wx_set_sens(struct net_device *dev, 9193 struct iw_request_info *info, 9194 union iwreq_data *wrqu, char *extra) 9195{ 9196 struct ipw_priv *priv = libipw_priv(dev); 9197 int err = 0; 9198 9199 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value); 9200 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value); 9201 mutex_lock(&priv->mutex); 9202 9203 if (wrqu->sens.fixed == 0) 9204 { 9205 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT; 9206 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT; 9207 goto out; 9208 } 9209 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) || 9210 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) { 9211 err = -EINVAL; 9212 goto out; 9213 } 9214 9215 priv->roaming_threshold = wrqu->sens.value; 9216 priv->disassociate_threshold = 3*wrqu->sens.value; 9217 out: 9218 mutex_unlock(&priv->mutex); 9219 return err; 9220} 9221 9222static int ipw_wx_get_sens(struct net_device *dev, 9223 struct iw_request_info *info, 9224 union iwreq_data *wrqu, char *extra) 9225{ 9226 struct ipw_priv *priv = libipw_priv(dev); 9227 mutex_lock(&priv->mutex); 9228 wrqu->sens.fixed = 1; 9229 wrqu->sens.value = priv->roaming_threshold; 9230 mutex_unlock(&priv->mutex); 9231 9232 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n", 9233 wrqu->power.disabled ? 
"OFF" : "ON", wrqu->power.value); 9234 9235 return 0; 9236} 9237 9238static int ipw_wx_set_rate(struct net_device *dev, 9239 struct iw_request_info *info, 9240 union iwreq_data *wrqu, char *extra) 9241{ 9242 /* TODO: We should use semaphores or locks for access to priv */ 9243 struct ipw_priv *priv = libipw_priv(dev); 9244 u32 target_rate = wrqu->bitrate.value; 9245 u32 fixed, mask; 9246 9247 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */ 9248 /* value = X, fixed = 1 means only rate X */ 9249 /* value = X, fixed = 0 means all rates lower equal X */ 9250 9251 if (target_rate == -1) { 9252 fixed = 0; 9253 mask = LIBIPW_DEFAULT_RATES_MASK; 9254 /* Now we should reassociate */ 9255 goto apply; 9256 } 9257 9258 mask = 0; 9259 fixed = wrqu->bitrate.fixed; 9260 9261 if (target_rate == 1000000 || !fixed) 9262 mask |= LIBIPW_CCK_RATE_1MB_MASK; 9263 if (target_rate == 1000000) 9264 goto apply; 9265 9266 if (target_rate == 2000000 || !fixed) 9267 mask |= LIBIPW_CCK_RATE_2MB_MASK; 9268 if (target_rate == 2000000) 9269 goto apply; 9270 9271 if (target_rate == 5500000 || !fixed) 9272 mask |= LIBIPW_CCK_RATE_5MB_MASK; 9273 if (target_rate == 5500000) 9274 goto apply; 9275 9276 if (target_rate == 6000000 || !fixed) 9277 mask |= LIBIPW_OFDM_RATE_6MB_MASK; 9278 if (target_rate == 6000000) 9279 goto apply; 9280 9281 if (target_rate == 9000000 || !fixed) 9282 mask |= LIBIPW_OFDM_RATE_9MB_MASK; 9283 if (target_rate == 9000000) 9284 goto apply; 9285 9286 if (target_rate == 11000000 || !fixed) 9287 mask |= LIBIPW_CCK_RATE_11MB_MASK; 9288 if (target_rate == 11000000) 9289 goto apply; 9290 9291 if (target_rate == 12000000 || !fixed) 9292 mask |= LIBIPW_OFDM_RATE_12MB_MASK; 9293 if (target_rate == 12000000) 9294 goto apply; 9295 9296 if (target_rate == 18000000 || !fixed) 9297 mask |= LIBIPW_OFDM_RATE_18MB_MASK; 9298 if (target_rate == 18000000) 9299 goto apply; 9300 9301 if (target_rate == 24000000 || !fixed) 9302 mask |= LIBIPW_OFDM_RATE_24MB_MASK; 9303 if (target_rate == 24000000) 9304 goto apply; 9305 9306 if (target_rate == 36000000 || !fixed) 9307 mask |= LIBIPW_OFDM_RATE_36MB_MASK; 9308 if (target_rate == 36000000) 9309 goto apply; 9310 9311 if (target_rate == 48000000 || !fixed) 9312 mask |= LIBIPW_OFDM_RATE_48MB_MASK; 9313 if (target_rate == 48000000) 9314 goto apply; 9315 9316 if (target_rate == 54000000 || !fixed) 9317 mask |= LIBIPW_OFDM_RATE_54MB_MASK; 9318 if (target_rate == 54000000) 9319 goto apply; 9320 9321 IPW_DEBUG_WX("invalid rate specified, returning error\n"); 9322 return -EINVAL; 9323 9324 apply: 9325 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n", 9326 mask, fixed ? 
"fixed" : "sub-rates"); 9327 mutex_lock(&priv->mutex); 9328 if (mask == LIBIPW_DEFAULT_RATES_MASK) { 9329 priv->config &= ~CFG_FIXED_RATE; 9330 ipw_set_fixed_rate(priv, priv->ieee->mode); 9331 } else 9332 priv->config |= CFG_FIXED_RATE; 9333 9334 if (priv->rates_mask == mask) { 9335 IPW_DEBUG_WX("Mask set to current mask.\n"); 9336 mutex_unlock(&priv->mutex); 9337 return 0; 9338 } 9339 9340 priv->rates_mask = mask; 9341 9342 /* Network configuration changed -- force [re]association */ 9343 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n"); 9344 if (!ipw_disassociate(priv)) 9345 ipw_associate(priv); 9346 9347 mutex_unlock(&priv->mutex); 9348 return 0; 9349} 9350 9351static int ipw_wx_get_rate(struct net_device *dev, 9352 struct iw_request_info *info, 9353 union iwreq_data *wrqu, char *extra) 9354{ 9355 struct ipw_priv *priv = libipw_priv(dev); 9356 mutex_lock(&priv->mutex); 9357 wrqu->bitrate.value = priv->last_rate; 9358 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0; 9359 mutex_unlock(&priv->mutex); 9360 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); 9361 return 0; 9362} 9363 9364static int ipw_wx_set_rts(struct net_device *dev, 9365 struct iw_request_info *info, 9366 union iwreq_data *wrqu, char *extra) 9367{ 9368 struct ipw_priv *priv = libipw_priv(dev); 9369 mutex_lock(&priv->mutex); 9370 if (wrqu->rts.disabled || !wrqu->rts.fixed) 9371 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 9372 else { 9373 if (wrqu->rts.value < MIN_RTS_THRESHOLD || 9374 wrqu->rts.value > MAX_RTS_THRESHOLD) { 9375 mutex_unlock(&priv->mutex); 9376 return -EINVAL; 9377 } 9378 priv->rts_threshold = wrqu->rts.value; 9379 } 9380 9381 ipw_send_rts_threshold(priv, priv->rts_threshold); 9382 mutex_unlock(&priv->mutex); 9383 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold); 9384 return 0; 9385} 9386 9387static int ipw_wx_get_rts(struct net_device *dev, 9388 struct iw_request_info *info, 9389 union iwreq_data *wrqu, char *extra) 9390{ 9391 struct ipw_priv *priv = libipw_priv(dev); 9392 mutex_lock(&priv->mutex); 9393 wrqu->rts.value = priv->rts_threshold; 9394 wrqu->rts.fixed = 0; /* no auto select */ 9395 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); 9396 mutex_unlock(&priv->mutex); 9397 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); 9398 return 0; 9399} 9400 9401static int ipw_wx_set_txpow(struct net_device *dev, 9402 struct iw_request_info *info, 9403 union iwreq_data *wrqu, char *extra) 9404{ 9405 struct ipw_priv *priv = libipw_priv(dev); 9406 int err = 0; 9407 9408 mutex_lock(&priv->mutex); 9409 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) { 9410 err = -EINPROGRESS; 9411 goto out; 9412 } 9413 9414 if (!wrqu->power.fixed) 9415 wrqu->power.value = IPW_TX_POWER_DEFAULT; 9416 9417 if (wrqu->power.flags != IW_TXPOW_DBM) { 9418 err = -EINVAL; 9419 goto out; 9420 } 9421 9422 if ((wrqu->power.value > IPW_TX_POWER_MAX) || 9423 (wrqu->power.value < IPW_TX_POWER_MIN)) { 9424 err = -EINVAL; 9425 goto out; 9426 } 9427 9428 priv->tx_power = wrqu->power.value; 9429 err = ipw_set_tx_power(priv); 9430 out: 9431 mutex_unlock(&priv->mutex); 9432 return err; 9433} 9434 9435static int ipw_wx_get_txpow(struct net_device *dev, 9436 struct iw_request_info *info, 9437 union iwreq_data *wrqu, char *extra) 9438{ 9439 struct ipw_priv *priv = libipw_priv(dev); 9440 mutex_lock(&priv->mutex); 9441 wrqu->power.value = priv->tx_power; 9442 wrqu->power.fixed = 1; 9443 wrqu->power.flags = IW_TXPOW_DBM; 9444 wrqu->power.disabled = (priv->status & 
STATUS_RF_KILL_MASK) ? 1 : 0; 9445 mutex_unlock(&priv->mutex); 9446 9447 IPW_DEBUG_WX("GET TX Power -> %s %d \n", 9448 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); 9449 9450 return 0; 9451} 9452 9453static int ipw_wx_set_frag(struct net_device *dev, 9454 struct iw_request_info *info, 9455 union iwreq_data *wrqu, char *extra) 9456{ 9457 struct ipw_priv *priv = libipw_priv(dev); 9458 mutex_lock(&priv->mutex); 9459 if (wrqu->frag.disabled || !wrqu->frag.fixed) 9460 priv->ieee->fts = DEFAULT_FTS; 9461 else { 9462 if (wrqu->frag.value < MIN_FRAG_THRESHOLD || 9463 wrqu->frag.value > MAX_FRAG_THRESHOLD) { 9464 mutex_unlock(&priv->mutex); 9465 return -EINVAL; 9466 } 9467 9468 priv->ieee->fts = wrqu->frag.value & ~0x1; 9469 } 9470 9471 ipw_send_frag_threshold(priv, wrqu->frag.value); 9472 mutex_unlock(&priv->mutex); 9473 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value); 9474 return 0; 9475} 9476 9477static int ipw_wx_get_frag(struct net_device *dev, 9478 struct iw_request_info *info, 9479 union iwreq_data *wrqu, char *extra) 9480{ 9481 struct ipw_priv *priv = libipw_priv(dev); 9482 mutex_lock(&priv->mutex); 9483 wrqu->frag.value = priv->ieee->fts; 9484 wrqu->frag.fixed = 0; /* no auto select */ 9485 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); 9486 mutex_unlock(&priv->mutex); 9487 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); 9488 9489 return 0; 9490} 9491 9492static int ipw_wx_set_retry(struct net_device *dev, 9493 struct iw_request_info *info, 9494 union iwreq_data *wrqu, char *extra) 9495{ 9496 struct ipw_priv *priv = libipw_priv(dev); 9497 9498 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled) 9499 return -EINVAL; 9500 9501 if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) 9502 return 0; 9503 9504 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255) 9505 return -EINVAL; 9506 9507 mutex_lock(&priv->mutex); 9508 if (wrqu->retry.flags & IW_RETRY_SHORT) 9509 priv->short_retry_limit = (u8) wrqu->retry.value; 9510 else if (wrqu->retry.flags & IW_RETRY_LONG) 9511 priv->long_retry_limit = (u8) wrqu->retry.value; 9512 else { 9513 priv->short_retry_limit = (u8) wrqu->retry.value; 9514 priv->long_retry_limit = (u8) wrqu->retry.value; 9515 } 9516 9517 ipw_send_retry_limit(priv, priv->short_retry_limit, 9518 priv->long_retry_limit); 9519 mutex_unlock(&priv->mutex); 9520 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n", 9521 priv->short_retry_limit, priv->long_retry_limit); 9522 return 0; 9523} 9524 9525static int ipw_wx_get_retry(struct net_device *dev, 9526 struct iw_request_info *info, 9527 union iwreq_data *wrqu, char *extra) 9528{ 9529 struct ipw_priv *priv = libipw_priv(dev); 9530 9531 mutex_lock(&priv->mutex); 9532 wrqu->retry.disabled = 0; 9533 9534 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { 9535 mutex_unlock(&priv->mutex); 9536 return -EINVAL; 9537 } 9538 9539 if (wrqu->retry.flags & IW_RETRY_LONG) { 9540 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG; 9541 wrqu->retry.value = priv->long_retry_limit; 9542 } else if (wrqu->retry.flags & IW_RETRY_SHORT) { 9543 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT; 9544 wrqu->retry.value = priv->short_retry_limit; 9545 } else { 9546 wrqu->retry.flags = IW_RETRY_LIMIT; 9547 wrqu->retry.value = priv->short_retry_limit; 9548 } 9549 mutex_unlock(&priv->mutex); 9550 9551 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value); 9552 9553 return 0; 9554} 9555 9556static int ipw_wx_set_scan(struct net_device *dev, 9557 struct iw_request_info *info, 9558 union iwreq_data 
*wrqu, char *extra) 9559{ 9560 struct ipw_priv *priv = libipw_priv(dev); 9561 struct iw_scan_req *req = (struct iw_scan_req *)extra; 9562 struct delayed_work *work = NULL; 9563 9564 mutex_lock(&priv->mutex); 9565 9566 priv->user_requested_scan = 1; 9567 9568 if (wrqu->data.length == sizeof(struct iw_scan_req)) { 9569 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { 9570 int len = min((int)req->essid_len, 9571 (int)sizeof(priv->direct_scan_ssid)); 9572 memcpy(priv->direct_scan_ssid, req->essid, len); 9573 priv->direct_scan_ssid_len = len; 9574 work = &priv->request_direct_scan; 9575 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) { 9576 work = &priv->request_passive_scan; 9577 } 9578 } else { 9579 /* Normal active broadcast scan */ 9580 work = &priv->request_scan; 9581 } 9582 9583 mutex_unlock(&priv->mutex); 9584 9585 IPW_DEBUG_WX("Start scan\n"); 9586 9587 queue_delayed_work(priv->workqueue, work, 0); 9588 9589 return 0; 9590} 9591 9592static int ipw_wx_get_scan(struct net_device *dev, 9593 struct iw_request_info *info, 9594 union iwreq_data *wrqu, char *extra) 9595{ 9596 struct ipw_priv *priv = libipw_priv(dev); 9597 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra); 9598} 9599 9600static int ipw_wx_set_encode(struct net_device *dev, 9601 struct iw_request_info *info, 9602 union iwreq_data *wrqu, char *key) 9603{ 9604 struct ipw_priv *priv = libipw_priv(dev); 9605 int ret; 9606 u32 cap = priv->capability; 9607 9608 mutex_lock(&priv->mutex); 9609 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key); 9610 9611 /* In IBSS mode, we need to notify the firmware to update 9612 * the beacon info after we changed the capability. */ 9613 if (cap != priv->capability && 9614 priv->ieee->iw_mode == IW_MODE_ADHOC && 9615 priv->status & STATUS_ASSOCIATED) 9616 ipw_disassociate(priv); 9617 9618 mutex_unlock(&priv->mutex); 9619 return ret; 9620} 9621 9622static int ipw_wx_get_encode(struct net_device *dev, 9623 struct iw_request_info *info, 9624 union iwreq_data *wrqu, char *key) 9625{ 9626 struct ipw_priv *priv = libipw_priv(dev); 9627 return libipw_wx_get_encode(priv->ieee, info, wrqu, key); 9628} 9629 9630static int ipw_wx_set_power(struct net_device *dev, 9631 struct iw_request_info *info, 9632 union iwreq_data *wrqu, char *extra) 9633{ 9634 struct ipw_priv *priv = libipw_priv(dev); 9635 int err; 9636 mutex_lock(&priv->mutex); 9637 if (wrqu->power.disabled) { 9638 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); 9639 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM); 9640 if (err) { 9641 IPW_DEBUG_WX("failed setting power mode.\n"); 9642 mutex_unlock(&priv->mutex); 9643 return err; 9644 } 9645 IPW_DEBUG_WX("SET Power Management Mode -> off\n"); 9646 mutex_unlock(&priv->mutex); 9647 return 0; 9648 } 9649 9650 switch (wrqu->power.flags & IW_POWER_MODE) { 9651 case IW_POWER_ON: /* If not specified */ 9652 case IW_POWER_MODE: /* If set all mask */ 9653 case IW_POWER_ALL_R: /* If explicitly state all */ 9654 break; 9655 default: /* Otherwise we don't support it */ 9656 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", 9657 wrqu->power.flags); 9658 mutex_unlock(&priv->mutex); 9659 return -EOPNOTSUPP; 9660 } 9661 9662 /* If the user hasn't specified a power management mode yet, default 9663 * to BATTERY */ 9664 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC) 9665 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY; 9666 else 9667 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode; 9668 9669 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); 9670 if (err) { 
9671 IPW_DEBUG_WX("failed setting power mode.\n"); 9672 mutex_unlock(&priv->mutex); 9673 return err; 9674 } 9675 9676 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); 9677 mutex_unlock(&priv->mutex); 9678 return 0; 9679} 9680 9681static int ipw_wx_get_power(struct net_device *dev, 9682 struct iw_request_info *info, 9683 union iwreq_data *wrqu, char *extra) 9684{ 9685 struct ipw_priv *priv = libipw_priv(dev); 9686 mutex_lock(&priv->mutex); 9687 if (!(priv->power_mode & IPW_POWER_ENABLED)) 9688 wrqu->power.disabled = 1; 9689 else 9690 wrqu->power.disabled = 0; 9691 9692 mutex_unlock(&priv->mutex); 9693 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); 9694 9695 return 0; 9696} 9697 9698static int ipw_wx_set_powermode(struct net_device *dev, 9699 struct iw_request_info *info, 9700 union iwreq_data *wrqu, char *extra) 9701{ 9702 struct ipw_priv *priv = libipw_priv(dev); 9703 int mode = *(int *)extra; 9704 int err; 9705 9706 mutex_lock(&priv->mutex); 9707 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) 9708 mode = IPW_POWER_AC; 9709 9710 if (IPW_POWER_LEVEL(priv->power_mode) != mode) { 9711 err = ipw_send_power_mode(priv, mode); 9712 if (err) { 9713 IPW_DEBUG_WX("failed setting power mode.\n"); 9714 mutex_unlock(&priv->mutex); 9715 return err; 9716 } 9717 priv->power_mode = IPW_POWER_ENABLED | mode; 9718 } 9719 mutex_unlock(&priv->mutex); 9720 return 0; 9721} 9722 9723#define MAX_WX_STRING 80 9724static int ipw_wx_get_powermode(struct net_device *dev, 9725 struct iw_request_info *info, 9726 union iwreq_data *wrqu, char *extra) 9727{ 9728 struct ipw_priv *priv = libipw_priv(dev); 9729 int level = IPW_POWER_LEVEL(priv->power_mode); 9730 char *p = extra; 9731 9732 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level); 9733 9734 switch (level) { 9735 case IPW_POWER_AC: 9736 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)"); 9737 break; 9738 case IPW_POWER_BATTERY: 9739 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)"); 9740 break; 9741 default: 9742 p += snprintf(p, MAX_WX_STRING - (p - extra), 9743 "(Timeout %dms, Period %dms)", 9744 timeout_duration[level - 1] / 1000, 9745 period_duration[level - 1] / 1000); 9746 } 9747 9748 if (!(priv->power_mode & IPW_POWER_ENABLED)) 9749 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF"); 9750 9751 wrqu->data.length = p - extra + 1; 9752 9753 return 0; 9754} 9755 9756static int ipw_wx_set_wireless_mode(struct net_device *dev, 9757 struct iw_request_info *info, 9758 union iwreq_data *wrqu, char *extra) 9759{ 9760 struct ipw_priv *priv = libipw_priv(dev); 9761 int mode = *(int *)extra; 9762 u8 band = 0, modulation = 0; 9763 9764 if (mode == 0 || mode & ~IEEE_MODE_MASK) { 9765 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode); 9766 return -EINVAL; 9767 } 9768 mutex_lock(&priv->mutex); 9769 if (priv->adapter == IPW_2915ABG) { 9770 priv->ieee->abg_true = 1; 9771 if (mode & IEEE_A) { 9772 band |= LIBIPW_52GHZ_BAND; 9773 modulation |= LIBIPW_OFDM_MODULATION; 9774 } else 9775 priv->ieee->abg_true = 0; 9776 } else { 9777 if (mode & IEEE_A) { 9778 IPW_WARNING("Attempt to set 2200BG into " 9779 "802.11a mode\n"); 9780 mutex_unlock(&priv->mutex); 9781 return -EINVAL; 9782 } 9783 9784 priv->ieee->abg_true = 0; 9785 } 9786 9787 if (mode & IEEE_B) { 9788 band |= LIBIPW_24GHZ_BAND; 9789 modulation |= LIBIPW_CCK_MODULATION; 9790 } else 9791 priv->ieee->abg_true = 0; 9792 9793 if (mode & IEEE_G) { 9794 band |= LIBIPW_24GHZ_BAND; 9795 modulation |= LIBIPW_OFDM_MODULATION; 9796 } else 9797 
priv->ieee->abg_true = 0; 9798 9799 priv->ieee->mode = mode; 9800 priv->ieee->freq_band = band; 9801 priv->ieee->modulation = modulation; 9802 init_supported_rates(priv, &priv->rates); 9803 9804 /* Network configuration changed -- force [re]association */ 9805 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n"); 9806 if (!ipw_disassociate(priv)) { 9807 ipw_send_supported_rates(priv, &priv->rates); 9808 ipw_associate(priv); 9809 } 9810 9811 /* Update the band LEDs */ 9812 ipw_led_band_on(priv); 9813 9814 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", 9815 mode & IEEE_A ? 'a' : '.', 9816 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.'); 9817 mutex_unlock(&priv->mutex); 9818 return 0; 9819} 9820 9821static int ipw_wx_get_wireless_mode(struct net_device *dev, 9822 struct iw_request_info *info, 9823 union iwreq_data *wrqu, char *extra) 9824{ 9825 struct ipw_priv *priv = libipw_priv(dev); 9826 mutex_lock(&priv->mutex); 9827 switch (priv->ieee->mode) { 9828 case IEEE_A: 9829 strncpy(extra, "802.11a (1)", MAX_WX_STRING); 9830 break; 9831 case IEEE_B: 9832 strncpy(extra, "802.11b (2)", MAX_WX_STRING); 9833 break; 9834 case IEEE_A | IEEE_B: 9835 strncpy(extra, "802.11ab (3)", MAX_WX_STRING); 9836 break; 9837 case IEEE_G: 9838 strncpy(extra, "802.11g (4)", MAX_WX_STRING); 9839 break; 9840 case IEEE_A | IEEE_G: 9841 strncpy(extra, "802.11ag (5)", MAX_WX_STRING); 9842 break; 9843 case IEEE_B | IEEE_G: 9844 strncpy(extra, "802.11bg (6)", MAX_WX_STRING); 9845 break; 9846 case IEEE_A | IEEE_B | IEEE_G: 9847 strncpy(extra, "802.11abg (7)", MAX_WX_STRING); 9848 break; 9849 default: 9850 strncpy(extra, "unknown", MAX_WX_STRING); 9851 break; 9852 } 9853 9854 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); 9855 9856 wrqu->data.length = strlen(extra) + 1; 9857 mutex_unlock(&priv->mutex); 9858 9859 return 0; 9860} 9861 9862static int ipw_wx_set_preamble(struct net_device *dev, 9863 struct iw_request_info *info, 9864 union iwreq_data *wrqu, char *extra) 9865{ 9866 struct ipw_priv *priv = libipw_priv(dev); 9867 int mode = *(int *)extra; 9868 mutex_lock(&priv->mutex); 9869 /* Switching from SHORT -> LONG requires a disassociation */ 9870 if (mode == 1) { 9871 if (!(priv->config & CFG_PREAMBLE_LONG)) { 9872 priv->config |= CFG_PREAMBLE_LONG; 9873 9874 /* Network configuration changed -- force [re]association */ 9875 IPW_DEBUG_ASSOC 9876 ("[re]association triggered due to preamble change.\n"); 9877 if (!ipw_disassociate(priv)) 9878 ipw_associate(priv); 9879 } 9880 goto done; 9881 } 9882 9883 if (mode == 0) { 9884 priv->config &= ~CFG_PREAMBLE_LONG; 9885 goto done; 9886 } 9887 mutex_unlock(&priv->mutex); 9888 return -EINVAL; 9889 9890 done: 9891 mutex_unlock(&priv->mutex); 9892 return 0; 9893} 9894 9895static int ipw_wx_get_preamble(struct net_device *dev, 9896 struct iw_request_info *info, 9897 union iwreq_data *wrqu, char *extra) 9898{ 9899 struct ipw_priv *priv = libipw_priv(dev); 9900 mutex_lock(&priv->mutex); 9901 if (priv->config & CFG_PREAMBLE_LONG) 9902 snprintf(wrqu->name, IFNAMSIZ, "long (1)"); 9903 else 9904 snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); 9905 mutex_unlock(&priv->mutex); 9906 return 0; 9907} 9908 9909#ifdef CONFIG_IPW2200_MONITOR 9910static int ipw_wx_set_monitor(struct net_device *dev, 9911 struct iw_request_info *info, 9912 union iwreq_data *wrqu, char *extra) 9913{ 9914 struct ipw_priv *priv = libipw_priv(dev); 9915 int *parms = (int *)extra; 9916 int enable = (parms[0] > 0); 9917 mutex_lock(&priv->mutex); 9918 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]); 9919 if (enable) 
{ 9920 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 9921#ifdef CONFIG_IPW2200_RADIOTAP 9922 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 9923#else 9924 priv->net_dev->type = ARPHRD_IEEE80211; 9925#endif 9926 queue_work(priv->workqueue, &priv->adapter_restart); 9927 } 9928 9929 ipw_set_channel(priv, parms[1]); 9930 } else { 9931 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 9932 mutex_unlock(&priv->mutex); 9933 return 0; 9934 } 9935 priv->net_dev->type = ARPHRD_ETHER; 9936 queue_work(priv->workqueue, &priv->adapter_restart); 9937 } 9938 mutex_unlock(&priv->mutex); 9939 return 0; 9940} 9941 9942#endif /* CONFIG_IPW2200_MONITOR */ 9943 9944static int ipw_wx_reset(struct net_device *dev, 9945 struct iw_request_info *info, 9946 union iwreq_data *wrqu, char *extra) 9947{ 9948 struct ipw_priv *priv = libipw_priv(dev); 9949 IPW_DEBUG_WX("RESET\n"); 9950 queue_work(priv->workqueue, &priv->adapter_restart); 9951 return 0; 9952} 9953 9954static int ipw_wx_sw_reset(struct net_device *dev, 9955 struct iw_request_info *info, 9956 union iwreq_data *wrqu, char *extra) 9957{ 9958 struct ipw_priv *priv = libipw_priv(dev); 9959 union iwreq_data wrqu_sec = { 9960 .encoding = { 9961 .flags = IW_ENCODE_DISABLED, 9962 }, 9963 }; 9964 int ret; 9965 9966 IPW_DEBUG_WX("SW_RESET\n"); 9967 9968 mutex_lock(&priv->mutex); 9969 9970 ret = ipw_sw_reset(priv, 2); 9971 if (!ret) { 9972 free_firmware(); 9973 ipw_adapter_restart(priv); 9974 } 9975 9976 /* The SW reset bit might have been toggled on by the 'disable' 9977 * module parameter, so take appropriate action */ 9978 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW); 9979 9980 mutex_unlock(&priv->mutex); 9981 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL); 9982 mutex_lock(&priv->mutex); 9983 9984 if (!(priv->status & STATUS_RF_KILL_MASK)) { 9985 /* Configuration likely changed -- force [re]association */ 9986 IPW_DEBUG_ASSOC("[re]association triggered due to sw " 9987 "reset.\n"); 9988 if (!ipw_disassociate(priv)) 9989 ipw_associate(priv); 9990 } 9991 9992 mutex_unlock(&priv->mutex); 9993 9994 return 0; 9995} 9996 9997/* Rebase the WE IOCTLs to zero for the handler array */ 9998#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT] 9999static iw_handler ipw_wx_handlers[] = { 10000 IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname, 10001 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq, 10002 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, 10003 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, 10004 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, 10005 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens, 10006 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens, 10007 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, 10008 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, 10009 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, 10010 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan, 10011 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan, 10012 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid, 10013 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid, 10014 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick, 10015 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick, 10016 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate, 10017 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate, 10018 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts, 10019 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts, 10020 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag, 10021 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag, 10022 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow, 10023 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow, 10024 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry, 10025 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry, 10026 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode, 10027 
IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode, 10028 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power, 10029 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power, 10030 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy, 10031 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy, 10032 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy, 10033 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy, 10034 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie, 10035 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie, 10036 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme, 10037 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth, 10038 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth, 10039 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext, 10040 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext, 10041}; 10042 10043enum { 10044 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV, 10045 IPW_PRIV_GET_POWER, 10046 IPW_PRIV_SET_MODE, 10047 IPW_PRIV_GET_MODE, 10048 IPW_PRIV_SET_PREAMBLE, 10049 IPW_PRIV_GET_PREAMBLE, 10050 IPW_PRIV_RESET, 10051 IPW_PRIV_SW_RESET, 10052#ifdef CONFIG_IPW2200_MONITOR 10053 IPW_PRIV_SET_MONITOR, 10054#endif 10055}; 10056 10057static struct iw_priv_args ipw_priv_args[] = { 10058 { 10059 .cmd = IPW_PRIV_SET_POWER, 10060 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 10061 .name = "set_power"}, 10062 { 10063 .cmd = IPW_PRIV_GET_POWER, 10064 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 10065 .name = "get_power"}, 10066 { 10067 .cmd = IPW_PRIV_SET_MODE, 10068 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 10069 .name = "set_mode"}, 10070 { 10071 .cmd = IPW_PRIV_GET_MODE, 10072 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 10073 .name = "get_mode"}, 10074 { 10075 .cmd = IPW_PRIV_SET_PREAMBLE, 10076 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 10077 .name = "set_preamble"}, 10078 { 10079 .cmd = IPW_PRIV_GET_PREAMBLE, 10080 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, 10081 .name = "get_preamble"}, 10082 { 10083 IPW_PRIV_RESET, 10084 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"}, 10085 { 10086 IPW_PRIV_SW_RESET, 10087 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"}, 10088#ifdef CONFIG_IPW2200_MONITOR 10089 { 10090 IPW_PRIV_SET_MONITOR, 10091 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"}, 10092#endif /* CONFIG_IPW2200_MONITOR */ 10093}; 10094 10095static iw_handler ipw_priv_handler[] = { 10096 ipw_wx_set_powermode, 10097 ipw_wx_get_powermode, 10098 ipw_wx_set_wireless_mode, 10099 ipw_wx_get_wireless_mode, 10100 ipw_wx_set_preamble, 10101 ipw_wx_get_preamble, 10102 ipw_wx_reset, 10103 ipw_wx_sw_reset, 10104#ifdef CONFIG_IPW2200_MONITOR 10105 ipw_wx_set_monitor, 10106#endif 10107}; 10108 10109static struct iw_handler_def ipw_wx_handler_def = { 10110 .standard = ipw_wx_handlers, 10111 .num_standard = ARRAY_SIZE(ipw_wx_handlers), 10112 .num_private = ARRAY_SIZE(ipw_priv_handler), 10113 .num_private_args = ARRAY_SIZE(ipw_priv_args), 10114 .private = ipw_priv_handler, 10115 .private_args = ipw_priv_args, 10116 .get_wireless_stats = ipw_get_wireless_stats, 10117}; 10118 10119/* 10120 * Get wireless statistics. 10121 * Called by /proc/net/wireless 10122 * Also called by SIOCGIWSTATS 10123 */ 10124static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev) 10125{ 10126 struct ipw_priv *priv = libipw_priv(dev); 10127 struct iw_statistics *wstats; 10128 10129 wstats = &priv->wstats; 10130 10131 /* if hw is disabled, then ipw_get_ordinal() can't be called. 10132 * netdev->get_wireless_stats seems to be called before fw is 10133 * initialized. 
STATUS_ASSOCIATED will only be set if the hw is up 10134 * and associated; if not associated, the values are all meaningless 10135 * anyway, so set them all to 0 and mark them INVALID */ 10136 if (!(priv->status & STATUS_ASSOCIATED)) { 10137 wstats->miss.beacon = 0; 10138 wstats->discard.retries = 0; 10139 wstats->qual.qual = 0; 10140 wstats->qual.level = 0; 10141 wstats->qual.noise = 0; 10142 wstats->qual.updated = 7; 10143 wstats->qual.updated |= IW_QUAL_NOISE_INVALID | 10144 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; 10145 return wstats; 10146 } 10147 10148 wstats->qual.qual = priv->quality; 10149 wstats->qual.level = priv->exp_avg_rssi; 10150 wstats->qual.noise = priv->exp_avg_noise; 10151 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | 10152 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM; 10153 10154 wstats->miss.beacon = average_value(&priv->average_missed_beacons); 10155 wstats->discard.retries = priv->last_tx_failures; 10156 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable; 10157 10158/* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len)) 10159 goto fail_get_ordinal; 10160 wstats->discard.retries += tx_retry; */ 10161 10162 return wstats; 10163} 10164 10165/* net device stuff */ 10166 10167static void init_sys_config(struct ipw_sys_config *sys_config) 10168{ 10169 memset(sys_config, 0, sizeof(struct ipw_sys_config)); 10170 sys_config->bt_coexistence = 0; 10171 sys_config->answer_broadcast_ssid_probe = 0; 10172 sys_config->accept_all_data_frames = 0; 10173 sys_config->accept_non_directed_frames = 1; 10174 sys_config->exclude_unicast_unencrypted = 0; 10175 sys_config->disable_unicast_decryption = 1; 10176 sys_config->exclude_multicast_unencrypted = 0; 10177 sys_config->disable_multicast_decryption = 1; 10178 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B) 10179 antenna = CFG_SYS_ANTENNA_BOTH; 10180 sys_config->antenna_diversity = antenna; 10181 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ 10182 sys_config->dot11g_auto_detection = 0; 10183 sys_config->enable_cts_to_self = 0; 10184 sys_config->bt_coexist_collision_thr = 0; 10185 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */ 10186 sys_config->silence_threshold = 0x1e; 10187} 10188 10189static int ipw_net_open(struct net_device *dev) 10190{ 10191 IPW_DEBUG_INFO("dev->open\n"); 10192 netif_start_queue(dev); 10193 return 0; 10194} 10195 10196static int ipw_net_stop(struct net_device *dev) 10197{ 10198 IPW_DEBUG_INFO("dev->close\n"); 10199 netif_stop_queue(dev); 10200 return 0; 10201} 10202 10203/* 10204todo: 10205 10206modify to send one tfd per fragment instead of using chunking. otherwise 10207we need to heavily modify the libipw_skb_to_txb. 
10208*/ 10209 10210static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, 10211 int pri) 10212{ 10213 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *) 10214 txb->fragments[0]->data; 10215 int i = 0; 10216 struct tfd_frame *tfd; 10217#ifdef CONFIG_IPW2200_QOS 10218 int tx_id = ipw_get_tx_queue_number(priv, pri); 10219 struct clx2_tx_queue *txq = &priv->txq[tx_id]; 10220#else 10221 struct clx2_tx_queue *txq = &priv->txq[0]; 10222#endif 10223 struct clx2_queue *q = &txq->q; 10224 u8 id, hdr_len, unicast; 10225 int fc; 10226 10227 if (!(priv->status & STATUS_ASSOCIATED)) 10228 goto drop; 10229 10230 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 10231 switch (priv->ieee->iw_mode) { 10232 case IW_MODE_ADHOC: 10233 unicast = !is_multicast_ether_addr(hdr->addr1); 10234 id = ipw_find_station(priv, hdr->addr1); 10235 if (id == IPW_INVALID_STATION) { 10236 id = ipw_add_station(priv, hdr->addr1); 10237 if (id == IPW_INVALID_STATION) { 10238 IPW_WARNING("Attempt to send data to " 10239 "invalid cell: %pM\n", 10240 hdr->addr1); 10241 goto drop; 10242 } 10243 } 10244 break; 10245 10246 case IW_MODE_INFRA: 10247 default: 10248 unicast = !is_multicast_ether_addr(hdr->addr3); 10249 id = 0; 10250 break; 10251 } 10252 10253 tfd = &txq->bd[q->first_empty]; 10254 txq->txb[q->first_empty] = txb; 10255 memset(tfd, 0, sizeof(*tfd)); 10256 tfd->u.data.station_number = id; 10257 10258 tfd->control_flags.message_type = TX_FRAME_TYPE; 10259 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK; 10260 10261 tfd->u.data.cmd_id = DINO_CMD_TX; 10262 tfd->u.data.len = cpu_to_le16(txb->payload_size); 10263 10264 if (priv->assoc_request.ieee_mode == IPW_B_MODE) 10265 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK; 10266 else 10267 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM; 10268 10269 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE) 10270 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE; 10271 10272 fc = le16_to_cpu(hdr->frame_ctl); 10273 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS); 10274 10275 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len); 10276 10277 if (likely(unicast)) 10278 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD; 10279 10280 if (txb->encrypted && !priv->ieee->host_encrypt) { 10281 switch (priv->ieee->sec.level) { 10282 case SEC_LEVEL_3: 10283 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10284 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10285 /* XXX: ACK flag must be set for CCMP even if it 10286 * is a multicast/broadcast packet, because CCMP 10287 * group communication encrypted by GTK is 10288 * actually done by the AP. 
*/ 10289 if (!unicast) 10290 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD; 10291 10292 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; 10293 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM; 10294 tfd->u.data.key_index = 0; 10295 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE; 10296 break; 10297 case SEC_LEVEL_2: 10298 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10299 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10300 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; 10301 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP; 10302 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE; 10303 break; 10304 case SEC_LEVEL_1: 10305 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10306 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10307 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx; 10308 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <= 10309 40) 10310 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit; 10311 else 10312 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit; 10313 break; 10314 case SEC_LEVEL_0: 10315 break; 10316 default: 10317 printk(KERN_ERR "Unknown security level %d\n", 10318 priv->ieee->sec.level); 10319 break; 10320 } 10321 } else 10322 /* No hardware encryption */ 10323 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP; 10324 10325#ifdef CONFIG_IPW2200_QOS 10326 if (fc & IEEE80211_STYPE_QOS_DATA) 10327 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data)); 10328#endif /* CONFIG_IPW2200_QOS */ 10329 10330 /* payload */ 10331 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2), 10332 txb->nr_frags)); 10333 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n", 10334 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks)); 10335 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) { 10336 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n", 10337 i, le32_to_cpu(tfd->u.data.num_chunks), 10338 txb->fragments[i]->len - hdr_len); 10339 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n", 10340 i, tfd->u.data.num_chunks, 10341 txb->fragments[i]->len - hdr_len); 10342 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len, 10343 txb->fragments[i]->len - hdr_len); 10344 10345 tfd->u.data.chunk_ptr[i] = 10346 cpu_to_le32(pci_map_single 10347 (priv->pci_dev, 10348 txb->fragments[i]->data + hdr_len, 10349 txb->fragments[i]->len - hdr_len, 10350 PCI_DMA_TODEVICE)); 10351 tfd->u.data.chunk_len[i] = 10352 cpu_to_le16(txb->fragments[i]->len - hdr_len); 10353 } 10354 10355 if (i != txb->nr_frags) { 10356 struct sk_buff *skb; 10357 u16 remaining_bytes = 0; 10358 int j; 10359 10360 for (j = i; j < txb->nr_frags; j++) 10361 remaining_bytes += txb->fragments[j]->len - hdr_len; 10362 10363 printk(KERN_INFO "Trying to reallocate for %d bytes\n", 10364 remaining_bytes); 10365 skb = alloc_skb(remaining_bytes, GFP_ATOMIC); 10366 if (skb != NULL) { 10367 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes); 10368 for (j = i; j < txb->nr_frags; j++) { 10369 int size = txb->fragments[j]->len - hdr_len; 10370 10371 printk(KERN_INFO "Adding frag %d %d...\n", 10372 j, size); 10373 memcpy(skb_put(skb, size), 10374 txb->fragments[j]->data + hdr_len, size); 10375 } 10376 dev_kfree_skb_any(txb->fragments[i]); 10377 txb->fragments[i] = skb; 10378 tfd->u.data.chunk_ptr[i] = 10379 cpu_to_le32(pci_map_single 10380 (priv->pci_dev, skb->data, 10381 remaining_bytes, 10382 PCI_DMA_TODEVICE)); 10383 10384 le32_add_cpu(&tfd->u.data.num_chunks, 1); 10385 } 10386 } 10387 10388 /* kick DMA */ 10389 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); 10390 ipw_write32(priv, q->reg_w, q->first_empty); 
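	/* Flow-control note: once the write index has been advanced the firmware owns the queued TFDs.  The check below stops the netdev queue when free space in the TX ring falls under the high-water mark; it is presumably woken again when completed TFDs are reclaimed on the TX interrupt path. */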
10391 10392 if (ipw_tx_queue_space(q) < q->high_mark) 10393 netif_stop_queue(priv->net_dev); 10394 10395 return NETDEV_TX_OK; 10396 10397 drop: 10398 IPW_DEBUG_DROP("Silently dropping Tx packet.\n"); 10399 libipw_txb_free(txb); 10400 return NETDEV_TX_OK; 10401} 10402 10403static int ipw_net_is_queue_full(struct net_device *dev, int pri) 10404{ 10405 struct ipw_priv *priv = libipw_priv(dev); 10406#ifdef CONFIG_IPW2200_QOS 10407 int tx_id = ipw_get_tx_queue_number(priv, pri); 10408 struct clx2_tx_queue *txq = &priv->txq[tx_id]; 10409#else 10410 struct clx2_tx_queue *txq = &priv->txq[0]; 10411#endif /* CONFIG_IPW2200_QOS */ 10412 10413 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark) 10414 return 1; 10415 10416 return 0; 10417} 10418 10419#ifdef CONFIG_IPW2200_PROMISCUOUS 10420static void ipw_handle_promiscuous_tx(struct ipw_priv *priv, 10421 struct libipw_txb *txb) 10422{ 10423 struct libipw_rx_stats dummystats; 10424 struct ieee80211_hdr *hdr; 10425 u8 n; 10426 u16 filter = priv->prom_priv->filter; 10427 int hdr_only = 0; 10428 10429 if (filter & IPW_PROM_NO_TX) 10430 return; 10431 10432 memset(&dummystats, 0, sizeof(dummystats)); 10433 10434 /* Filtering of fragment chains is done agains the first fragment */ 10435 hdr = (void *)txb->fragments[0]->data; 10436 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) { 10437 if (filter & IPW_PROM_NO_MGMT) 10438 return; 10439 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 10440 hdr_only = 1; 10441 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) { 10442 if (filter & IPW_PROM_NO_CTL) 10443 return; 10444 if (filter & IPW_PROM_CTL_HEADER_ONLY) 10445 hdr_only = 1; 10446 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) { 10447 if (filter & IPW_PROM_NO_DATA) 10448 return; 10449 if (filter & IPW_PROM_DATA_HEADER_ONLY) 10450 hdr_only = 1; 10451 } 10452 10453 for(n=0; n<txb->nr_frags; ++n) { 10454 struct sk_buff *src = txb->fragments[n]; 10455 struct sk_buff *dst; 10456 struct ieee80211_radiotap_header *rt_hdr; 10457 int len; 10458 10459 if (hdr_only) { 10460 hdr = (void *)src->data; 10461 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control)); 10462 } else 10463 len = src->len; 10464 10465 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC); 10466 if (!dst) 10467 continue; 10468 10469 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr)); 10470 10471 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION; 10472 rt_hdr->it_pad = 0; 10473 rt_hdr->it_present = 0; /* after all, it's just an idea */ 10474 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL); 10475 10476 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16( 10477 ieee80211chan2mhz(priv->channel)); 10478 if (priv->channel > 14) /* 802.11a */ 10479 *(__le16*)skb_put(dst, sizeof(u16)) = 10480 cpu_to_le16(IEEE80211_CHAN_OFDM | 10481 IEEE80211_CHAN_5GHZ); 10482 else if (priv->ieee->mode == IEEE_B) /* 802.11b */ 10483 *(__le16*)skb_put(dst, sizeof(u16)) = 10484 cpu_to_le16(IEEE80211_CHAN_CCK | 10485 IEEE80211_CHAN_2GHZ); 10486 else /* 802.11g */ 10487 *(__le16*)skb_put(dst, sizeof(u16)) = 10488 cpu_to_le16(IEEE80211_CHAN_OFDM | 10489 IEEE80211_CHAN_2GHZ); 10490 10491 rt_hdr->it_len = cpu_to_le16(dst->len); 10492 10493 skb_copy_from_linear_data(src, skb_put(dst, len), len); 10494 10495 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats)) 10496 dev_kfree_skb_any(dst); 10497 } 10498} 10499#endif 10500 10501static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb, 10502 struct net_device *dev, int pri) 10503{ 10504 struct ipw_priv *priv = libipw_priv(dev); 10505 
unsigned long flags; 10506 netdev_tx_t ret; 10507 10508 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size); 10509 spin_lock_irqsave(&priv->lock, flags); 10510 10511#ifdef CONFIG_IPW2200_PROMISCUOUS 10512 if (rtap_iface && netif_running(priv->prom_net_dev)) 10513 ipw_handle_promiscuous_tx(priv, txb); 10514#endif 10515 10516 ret = ipw_tx_skb(priv, txb, pri); 10517 if (ret == NETDEV_TX_OK) 10518 __ipw_led_activity_on(priv); 10519 spin_unlock_irqrestore(&priv->lock, flags); 10520 10521 return ret; 10522} 10523 10524static void ipw_net_set_multicast_list(struct net_device *dev) 10525{ 10526 10527} 10528 10529static int ipw_net_set_mac_address(struct net_device *dev, void *p) 10530{ 10531 struct ipw_priv *priv = libipw_priv(dev); 10532 struct sockaddr *addr = p; 10533 10534 if (!is_valid_ether_addr(addr->sa_data)) 10535 return -EADDRNOTAVAIL; 10536 mutex_lock(&priv->mutex); 10537 priv->config |= CFG_CUSTOM_MAC; 10538 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); 10539 printk(KERN_INFO "%s: Setting MAC to %pM\n", 10540 priv->net_dev->name, priv->mac_addr); 10541 queue_work(priv->workqueue, &priv->adapter_restart); 10542 mutex_unlock(&priv->mutex); 10543 return 0; 10544} 10545 10546static void ipw_ethtool_get_drvinfo(struct net_device *dev, 10547 struct ethtool_drvinfo *info) 10548{ 10549 struct ipw_priv *p = libipw_priv(dev); 10550 char vers[64]; 10551 char date[32]; 10552 u32 len; 10553 10554 strcpy(info->driver, DRV_NAME); 10555 strcpy(info->version, DRV_VERSION); 10556 10557 len = sizeof(vers); 10558 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); 10559 len = sizeof(date); 10560 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len); 10561 10562 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)", 10563 vers, date); 10564 strcpy(info->bus_info, pci_name(p->pci_dev)); 10565 info->eedump_len = IPW_EEPROM_IMAGE_SIZE; 10566} 10567 10568static u32 ipw_ethtool_get_link(struct net_device *dev) 10569{ 10570 struct ipw_priv *priv = libipw_priv(dev); 10571 return (priv->status & STATUS_ASSOCIATED) != 0; 10572} 10573 10574static int ipw_ethtool_get_eeprom_len(struct net_device *dev) 10575{ 10576 return IPW_EEPROM_IMAGE_SIZE; 10577} 10578 10579static int ipw_ethtool_get_eeprom(struct net_device *dev, 10580 struct ethtool_eeprom *eeprom, u8 * bytes) 10581{ 10582 struct ipw_priv *p = libipw_priv(dev); 10583 10584 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) 10585 return -EINVAL; 10586 mutex_lock(&p->mutex); 10587 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len); 10588 mutex_unlock(&p->mutex); 10589 return 0; 10590} 10591 10592static int ipw_ethtool_set_eeprom(struct net_device *dev, 10593 struct ethtool_eeprom *eeprom, u8 * bytes) 10594{ 10595 struct ipw_priv *p = libipw_priv(dev); 10596 int i; 10597 10598 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE) 10599 return -EINVAL; 10600 mutex_lock(&p->mutex); 10601 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len); 10602 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++) 10603 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]); 10604 mutex_unlock(&p->mutex); 10605 return 0; 10606} 10607 10608static const struct ethtool_ops ipw_ethtool_ops = { 10609 .get_link = ipw_ethtool_get_link, 10610 .get_drvinfo = ipw_ethtool_get_drvinfo, 10611 .get_eeprom_len = ipw_ethtool_get_eeprom_len, 10612 .get_eeprom = ipw_ethtool_get_eeprom, 10613 .set_eeprom = ipw_ethtool_set_eeprom, 10614}; 10615 10616static irqreturn_t ipw_isr(int irq, void *data) 10617{ 10618 struct ipw_priv *priv = data; 10619 u32 inta, inta_mask; 10620 10621 
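	/* Hard-IRQ half: validate the interrupt, ack and mask the hardware, and defer all real event processing to irq_tasklet.  The shared-IRQ and hardware-disappeared cases below bail out with IRQ_NONE. */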
if (!priv) 10622 return IRQ_NONE; 10623 10624 spin_lock(&priv->irq_lock); 10625 10626 if (!(priv->status & STATUS_INT_ENABLED)) { 10627 /* IRQ is disabled */ 10628 goto none; 10629 } 10630 10631 inta = ipw_read32(priv, IPW_INTA_RW); 10632 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); 10633 10634 if (inta == 0xFFFFFFFF) { 10635 /* Hardware disappeared */ 10636 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n"); 10637 goto none; 10638 } 10639 10640 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) { 10641 /* Shared interrupt */ 10642 goto none; 10643 } 10644 10645 /* tell the device to stop sending interrupts */ 10646 __ipw_disable_interrupts(priv); 10647 10648 /* ack current interrupts */ 10649 inta &= (IPW_INTA_MASK_ALL & inta_mask); 10650 ipw_write32(priv, IPW_INTA_RW, inta); 10651 10652 /* Cache INTA value for our tasklet */ 10653 priv->isr_inta = inta; 10654 10655 tasklet_schedule(&priv->irq_tasklet); 10656 10657 spin_unlock(&priv->irq_lock); 10658 10659 return IRQ_HANDLED; 10660 none: 10661 spin_unlock(&priv->irq_lock); 10662 return IRQ_NONE; 10663} 10664 10665static void ipw_rf_kill(void *adapter) 10666{ 10667 struct ipw_priv *priv = adapter; 10668 unsigned long flags; 10669 10670 spin_lock_irqsave(&priv->lock, flags); 10671 10672 if (rf_kill_active(priv)) { 10673 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); 10674 if (priv->workqueue) 10675 queue_delayed_work(priv->workqueue, 10676 &priv->rf_kill, 2 * HZ); 10677 goto exit_unlock; 10678 } 10679 10680 /* RF Kill is now disabled, so bring the device back up */ 10681 10682 if (!(priv->status & STATUS_RF_KILL_MASK)) { 10683 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting " 10684 "device\n"); 10685 10686 /* we can not do an adapter restart while inside an irq lock */ 10687 queue_work(priv->workqueue, &priv->adapter_restart); 10688 } else 10689 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " 10690 "enabled\n"); 10691 10692 exit_unlock: 10693 spin_unlock_irqrestore(&priv->lock, flags); 10694} 10695 10696static void ipw_bg_rf_kill(struct work_struct *work) 10697{ 10698 struct ipw_priv *priv = 10699 container_of(work, struct ipw_priv, rf_kill.work); 10700 mutex_lock(&priv->mutex); 10701 ipw_rf_kill(priv); 10702 mutex_unlock(&priv->mutex); 10703} 10704 10705static void ipw_link_up(struct ipw_priv *priv) 10706{ 10707 priv->last_seq_num = -1; 10708 priv->last_frag_num = -1; 10709 priv->last_packet_time = 0; 10710 10711 netif_carrier_on(priv->net_dev); 10712 10713 cancel_delayed_work(&priv->request_scan); 10714 cancel_delayed_work(&priv->request_direct_scan); 10715 cancel_delayed_work(&priv->request_passive_scan); 10716 cancel_delayed_work(&priv->scan_event); 10717 ipw_reset_stats(priv); 10718 /* Ensure the rate is updated immediately */ 10719 priv->last_rate = ipw_get_current_rate(priv); 10720 ipw_gather_stats(priv); 10721 ipw_led_link_up(priv); 10722 notify_wx_assoc_event(priv); 10723 10724 if (priv->config & CFG_BACKGROUND_SCAN) 10725 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); 10726} 10727 10728static void ipw_bg_link_up(struct work_struct *work) 10729{ 10730 struct ipw_priv *priv = 10731 container_of(work, struct ipw_priv, link_up); 10732 mutex_lock(&priv->mutex); 10733 ipw_link_up(priv); 10734 mutex_unlock(&priv->mutex); 10735} 10736 10737static void ipw_link_down(struct ipw_priv *priv) 10738{ 10739 ipw_led_link_down(priv); 10740 netif_carrier_off(priv->net_dev); 10741 notify_wx_assoc_event(priv); 10742 10743 /* Cancel any queued work ... 
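	 * (pending scan requests, the ad-hoc check and statistics gathering) so none of it runs against a link that has just gone down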
*/ 10744 cancel_delayed_work(&priv->request_scan); 10745 cancel_delayed_work(&priv->request_direct_scan); 10746 cancel_delayed_work(&priv->request_passive_scan); 10747 cancel_delayed_work(&priv->adhoc_check); 10748 cancel_delayed_work(&priv->gather_stats); 10749 10750 ipw_reset_stats(priv); 10751 10752 if (!(priv->status & STATUS_EXIT_PENDING)) { 10753 /* Queue up another scan... */ 10754 queue_delayed_work(priv->workqueue, &priv->request_scan, 0); 10755 } else 10756 cancel_delayed_work(&priv->scan_event); 10757} 10758 10759static void ipw_bg_link_down(struct work_struct *work) 10760{ 10761 struct ipw_priv *priv = 10762 container_of(work, struct ipw_priv, link_down); 10763 mutex_lock(&priv->mutex); 10764 ipw_link_down(priv); 10765 mutex_unlock(&priv->mutex); 10766} 10767 10768static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv) 10769{ 10770 int ret = 0; 10771 10772 priv->workqueue = create_workqueue(DRV_NAME); 10773 init_waitqueue_head(&priv->wait_command_queue); 10774 init_waitqueue_head(&priv->wait_state); 10775 10776 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check); 10777 INIT_WORK(&priv->associate, ipw_bg_associate); 10778 INIT_WORK(&priv->disassociate, ipw_bg_disassociate); 10779 INIT_WORK(&priv->system_config, ipw_system_config); 10780 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish); 10781 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart); 10782 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill); 10783 INIT_WORK(&priv->up, ipw_bg_up); 10784 INIT_WORK(&priv->down, ipw_bg_down); 10785 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); 10786 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan); 10787 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan); 10788 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); 10789 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); 10790 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); 10791 INIT_WORK(&priv->roam, ipw_bg_roam); 10792 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check); 10793 INIT_WORK(&priv->link_up, ipw_bg_link_up); 10794 INIT_WORK(&priv->link_down, ipw_bg_link_down); 10795 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on); 10796 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off); 10797 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off); 10798 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network); 10799 10800#ifdef CONFIG_IPW2200_QOS 10801 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate); 10802#endif /* CONFIG_IPW2200_QOS */ 10803 10804 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 10805 ipw_irq_tasklet, (unsigned long)priv); 10806 10807 return ret; 10808} 10809 10810static void shim__set_security(struct net_device *dev, 10811 struct libipw_security *sec) 10812{ 10813 struct ipw_priv *priv = libipw_priv(dev); 10814 int i; 10815 for (i = 0; i < 4; i++) { 10816 if (sec->flags & (1 << i)) { 10817 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i]; 10818 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i]; 10819 if (sec->key_sizes[i] == 0) 10820 priv->ieee->sec.flags &= ~(1 << i); 10821 else { 10822 memcpy(priv->ieee->sec.keys[i], sec->keys[i], 10823 sec->key_sizes[i]); 10824 priv->ieee->sec.flags |= (1 << i); 10825 } 10826 priv->status |= STATUS_SECURITY_UPDATED; 10827 } else if (sec->level != SEC_LEVEL_1) 10828 priv->ieee->sec.flags &= ~(1 << i); 10829 } 10830 10831 if (sec->flags & SEC_ACTIVE_KEY) { 10832 if (sec->active_key <= 3) { 10833 priv->ieee->sec.active_key = sec->active_key; 10834 
priv->ieee->sec.flags |= SEC_ACTIVE_KEY; 10835 } else 10836 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; 10837 priv->status |= STATUS_SECURITY_UPDATED; 10838 } else 10839 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; 10840 10841 if ((sec->flags & SEC_AUTH_MODE) && 10842 (priv->ieee->sec.auth_mode != sec->auth_mode)) { 10843 priv->ieee->sec.auth_mode = sec->auth_mode; 10844 priv->ieee->sec.flags |= SEC_AUTH_MODE; 10845 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY) 10846 priv->capability |= CAP_SHARED_KEY; 10847 else 10848 priv->capability &= ~CAP_SHARED_KEY; 10849 priv->status |= STATUS_SECURITY_UPDATED; 10850 } 10851 10852 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) { 10853 priv->ieee->sec.flags |= SEC_ENABLED; 10854 priv->ieee->sec.enabled = sec->enabled; 10855 priv->status |= STATUS_SECURITY_UPDATED; 10856 if (sec->enabled) 10857 priv->capability |= CAP_PRIVACY_ON; 10858 else 10859 priv->capability &= ~CAP_PRIVACY_ON; 10860 } 10861 10862 if (sec->flags & SEC_ENCRYPT) 10863 priv->ieee->sec.encrypt = sec->encrypt; 10864 10865 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) { 10866 priv->ieee->sec.level = sec->level; 10867 priv->ieee->sec.flags |= SEC_LEVEL; 10868 priv->status |= STATUS_SECURITY_UPDATED; 10869 } 10870 10871 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT)) 10872 ipw_set_hwcrypto_keys(priv); 10873 10874 /* To match current functionality of ipw2100 (which works well w/ 10875 * various supplicants, we don't force a disassociate if the 10876 * privacy capability changes ... */ 10877#if 0 10878 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) && 10879 (((priv->assoc_request.capability & 10880 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) || 10881 (!(priv->assoc_request.capability & 10882 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) { 10883 IPW_DEBUG_ASSOC("Disassociating due to capability " 10884 "change.\n"); 10885 ipw_disassociate(priv); 10886 } 10887#endif 10888} 10889 10890static int init_supported_rates(struct ipw_priv *priv, 10891 struct ipw_supported_rates *rates) 10892{ 10893 /* TODO: Mask out rates based on priv->rates_mask */ 10894 10895 memset(rates, 0, sizeof(*rates)); 10896 /* configure supported rates */ 10897 switch (priv->ieee->freq_band) { 10898 case LIBIPW_52GHZ_BAND: 10899 rates->ieee_mode = IPW_A_MODE; 10900 rates->purpose = IPW_RATE_CAPABILITIES; 10901 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION, 10902 LIBIPW_OFDM_DEFAULT_RATES_MASK); 10903 break; 10904 10905 default: /* Mixed or 2.4Ghz */ 10906 rates->ieee_mode = IPW_G_MODE; 10907 rates->purpose = IPW_RATE_CAPABILITIES; 10908 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION, 10909 LIBIPW_CCK_DEFAULT_RATES_MASK); 10910 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) { 10911 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION, 10912 LIBIPW_OFDM_DEFAULT_RATES_MASK); 10913 } 10914 break; 10915 } 10916 10917 return 0; 10918} 10919 10920static int ipw_config(struct ipw_priv *priv) 10921{ 10922 /* This is only called from ipw_up, which resets/reloads the firmware 10923 so, we don't need to first disable the card before we configure 10924 it */ 10925 if (ipw_set_tx_power(priv)) 10926 goto error; 10927 10928 /* initialize adapter address */ 10929 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr)) 10930 goto error; 10931 10932 /* set basic system config settings */ 10933 init_sys_config(&priv->sys_config); 10934 10935 /* Support Bluetooth if we have BT h/w on board, and user wants to. 
10936 * Does not support BT priority yet (don't abort or defer our Tx) */ 10937 if (bt_coexist) { 10938 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY]; 10939 10940 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG) 10941 priv->sys_config.bt_coexistence 10942 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL; 10943 if (bt_caps & EEPROM_SKU_CAP_BT_OOB) 10944 priv->sys_config.bt_coexistence 10945 |= CFG_BT_COEXISTENCE_OOB; 10946 } 10947 10948#ifdef CONFIG_IPW2200_PROMISCUOUS 10949 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { 10950 priv->sys_config.accept_all_data_frames = 1; 10951 priv->sys_config.accept_non_directed_frames = 1; 10952 priv->sys_config.accept_all_mgmt_bcpr = 1; 10953 priv->sys_config.accept_all_mgmt_frames = 1; 10954 } 10955#endif 10956 10957 if (priv->ieee->iw_mode == IW_MODE_ADHOC) 10958 priv->sys_config.answer_broadcast_ssid_probe = 1; 10959 else 10960 priv->sys_config.answer_broadcast_ssid_probe = 0; 10961 10962 if (ipw_send_system_config(priv)) 10963 goto error; 10964 10965 init_supported_rates(priv, &priv->rates); 10966 if (ipw_send_supported_rates(priv, &priv->rates)) 10967 goto error; 10968 10969 /* Set request-to-send threshold */ 10970 if (priv->rts_threshold) { 10971 if (ipw_send_rts_threshold(priv, priv->rts_threshold)) 10972 goto error; 10973 } 10974#ifdef CONFIG_IPW2200_QOS 10975 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n"); 10976 ipw_qos_activate(priv, NULL); 10977#endif /* CONFIG_IPW2200_QOS */ 10978 10979 if (ipw_set_random_seed(priv)) 10980 goto error; 10981 10982 /* final state transition to the RUN state */ 10983 if (ipw_send_host_complete(priv)) 10984 goto error; 10985 10986 priv->status |= STATUS_INIT; 10987 10988 ipw_led_init(priv); 10989 ipw_led_radio_on(priv); 10990 priv->notif_missed_beacons = 0; 10991 10992 /* Set hardware WEP key if it is configured. */ 10993 if ((priv->capability & CAP_PRIVACY_ON) && 10994 (priv->ieee->sec.level == SEC_LEVEL_1) && 10995 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt)) 10996 ipw_set_hwcrypto_keys(priv); 10997 10998 return 0; 10999 11000 error: 11001 return -EIO; 11002} 11003 11004/* 11005 * NOTE: 11006 * 11007 * These tables have been tested in conjunction with the 11008 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters. 11009 * 11010 * Altering these values, using them on other hardware, or in geographies 11011 * not intended for resale of the above-mentioned Intel adapters has 11012 * not been tested. 11013 * 11014 * Remember to update the table in README.ipw2200 when changing this 11015 * table. 
11016 * 11017 */ 11018static const struct libipw_geo ipw_geos[] = { 11019 { /* Restricted */ 11020 "---", 11021 .bg_channels = 11, 11022 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11023 {2427, 4}, {2432, 5}, {2437, 6}, 11024 {2442, 7}, {2447, 8}, {2452, 9}, 11025 {2457, 10}, {2462, 11}}, 11026 }, 11027 11028 { /* Custom US/Canada */ 11029 "ZZF", 11030 .bg_channels = 11, 11031 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11032 {2427, 4}, {2432, 5}, {2437, 6}, 11033 {2442, 7}, {2447, 8}, {2452, 9}, 11034 {2457, 10}, {2462, 11}}, 11035 .a_channels = 8, 11036 .a = {{5180, 36}, 11037 {5200, 40}, 11038 {5220, 44}, 11039 {5240, 48}, 11040 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11041 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11042 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11043 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}}, 11044 }, 11045 11046 { /* Rest of World */ 11047 "ZZD", 11048 .bg_channels = 13, 11049 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11050 {2427, 4}, {2432, 5}, {2437, 6}, 11051 {2442, 7}, {2447, 8}, {2452, 9}, 11052 {2457, 10}, {2462, 11}, {2467, 12}, 11053 {2472, 13}}, 11054 }, 11055 11056 { /* Custom USA & Europe & High */ 11057 "ZZA", 11058 .bg_channels = 11, 11059 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11060 {2427, 4}, {2432, 5}, {2437, 6}, 11061 {2442, 7}, {2447, 8}, {2452, 9}, 11062 {2457, 10}, {2462, 11}}, 11063 .a_channels = 13, 11064 .a = {{5180, 36}, 11065 {5200, 40}, 11066 {5220, 44}, 11067 {5240, 48}, 11068 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11069 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11070 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11071 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11072 {5745, 149}, 11073 {5765, 153}, 11074 {5785, 157}, 11075 {5805, 161}, 11076 {5825, 165}}, 11077 }, 11078 11079 { /* Custom NA & Europe */ 11080 "ZZB", 11081 .bg_channels = 11, 11082 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11083 {2427, 4}, {2432, 5}, {2437, 6}, 11084 {2442, 7}, {2447, 8}, {2452, 9}, 11085 {2457, 10}, {2462, 11}}, 11086 .a_channels = 13, 11087 .a = {{5180, 36}, 11088 {5200, 40}, 11089 {5220, 44}, 11090 {5240, 48}, 11091 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11092 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11093 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11094 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11095 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 11096 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 11097 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 11098 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 11099 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 11100 }, 11101 11102 { /* Custom Japan */ 11103 "ZZC", 11104 .bg_channels = 11, 11105 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11106 {2427, 4}, {2432, 5}, {2437, 6}, 11107 {2442, 7}, {2447, 8}, {2452, 9}, 11108 {2457, 10}, {2462, 11}}, 11109 .a_channels = 4, 11110 .a = {{5170, 34}, {5190, 38}, 11111 {5210, 42}, {5230, 46}}, 11112 }, 11113 11114 { /* Custom */ 11115 "ZZM", 11116 .bg_channels = 11, 11117 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11118 {2427, 4}, {2432, 5}, {2437, 6}, 11119 {2442, 7}, {2447, 8}, {2452, 9}, 11120 {2457, 10}, {2462, 11}}, 11121 }, 11122 11123 { /* Europe */ 11124 "ZZE", 11125 .bg_channels = 13, 11126 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11127 {2427, 4}, {2432, 5}, {2437, 6}, 11128 {2442, 7}, {2447, 8}, {2452, 9}, 11129 {2457, 10}, {2462, 11}, {2467, 12}, 11130 {2472, 13}}, 11131 .a_channels = 19, 11132 .a = {{5180, 36}, 11133 {5200, 40}, 11134 {5220, 44}, 11135 {5240, 48}, 11136 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11137 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11138 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11139 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11140 {5500, 100, LIBIPW_CH_PASSIVE_ONLY}, 11141 {5520, 104, 
LIBIPW_CH_PASSIVE_ONLY}, 11142 {5540, 108, LIBIPW_CH_PASSIVE_ONLY}, 11143 {5560, 112, LIBIPW_CH_PASSIVE_ONLY}, 11144 {5580, 116, LIBIPW_CH_PASSIVE_ONLY}, 11145 {5600, 120, LIBIPW_CH_PASSIVE_ONLY}, 11146 {5620, 124, LIBIPW_CH_PASSIVE_ONLY}, 11147 {5640, 128, LIBIPW_CH_PASSIVE_ONLY}, 11148 {5660, 132, LIBIPW_CH_PASSIVE_ONLY}, 11149 {5680, 136, LIBIPW_CH_PASSIVE_ONLY}, 11150 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}}, 11151 }, 11152 11153 { /* Custom Japan */ 11154 "ZZJ", 11155 .bg_channels = 14, 11156 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11157 {2427, 4}, {2432, 5}, {2437, 6}, 11158 {2442, 7}, {2447, 8}, {2452, 9}, 11159 {2457, 10}, {2462, 11}, {2467, 12}, 11160 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}}, 11161 .a_channels = 4, 11162 .a = {{5170, 34}, {5190, 38}, 11163 {5210, 42}, {5230, 46}}, 11164 }, 11165 11166 { /* Rest of World */ 11167 "ZZR", 11168 .bg_channels = 14, 11169 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11170 {2427, 4}, {2432, 5}, {2437, 6}, 11171 {2442, 7}, {2447, 8}, {2452, 9}, 11172 {2457, 10}, {2462, 11}, {2467, 12}, 11173 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY | 11174 LIBIPW_CH_PASSIVE_ONLY}}, 11175 }, 11176 11177 { /* High Band */ 11178 "ZZH", 11179 .bg_channels = 13, 11180 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11181 {2427, 4}, {2432, 5}, {2437, 6}, 11182 {2442, 7}, {2447, 8}, {2452, 9}, 11183 {2457, 10}, {2462, 11}, 11184 {2467, 12, LIBIPW_CH_PASSIVE_ONLY}, 11185 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}}, 11186 .a_channels = 4, 11187 .a = {{5745, 149}, {5765, 153}, 11188 {5785, 157}, {5805, 161}}, 11189 }, 11190 11191 { /* Custom Europe */ 11192 "ZZG", 11193 .bg_channels = 13, 11194 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11195 {2427, 4}, {2432, 5}, {2437, 6}, 11196 {2442, 7}, {2447, 8}, {2452, 9}, 11197 {2457, 10}, {2462, 11}, 11198 {2467, 12}, {2472, 13}}, 11199 .a_channels = 4, 11200 .a = {{5180, 36}, {5200, 40}, 11201 {5220, 44}, {5240, 48}}, 11202 }, 11203 11204 { /* Europe */ 11205 "ZZK", 11206 .bg_channels = 13, 11207 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11208 {2427, 4}, {2432, 5}, {2437, 6}, 11209 {2442, 7}, {2447, 8}, {2452, 9}, 11210 {2457, 10}, {2462, 11}, 11211 {2467, 12, LIBIPW_CH_PASSIVE_ONLY}, 11212 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}}, 11213 .a_channels = 24, 11214 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY}, 11215 {5200, 40, LIBIPW_CH_PASSIVE_ONLY}, 11216 {5220, 44, LIBIPW_CH_PASSIVE_ONLY}, 11217 {5240, 48, LIBIPW_CH_PASSIVE_ONLY}, 11218 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11219 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11220 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11221 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11222 {5500, 100, LIBIPW_CH_PASSIVE_ONLY}, 11223 {5520, 104, LIBIPW_CH_PASSIVE_ONLY}, 11224 {5540, 108, LIBIPW_CH_PASSIVE_ONLY}, 11225 {5560, 112, LIBIPW_CH_PASSIVE_ONLY}, 11226 {5580, 116, LIBIPW_CH_PASSIVE_ONLY}, 11227 {5600, 120, LIBIPW_CH_PASSIVE_ONLY}, 11228 {5620, 124, LIBIPW_CH_PASSIVE_ONLY}, 11229 {5640, 128, LIBIPW_CH_PASSIVE_ONLY}, 11230 {5660, 132, LIBIPW_CH_PASSIVE_ONLY}, 11231 {5680, 136, LIBIPW_CH_PASSIVE_ONLY}, 11232 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}, 11233 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 11234 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 11235 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 11236 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 11237 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 11238 }, 11239 11240 { /* Europe */ 11241 "ZZL", 11242 .bg_channels = 11, 11243 .bg = {{2412, 1}, {2417, 2}, {2422, 3}, 11244 {2427, 4}, {2432, 5}, {2437, 6}, 11245 {2442, 7}, {2447, 8}, {2452, 9}, 11246 {2457, 10}, {2462, 11}}, 11247 .a_channels = 13, 11248 .a = {{5180, 36, 
LIBIPW_CH_PASSIVE_ONLY}, 11249 {5200, 40, LIBIPW_CH_PASSIVE_ONLY}, 11250 {5220, 44, LIBIPW_CH_PASSIVE_ONLY}, 11251 {5240, 48, LIBIPW_CH_PASSIVE_ONLY}, 11252 {5260, 52, LIBIPW_CH_PASSIVE_ONLY}, 11253 {5280, 56, LIBIPW_CH_PASSIVE_ONLY}, 11254 {5300, 60, LIBIPW_CH_PASSIVE_ONLY}, 11255 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}, 11256 {5745, 149, LIBIPW_CH_PASSIVE_ONLY}, 11257 {5765, 153, LIBIPW_CH_PASSIVE_ONLY}, 11258 {5785, 157, LIBIPW_CH_PASSIVE_ONLY}, 11259 {5805, 161, LIBIPW_CH_PASSIVE_ONLY}, 11260 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}}, 11261 } 11262}; 11263 11264#define MAX_HW_RESTARTS 5 11265static int ipw_up(struct ipw_priv *priv) 11266{ 11267 int rc, i, j; 11268 11269 /* Age scan list entries found before suspend */ 11270 if (priv->suspend_time) { 11271 libipw_networks_age(priv->ieee, priv->suspend_time); 11272 priv->suspend_time = 0; 11273 } 11274 11275 if (priv->status & STATUS_EXIT_PENDING) 11276 return -EIO; 11277 11278 if (cmdlog && !priv->cmdlog) { 11279 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog), 11280 GFP_KERNEL); 11281 if (priv->cmdlog == NULL) { 11282 IPW_ERROR("Error allocating %d command log entries.\n", 11283 cmdlog); 11284 return -ENOMEM; 11285 } else { 11286 priv->cmdlog_len = cmdlog; 11287 } 11288 } 11289 11290 for (i = 0; i < MAX_HW_RESTARTS; i++) { 11291 /* Load the microcode, firmware, and eeprom. 11292 * Also start the clocks. */ 11293 rc = ipw_load(priv); 11294 if (rc) { 11295 IPW_ERROR("Unable to load firmware: %d\n", rc); 11296 return rc; 11297 } 11298 11299 ipw_init_ordinals(priv); 11300 if (!(priv->config & CFG_CUSTOM_MAC)) 11301 eeprom_parse_mac(priv, priv->mac_addr); 11302 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); 11303 memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN); 11304 11305 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) { 11306 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE], 11307 ipw_geos[j].name, 3)) 11308 break; 11309 } 11310 if (j == ARRAY_SIZE(ipw_geos)) { 11311 IPW_WARNING("SKU [%c%c%c] not recognized.\n", 11312 priv->eeprom[EEPROM_COUNTRY_CODE + 0], 11313 priv->eeprom[EEPROM_COUNTRY_CODE + 1], 11314 priv->eeprom[EEPROM_COUNTRY_CODE + 2]); 11315 j = 0; 11316 } 11317 if (libipw_set_geo(priv->ieee, &ipw_geos[j])) { 11318 IPW_WARNING("Could not set geography."); 11319 return 0; 11320 } 11321 11322 if (priv->status & STATUS_RF_KILL_SW) { 11323 IPW_WARNING("Radio disabled by module parameter.\n"); 11324 return 0; 11325 } else if (rf_kill_active(priv)) { 11326 IPW_WARNING("Radio Frequency Kill Switch is On:\n" 11327 "Kill switch must be turned off for " 11328 "wireless networking to work.\n"); 11329 queue_delayed_work(priv->workqueue, &priv->rf_kill, 11330 2 * HZ); 11331 return 0; 11332 } 11333 11334 rc = ipw_config(priv); 11335 if (!rc) { 11336 IPW_DEBUG_INFO("Configured device on count %i\n", i); 11337 11338 /* If configure to try and auto-associate, kick 11339 * off a scan. 
*/ 11340 queue_delayed_work(priv->workqueue, 11341 &priv->request_scan, 0); 11342 11343 return 0; 11344 } 11345 11346 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc); 11347 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n", 11348 i, MAX_HW_RESTARTS); 11349 11350 /* We had an error bringing up the hardware, so take it 11351 * all the way back down so we can try again */ 11352 ipw_down(priv); 11353 } 11354 11355 /* tried to restart and config the device for as long as our 11356 * patience could withstand */ 11357 IPW_ERROR("Unable to initialize device after %d attempts.\n", i); 11358 11359 return -EIO; 11360} 11361 11362static void ipw_bg_up(struct work_struct *work) 11363{ 11364 struct ipw_priv *priv = 11365 container_of(work, struct ipw_priv, up); 11366 mutex_lock(&priv->mutex); 11367 ipw_up(priv); 11368 mutex_unlock(&priv->mutex); 11369} 11370 11371static void ipw_deinit(struct ipw_priv *priv) 11372{ 11373 int i; 11374 11375 if (priv->status & STATUS_SCANNING) { 11376 IPW_DEBUG_INFO("Aborting scan during shutdown.\n"); 11377 ipw_abort_scan(priv); 11378 } 11379 11380 if (priv->status & STATUS_ASSOCIATED) { 11381 IPW_DEBUG_INFO("Disassociating during shutdown.\n"); 11382 ipw_disassociate(priv); 11383 } 11384 11385 ipw_led_shutdown(priv); 11386 11387 /* Wait up to 1s for status to change to not scanning and not 11388 * associated (disassociation can take a while for a full 802.11 11389 * exchange) */ 11390 for (i = 1000; i && (priv->status & 11391 (STATUS_DISASSOCIATING | 11392 STATUS_ASSOCIATED | STATUS_SCANNING)); i--) 11393 udelay(10); 11394 11395 if (priv->status & (STATUS_DISASSOCIATING | 11396 STATUS_ASSOCIATED | STATUS_SCANNING)) 11397 IPW_DEBUG_INFO("Still associated or scanning...\n"); 11398 else 11399 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i); 11400 11401 /* Attempt to disable the card */ 11402 ipw_send_card_disable(priv, 0); 11403 11404 priv->status &= ~STATUS_INIT; 11405} 11406 11407static void ipw_down(struct ipw_priv *priv) 11408{ 11409 int exit_pending = priv->status & STATUS_EXIT_PENDING; 11410 11411 priv->status |= STATUS_EXIT_PENDING; 11412 11413 if (ipw_is_init(priv)) 11414 ipw_deinit(priv); 11415 11416 /* Wipe out the EXIT_PENDING status bit if we are not actually 11417 * exiting the module */ 11418 if (!exit_pending) 11419 priv->status &= ~STATUS_EXIT_PENDING; 11420 11421 /* tell the device to stop sending interrupts */ 11422 ipw_disable_interrupts(priv); 11423 11424 /* Clear all bits but the RF Kill */ 11425 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING; 11426 netif_carrier_off(priv->net_dev); 11427 11428 ipw_stop_nic(priv); 11429 11430 ipw_led_radio_off(priv); 11431} 11432 11433static void ipw_bg_down(struct work_struct *work) 11434{ 11435 struct ipw_priv *priv = 11436 container_of(work, struct ipw_priv, down); 11437 mutex_lock(&priv->mutex); 11438 ipw_down(priv); 11439 mutex_unlock(&priv->mutex); 11440} 11441 11442/* Called by register_netdev() */ 11443static int ipw_net_init(struct net_device *dev) 11444{ 11445 int i, rc = 0; 11446 struct ipw_priv *priv = libipw_priv(dev); 11447 const struct libipw_geo *geo = libipw_get_geo(priv->ieee); 11448 struct wireless_dev *wdev = &priv->ieee->wdev; 11449 mutex_lock(&priv->mutex); 11450 11451 if (ipw_up(priv)) { 11452 rc = -EIO; 11453 goto out; 11454 } 11455 11456 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN); 11457 11458 /* fill-out priv->ieee->bg_band */ 11459 if (geo->bg_channels) { 11460 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band; 11461 11462 
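		/* Translate the 2.4 GHz channels of the libipw geo table for this SKU into a cfg80211 ieee80211_supported_band so the wiphy can advertise them; the 5 GHz (a) band below is filled in the same way. */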
bg_band->band = IEEE80211_BAND_2GHZ; 11463 bg_band->n_channels = geo->bg_channels; 11464 bg_band->channels = 11465 kzalloc(geo->bg_channels * 11466 sizeof(struct ieee80211_channel), GFP_KERNEL); 11467 /* translate geo->bg to bg_band.channels */ 11468 for (i = 0; i < geo->bg_channels; i++) { 11469 bg_band->channels[i].band = IEEE80211_BAND_2GHZ; 11470 bg_band->channels[i].center_freq = geo->bg[i].freq; 11471 bg_band->channels[i].hw_value = geo->bg[i].channel; 11472 bg_band->channels[i].max_power = geo->bg[i].max_power; 11473 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) 11474 bg_band->channels[i].flags |= 11475 IEEE80211_CHAN_PASSIVE_SCAN; 11476 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS) 11477 bg_band->channels[i].flags |= 11478 IEEE80211_CHAN_NO_IBSS; 11479 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT) 11480 bg_band->channels[i].flags |= 11481 IEEE80211_CHAN_RADAR; 11482 /* No equivalent for LIBIPW_CH_80211H_RULES, 11483 LIBIPW_CH_UNIFORM_SPREADING, or 11484 LIBIPW_CH_B_ONLY... */ 11485 } 11486 /* point at bitrate info */ 11487 bg_band->bitrates = ipw2200_bg_rates; 11488 bg_band->n_bitrates = ipw2200_num_bg_rates; 11489 11490 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; 11491 } 11492 11493 /* fill-out priv->ieee->a_band */ 11494 if (geo->a_channels) { 11495 struct ieee80211_supported_band *a_band = &priv->ieee->a_band; 11496 11497 a_band->band = IEEE80211_BAND_5GHZ; 11498 a_band->n_channels = geo->a_channels; 11499 a_band->channels = 11500 kzalloc(geo->a_channels * 11501 sizeof(struct ieee80211_channel), GFP_KERNEL); 11502 /* translate geo->a to a_band.channels */ 11503 for (i = 0; i < geo->a_channels; i++) { 11504 a_band->channels[i].band = IEEE80211_BAND_5GHZ; 11505 a_band->channels[i].center_freq = geo->a[i].freq; 11506 a_band->channels[i].hw_value = geo->a[i].channel; 11507 a_band->channels[i].max_power = geo->a[i].max_power; 11508 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) 11509 a_band->channels[i].flags |= 11510 IEEE80211_CHAN_PASSIVE_SCAN; 11511 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS) 11512 a_band->channels[i].flags |= 11513 IEEE80211_CHAN_NO_IBSS; 11514 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT) 11515 a_band->channels[i].flags |= 11516 IEEE80211_CHAN_RADAR; 11517 /* No equivalent for LIBIPW_CH_80211H_RULES, 11518 LIBIPW_CH_UNIFORM_SPREADING, or 11519 LIBIPW_CH_B_ONLY... */ 11520 } 11521 /* point at bitrate info */ 11522 a_band->bitrates = ipw2200_a_rates; 11523 a_band->n_bitrates = ipw2200_num_a_rates; 11524 11525 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band; 11526 } 11527 11528 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); 11529 11530 /* With that information in place, we can now register the wiphy... 
*/ 11531 if (wiphy_register(wdev->wiphy)) { 11532 rc = -EIO; 11533 goto out; 11534 } 11535 11536out: 11537 mutex_unlock(&priv->mutex); 11538 return rc; 11539} 11540 11541/* PCI driver stuff */ 11542static DEFINE_PCI_DEVICE_TABLE(card_ids) = { 11543 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0}, 11544 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0}, 11545 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0}, 11546 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0}, 11547 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0}, 11548 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0}, 11549 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0}, 11550 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0}, 11551 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0}, 11552 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0}, 11553 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0}, 11554 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0}, 11555 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0}, 11556 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0}, 11557 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0}, 11558 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0}, 11559 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0}, 11560 {PCI_VDEVICE(INTEL, 0x104f), 0}, 11561 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */ 11562 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */ 11563 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */ 11564 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */ 11565 11566 /* required last entry */ 11567 {0,} 11568}; 11569 11570MODULE_DEVICE_TABLE(pci, card_ids); 11571 11572static struct attribute *ipw_sysfs_entries[] = { 11573 &dev_attr_rf_kill.attr, 11574 &dev_attr_direct_dword.attr, 11575 &dev_attr_indirect_byte.attr, 11576 &dev_attr_indirect_dword.attr, 11577 &dev_attr_mem_gpio_reg.attr, 11578 &dev_attr_command_event_reg.attr, 11579 &dev_attr_nic_type.attr, 11580 &dev_attr_status.attr, 11581 &dev_attr_cfg.attr, 11582 &dev_attr_error.attr, 11583 &dev_attr_event_log.attr, 11584 &dev_attr_cmd_log.attr, 11585 &dev_attr_eeprom_delay.attr, 11586 &dev_attr_ucode_version.attr, 11587 &dev_attr_rtc.attr, 11588 &dev_attr_scan_age.attr, 11589 &dev_attr_led.attr, 11590 &dev_attr_speed_scan.attr, 11591 &dev_attr_net_stats.attr, 11592 &dev_attr_channels.attr, 11593#ifdef CONFIG_IPW2200_PROMISCUOUS 11594 &dev_attr_rtap_iface.attr, 11595 &dev_attr_rtap_filter.attr, 11596#endif 11597 NULL 11598}; 11599 11600static struct attribute_group ipw_attribute_group = { 11601 .name = NULL, /* put in device directory */ 11602 .attrs = ipw_sysfs_entries, 11603}; 11604 11605#ifdef CONFIG_IPW2200_PROMISCUOUS 11606static int ipw_prom_open(struct net_device *dev) 11607{ 11608 struct ipw_prom_priv *prom_priv = libipw_priv(dev); 11609 struct ipw_priv *priv = prom_priv->priv; 11610 11611 IPW_DEBUG_INFO("prom dev->open\n"); 11612 netif_carrier_off(dev); 11613 11614 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 11615 priv->sys_config.accept_all_data_frames = 1; 11616 priv->sys_config.accept_non_directed_frames = 1; 11617 priv->sys_config.accept_all_mgmt_bcpr = 1; 11618 priv->sys_config.accept_all_mgmt_frames = 1; 11619 11620 ipw_send_system_config(priv); 11621 } 11622 11623 return 0; 11624} 11625 11626static int ipw_prom_stop(struct net_device *dev) 11627{ 11628 struct ipw_prom_priv *prom_priv = libipw_priv(dev); 11629 struct ipw_priv *priv = prom_priv->priv; 11630 11631 IPW_DEBUG_INFO("prom dev->stop\n"); 11632 11633 if (priv->ieee->iw_mode != IW_MODE_MONITOR) { 11634 
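		/* Leaving promiscuous mode: clear the accept-all filters
		 * that ipw_prom_open() enabled and push the restored
		 * configuration to the firmware. */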
priv->sys_config.accept_all_data_frames = 0; 11635 priv->sys_config.accept_non_directed_frames = 0; 11636 priv->sys_config.accept_all_mgmt_bcpr = 0; 11637 priv->sys_config.accept_all_mgmt_frames = 0; 11638 11639 ipw_send_system_config(priv); 11640 } 11641 11642 return 0; 11643} 11644 11645static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb, 11646 struct net_device *dev) 11647{ 11648 IPW_DEBUG_INFO("prom dev->xmit\n"); 11649 dev_kfree_skb(skb); 11650 return NETDEV_TX_OK; 11651} 11652 11653static const struct net_device_ops ipw_prom_netdev_ops = { 11654 .ndo_open = ipw_prom_open, 11655 .ndo_stop = ipw_prom_stop, 11656 .ndo_start_xmit = ipw_prom_hard_start_xmit, 11657 .ndo_change_mtu = libipw_change_mtu, 11658 .ndo_set_mac_address = eth_mac_addr, 11659 .ndo_validate_addr = eth_validate_addr, 11660}; 11661 11662static int ipw_prom_alloc(struct ipw_priv *priv) 11663{ 11664 int rc = 0; 11665 11666 if (priv->prom_net_dev) 11667 return -EPERM; 11668 11669 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1); 11670 if (priv->prom_net_dev == NULL) 11671 return -ENOMEM; 11672 11673 priv->prom_priv = libipw_priv(priv->prom_net_dev); 11674 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev); 11675 priv->prom_priv->priv = priv; 11676 11677 strcpy(priv->prom_net_dev->name, "rtap%d"); 11678 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN); 11679 11680 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP; 11681 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops; 11682 11683 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR; 11684 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev); 11685 11686 rc = register_netdev(priv->prom_net_dev); 11687 if (rc) { 11688 free_libipw(priv->prom_net_dev, 1); 11689 priv->prom_net_dev = NULL; 11690 return rc; 11691 } 11692 11693 return 0; 11694} 11695 11696static void ipw_prom_free(struct ipw_priv *priv) 11697{ 11698 if (!priv->prom_net_dev) 11699 return; 11700 11701 unregister_netdev(priv->prom_net_dev); 11702 free_libipw(priv->prom_net_dev, 1); 11703 11704 priv->prom_net_dev = NULL; 11705} 11706 11707#endif 11708 11709static const struct net_device_ops ipw_netdev_ops = { 11710 .ndo_init = ipw_net_init, 11711 .ndo_open = ipw_net_open, 11712 .ndo_stop = ipw_net_stop, 11713 .ndo_set_multicast_list = ipw_net_set_multicast_list, 11714 .ndo_set_mac_address = ipw_net_set_mac_address, 11715 .ndo_start_xmit = libipw_xmit, 11716 .ndo_change_mtu = libipw_change_mtu, 11717 .ndo_validate_addr = eth_validate_addr, 11718}; 11719 11720static int __devinit ipw_pci_probe(struct pci_dev *pdev, 11721 const struct pci_device_id *ent) 11722{ 11723 int err = 0; 11724 struct net_device *net_dev; 11725 void __iomem *base; 11726 u32 length, val; 11727 struct ipw_priv *priv; 11728 int i; 11729 11730 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0); 11731 if (net_dev == NULL) { 11732 err = -ENOMEM; 11733 goto out; 11734 } 11735 11736 priv = libipw_priv(net_dev); 11737 priv->ieee = netdev_priv(net_dev); 11738 11739 priv->net_dev = net_dev; 11740 priv->pci_dev = pdev; 11741 ipw_debug_level = debug; 11742 spin_lock_init(&priv->irq_lock); 11743 spin_lock_init(&priv->lock); 11744 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) 11745 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); 11746 11747 mutex_init(&priv->mutex); 11748 if (pci_enable_device(pdev)) { 11749 err = -ENODEV; 11750 goto out_free_libipw; 11751 } 11752 11753 pci_set_master(pdev); 11754 11755 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 11756 if (!err) 11757 err = pci_set_consistent_dma_mask(pdev, 
DMA_BIT_MASK(32)); 11758 if (err) { 11759 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); 11760 goto out_pci_disable_device; 11761 } 11762 11763 pci_set_drvdata(pdev, priv); 11764 11765 err = pci_request_regions(pdev, DRV_NAME); 11766 if (err) 11767 goto out_pci_disable_device; 11768 11769 /* We disable the RETRY_TIMEOUT register (0x41) to keep 11770 * PCI Tx retries from interfering with C3 CPU state */ 11771 pci_read_config_dword(pdev, 0x40, &val); 11772 if ((val & 0x0000ff00) != 0) 11773 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 11774 11775 length = pci_resource_len(pdev, 0); 11776 priv->hw_len = length; 11777 11778 base = pci_ioremap_bar(pdev, 0); 11779 if (!base) { 11780 err = -ENODEV; 11781 goto out_pci_release_regions; 11782 } 11783 11784 priv->hw_base = base; 11785 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length); 11786 IPW_DEBUG_INFO("pci_resource_base = %p\n", base); 11787 11788 err = ipw_setup_deferred_work(priv); 11789 if (err) { 11790 IPW_ERROR("Unable to setup deferred work\n"); 11791 goto out_iounmap; 11792 } 11793 11794 ipw_sw_reset(priv, 1); 11795 11796 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv); 11797 if (err) { 11798 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); 11799 goto out_destroy_workqueue; 11800 } 11801 11802 SET_NETDEV_DEV(net_dev, &pdev->dev); 11803 11804 mutex_lock(&priv->mutex); 11805 11806 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit; 11807 priv->ieee->set_security = shim__set_security; 11808 priv->ieee->is_queue_full = ipw_net_is_queue_full; 11809 11810#ifdef CONFIG_IPW2200_QOS 11811 priv->ieee->is_qos_active = ipw_is_qos_active; 11812 priv->ieee->handle_probe_response = ipw_handle_beacon; 11813 priv->ieee->handle_beacon = ipw_handle_probe_response; 11814 priv->ieee->handle_assoc_response = ipw_handle_assoc_response; 11815#endif /* CONFIG_IPW2200_QOS */ 11816 11817 priv->ieee->perfect_rssi = -20; 11818 priv->ieee->worst_rssi = -85; 11819 11820 net_dev->netdev_ops = &ipw_netdev_ops; 11821 priv->wireless_data.spy_data = &priv->ieee->spy_data; 11822 net_dev->wireless_data = &priv->wireless_data; 11823 net_dev->wireless_handlers = &ipw_wx_handler_def; 11824 net_dev->ethtool_ops = &ipw_ethtool_ops; 11825 net_dev->irq = pdev->irq; 11826 net_dev->base_addr = (unsigned long)priv->hw_base; 11827 net_dev->mem_start = pci_resource_start(pdev, 0); 11828 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1; 11829 11830 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); 11831 if (err) { 11832 IPW_ERROR("failed to create sysfs device attributes\n"); 11833 mutex_unlock(&priv->mutex); 11834 goto out_release_irq; 11835 } 11836 11837 mutex_unlock(&priv->mutex); 11838 err = register_netdev(net_dev); 11839 if (err) { 11840 IPW_ERROR("failed to register network device\n"); 11841 goto out_remove_sysfs; 11842 } 11843 11844#ifdef CONFIG_IPW2200_PROMISCUOUS 11845 if (rtap_iface) { 11846 err = ipw_prom_alloc(priv); 11847 if (err) { 11848 IPW_ERROR("Failed to register promiscuous network " 11849 "device (error %d).\n", err); 11850 unregister_netdev(priv->net_dev); 11851 goto out_remove_sysfs; 11852 } 11853 } 11854#endif 11855 11856 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg " 11857 "channels, %d 802.11a channels)\n", 11858 priv->ieee->geo.name, priv->ieee->geo.bg_channels, 11859 priv->ieee->geo.a_channels); 11860 11861 return 0; 11862 11863 out_remove_sysfs: 11864 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); 11865 out_release_irq: 11866 
	free_irq(pdev->irq, priv);
      out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
      out_iounmap:
	iounmap(priv->hw_base);
      out_pci_release_regions:
	pci_release_regions(pdev);
      out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
      out_free_libipw:
	free_libipw(priv->net_dev, 0);
      out:
	return err;
}

static void __devexit ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	if (!priv)
		return;

	mutex_lock(&priv->mutex);

	priv->status |= STATUS_EXIT_PENDING;
	ipw_down(priv);
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	mutex_unlock(&priv->mutex);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	if (priv->cmdlog) {
		kfree(priv->cmdlog);
		priv->cmdlog = NULL;
	}
	/* ipw_down will ensure that there is no more pending work
	 * in the workqueues, so we can safely remove them now. */
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->request_direct_scan);
	cancel_delayed_work(&priv->request_passive_scan);
	cancel_delayed_work(&priv->scan_event);
	cancel_delayed_work(&priv->rf_kill);
	cancel_delayed_work(&priv->scan_check);
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ipw_ibss_seq, list));
		}
	}

	kfree(priv->error);
	priv->error = NULL;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	ipw_prom_free(priv);
#endif

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	/* wiphy_unregister needs to be here, before free_libipw */
	wiphy_unregister(priv->ieee->wdev.wiphy);
	kfree(priv->ieee->a_band.channels);
	kfree(priv->ieee->bg_band.channels);
	free_libipw(priv->net_dev, 0);
	free_firmware();
}

#ifdef CONFIG_PM
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc.
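	 * The firmware state is not preserved across suspend, so
	 * ipw_pci_resume() re-enables the PCI device and queues priv->up
	 * to bring the interface all the way back up.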
 */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	priv->suspend_at = get_seconds();

	return 0;
}

static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	int err;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	priv->suspend_time = get_seconds() - priv->suspend_at;

	/* Bring the device back up */
	queue_work(priv->workqueue, &priv->up);

	return 0;
}
#endif

static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc.
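	 * Called at system shutdown/reboot so the NIC stops generating
	 * interrupts and DMA before control is handed over.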
 */
	ipw_down(priv);

	pci_disable_device(pdev);
}

/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
	.shutdown = ipw_pci_shutdown,
};

static int __init ipw_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_register_driver(&ipw_driver);
	if (ret) {
		IPW_ERROR("Unable to initialize PCI module\n");
		return ret;
	}

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
	if (ret) {
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
		return ret;
	}

	return ret;
}

static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}

module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param_named(led, led_support, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param_named(channel, default_channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

#ifdef CONFIG_IPW2200_PROMISCUOUS
module_param(rtap_iface, int, 0444);
MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
#endif

#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");

module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");

module_param(qos_no_ack_mask, int, 0444);
MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");

module_param(burst_duration_CCK, int, 0444);
MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");

module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
#endif				/* CONFIG_IPW2200_QOS */

#ifdef CONFIG_IPW2200_MONITOR
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param_named(mode, network_mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_param(bt_coexist, int, 0444);
MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");

module_param(hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");

module_param(cmdlog, int, 0444);
MODULE_PARM_DESC(cmdlog,
		 "allocate a ring buffer for logging firmware commands");
module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");

module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (use the antenna with less background noise), 3=Aux");

module_exit(ipw_exit);
module_init(ipw_init);
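
/*
 * Example module load (hypothetical parameter values):
 *
 *   modprobe ipw2200 debug=0x43fff led=1 channel=6 mode=0
 *
 * mode=2 (monitor) and rtap_iface=1 are only available when the driver is
 * built with CONFIG_IPW2200_MONITOR / CONFIG_IPW2200_PROMISCUOUS.  All of
 * the parameters above are declared with permission 0444, so their current
 * values can be read back from /sys/module/ipw2200/parameters/ but not
 * changed at runtime.
 */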