ipw2200.c revision 3505d1a9fd65e2d3e00827857b6795d9d8983658
1/******************************************************************************
2
3  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5  802.11 status code portion of this file from ethereal-0.10.6:
6    Copyright 2000, Axis Communications AB
7    Ethereal - Network traffic analyzer
8    By Gerald Combs <gerald@ethereal.com>
9    Copyright 1998 Gerald Combs
10
11  This program is free software; you can redistribute it and/or modify it
12  under the terms of version 2 of the GNU General Public License as
13  published by the Free Software Foundation.
14
15  This program is distributed in the hope that it will be useful, but WITHOUT
16  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  more details.
19
20  You should have received a copy of the GNU General Public License along with
21  this program; if not, write to the Free Software Foundation, Inc., 59
22  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
23
24  The full GNU General Public License is included in this distribution in the
25  file called LICENSE.
26
27  Contact Information:
28  Intel Linux Wireless <ilw@linux.intel.com>
29  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31******************************************************************************/
32
33#include <linux/sched.h>
34#include "ipw2200.h"
35
36
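/* Build the version suffix appended to IPW2200_VERSION below: "k" for an
 * in-kernel (non-external-module) build, plus one letter for each enabled
 * option -- debug, monitor, promiscuous, radiotap and QoS. */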
37#ifndef KBUILD_EXTMOD
38#define VK "k"
39#else
40#define VK
41#endif
42
43#ifdef CONFIG_IPW2200_DEBUG
44#define VD "d"
45#else
46#define VD
47#endif
48
49#ifdef CONFIG_IPW2200_MONITOR
50#define VM "m"
51#else
52#define VM
53#endif
54
55#ifdef CONFIG_IPW2200_PROMISCUOUS
56#define VP "p"
57#else
58#define VP
59#endif
60
61#ifdef CONFIG_IPW2200_RADIOTAP
62#define VR "r"
63#else
64#define VR
65#endif
66
67#ifdef CONFIG_IPW2200_QOS
68#define VQ "q"
69#else
70#define VQ
71#endif
72
73#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
74#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
75#define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
76#define DRV_VERSION     IPW2200_VERSION
77
78#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
80MODULE_DESCRIPTION(DRV_DESCRIPTION);
81MODULE_VERSION(DRV_VERSION);
82MODULE_AUTHOR(DRV_COPYRIGHT);
83MODULE_LICENSE("GPL");
84MODULE_FIRMWARE("ipw2200-ibss.fw");
85#ifdef CONFIG_IPW2200_MONITOR
86MODULE_FIRMWARE("ipw2200-sniffer.fw");
87#endif
88MODULE_FIRMWARE("ipw2200-bss.fw");
89
90static int cmdlog = 0;
91static int debug = 0;
92static int default_channel = 0;
93static int network_mode = 0;
94
95static u32 ipw_debug_level;
96static int associate;
97static int auto_create = 1;
98static int led_support = 0;
99static int disable = 0;
100static int bt_coexist = 0;
101static int hwcrypto = 0;
102static int roaming = 1;
103static const char ipw_modes[] = {
104	'a', 'b', 'g', '?'
105};
106static int antenna = CFG_SYS_ANTENNA_BOTH;
107
108#ifdef CONFIG_IPW2200_PROMISCUOUS
109static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
110#endif
111
112
113#ifdef CONFIG_IPW2200_QOS
114static int qos_enable = 0;
115static int qos_burst_enable = 0;
116static int qos_no_ack_mask = 0;
117static int burst_duration_CCK = 0;
118static int burst_duration_OFDM = 0;
119
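/* Default per-queue QoS (EDCA-style) parameter sets used when QoS is
 * enabled: CWmin, CWmax, AIFS, ACM flag and TXOP limit for each of the
 * four TX queues, one table for OFDM (a/g) rates and one for CCK (b). */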
120static struct libipw_qos_parameters def_qos_parameters_OFDM = {
121	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
122	 QOS_TX3_CW_MIN_OFDM},
123	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
124	 QOS_TX3_CW_MAX_OFDM},
125	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
126	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
127	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
128	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
129};
130
131static struct libipw_qos_parameters def_qos_parameters_CCK = {
132	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
133	 QOS_TX3_CW_MIN_CCK},
134	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
135	 QOS_TX3_CW_MAX_CCK},
136	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
137	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
138	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
139	 QOS_TX3_TXOP_LIMIT_CCK}
140};
141
142static struct libipw_qos_parameters def_parameters_OFDM = {
143	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
144	 DEF_TX3_CW_MIN_OFDM},
145	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
146	 DEF_TX3_CW_MAX_OFDM},
147	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
148	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
149	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
150	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
151};
152
153static struct libipw_qos_parameters def_parameters_CCK = {
154	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
155	 DEF_TX3_CW_MIN_CCK},
156	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
157	 DEF_TX3_CW_MAX_CCK},
158	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
159	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
160	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
161	 DEF_TX3_TXOP_LIMIT_CCK}
162};
163
164static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
165
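/* Map an 802.1d user priority (0-7) to one of the four hardware TX
 * queues, following the usual priority to access-category grouping
 * (0/3, 1/2, 4/5, 6/7). */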
166static int from_priority_to_tx_queue[] = {
167	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
168	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
169};
170
171static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
172
173static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
174				       *qos_param);
175static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
176				     *qos_param);
177#endif				/* CONFIG_IPW2200_QOS */
178
179static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
180static void ipw_remove_current_network(struct ipw_priv *priv);
181static void ipw_rx(struct ipw_priv *priv);
182static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
183				struct clx2_tx_queue *txq, int qindex);
184static int ipw_queue_reset(struct ipw_priv *priv);
185
186static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
187			     int len, int sync);
188
189static void ipw_tx_queue_free(struct ipw_priv *);
190
191static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
192static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
193static void ipw_rx_queue_replenish(void *);
194static int ipw_up(struct ipw_priv *);
195static void ipw_bg_up(struct work_struct *work);
196static void ipw_down(struct ipw_priv *);
197static void ipw_bg_down(struct work_struct *work);
198static int ipw_config(struct ipw_priv *);
199static int init_supported_rates(struct ipw_priv *priv,
200				struct ipw_supported_rates *prates);
201static void ipw_set_hwcrypto_keys(struct ipw_priv *);
202static void ipw_send_wep_keys(struct ipw_priv *, int);
203
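/* Format one hex-dump line: an 8-digit offset, two groups of up to
 * 8 hex bytes, then the same bytes as printable ASCII ('.' substituted
 * for non-printable characters). */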
204static int snprint_line(char *buf, size_t count,
205			const u8 * data, u32 len, u32 ofs)
206{
207	int out, i, j, l;
208	char c;
209
210	out = snprintf(buf, count, "%08X", ofs);
211
212	for (l = 0, i = 0; i < 2; i++) {
213		out += snprintf(buf + out, count - out, " ");
214		for (j = 0; j < 8 && l < len; j++, l++)
215			out += snprintf(buf + out, count - out, "%02X ",
216					data[(i * 8 + j)]);
217		for (; j < 8; j++)
218			out += snprintf(buf + out, count - out, "   ");
219	}
220
221	out += snprintf(buf + out, count - out, " ");
222	for (l = 0, i = 0; i < 2; i++) {
223		out += snprintf(buf + out, count - out, " ");
224		for (j = 0; j < 8 && l < len; j++, l++) {
225			c = data[(i * 8 + j)];
226			if (!isascii(c) || !isprint(c))
227				c = '.';
228
229			out += snprintf(buf + out, count - out, "%c", c);
230		}
231
232		for (; j < 8; j++)
233			out += snprintf(buf + out, count - out, " ");
234	}
235
236	return out;
237}
238
239static void printk_buf(int level, const u8 * data, u32 len)
240{
241	char line[81];
242	u32 ofs = 0;
243	if (!(ipw_debug_level & level))
244		return;
245
246	while (len) {
247		snprint_line(line, sizeof(line), &data[ofs],
248			     min(len, 16U), ofs);
249		printk(KERN_DEBUG "%s\n", line);
250		ofs += 16;
251		len -= min(len, 16U);
252	}
253}
254
255static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
256{
257	size_t out = size;
258	u32 ofs = 0;
259	int total = 0;
260
261	while (size && len) {
262		out = snprint_line(output, size, &data[ofs],
263				   min_t(size_t, len, 16U), ofs);
264
265		ofs += 16;
266		output += out;
267		size -= out;
268		len -= min_t(size_t, len, 16U);
269		total += out;
270	}
271	return total;
272}
273
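/* Register access model: the first 4K of SRAM/registers is mapped at
 * priv->hw_base and accessed directly (ipw_read8/16/32, ipw_write8/16/32);
 * anything above 4K is reached through the IPW_INDIRECT_ADDR and
 * IPW_INDIRECT_DATA window (ipw_read_reg8/32, ipw_write_reg8/16/32 and
 * the _indirect helpers). */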
274/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
275static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
276#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
277
278/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
279static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
280#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
281
282/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
283static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
284static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
285{
286	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
287		     __LINE__, (u32) (b), (u32) (c));
288	_ipw_write_reg8(a, b, c);
289}
290
291/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
292static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
293static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
294{
295	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
296		     __LINE__, (u32) (b), (u32) (c));
297	_ipw_write_reg16(a, b, c);
298}
299
300/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
301static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
302static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
303{
304	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
305		     __LINE__, (u32) (b), (u32) (c));
306	_ipw_write_reg32(a, b, c);
307}
308
309/* 8-bit direct write (low 4K) */
310static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
311		u8 val)
312{
313	writeb(val, ipw->hw_base + ofs);
314}
315
316/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
317#define ipw_write8(ipw, ofs, val) do { \
318	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
319			__LINE__, (u32)(ofs), (u32)(val)); \
320	_ipw_write8(ipw, ofs, val); \
321} while (0)
322
323/* 16-bit direct write (low 4K) */
324static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
325		u16 val)
326{
327	writew(val, ipw->hw_base + ofs);
328}
329
330/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
331#define ipw_write16(ipw, ofs, val) do { \
332	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
333			__LINE__, (u32)(ofs), (u32)(val)); \
334	_ipw_write16(ipw, ofs, val); \
335} while (0)
336
337/* 32-bit direct write (low 4K) */
338static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
339		u32 val)
340{
341	writel(val, ipw->hw_base + ofs);
342}
343
344/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
345#define ipw_write32(ipw, ofs, val) do { \
346	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
347			__LINE__, (u32)(ofs), (u32)(val)); \
348	_ipw_write32(ipw, ofs, val); \
349} while (0)
350
351/* 8-bit direct read (low 4K) */
352static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
353{
354	return readb(ipw->hw_base + ofs);
355}
356
357/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
358#define ipw_read8(ipw, ofs) ({ \
359	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
360			(u32)(ofs)); \
361	_ipw_read8(ipw, ofs); \
362})
363
364/* 16-bit direct read (low 4K) */
365static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
366{
367	return readw(ipw->hw_base + ofs);
368}
369
370/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
371#define ipw_read16(ipw, ofs) ({ \
372	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
373			(u32)(ofs)); \
374	_ipw_read16(ipw, ofs); \
375})
376
377/* 32-bit direct read (low 4K) */
378static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
379{
380	return readl(ipw->hw_base + ofs);
381}
382
383/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
384#define ipw_read32(ipw, ofs) ({ \
385	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
386			(u32)(ofs)); \
387	_ipw_read32(ipw, ofs); \
388})
389
390static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
391/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
392#define ipw_read_indirect(a, b, c, d) ({ \
393	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
394			__LINE__, (u32)(b), (u32)(d)); \
395	_ipw_read_indirect(a, b, c, d); \
396})
397
398/* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
399static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
400				int num);
401#define ipw_write_indirect(a, b, c, d) do { \
402	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
403			__LINE__, (u32)(b), (u32)(d)); \
404	_ipw_write_indirect(a, b, c, d); \
405} while (0)
406
407/* 32-bit indirect write (above 4K) */
408static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
409{
410	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
411	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
412	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
413}
414
415/* 8-bit indirect write (above 4K) */
416static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
417{
418	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
419	u32 dif_len = reg - aligned_addr;
420
421	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
422	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
423	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
424}
425
426/* 16-bit indirect write (above 4K) */
427static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
428{
429	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
430	u32 dif_len = (reg - aligned_addr) & (~0x1ul);
431
432	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
433	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
434	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
435}
436
437/* 8-bit indirect read (above 4K) */
438static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
439{
440	u32 word;
441	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
442	IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
443	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
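	/* the hardware returns the aligned dword; shift out the byte at
	 * the requested (unaligned) offset */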
444	return (word >> ((reg & 0x3) * 8)) & 0xff;
445}
446
447/* 32-bit indirect read (above 4K) */
448static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
449{
450	u32 value;
451
452	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
453
454	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
455	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
456	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
457	return value;
458}
459
460/* General purpose, no alignment requirement, iterative (multi-byte) read, */
461/*    for area above 1st 4K of SRAM/reg space */
462static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
463			       int num)
464{
465	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
466	u32 dif_len = addr - aligned_addr;
467	u32 i;
468
469	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
470
471	if (num <= 0) {
472		return;
473	}
474
475	/* Read the first dword (or portion) byte by byte */
476	if (unlikely(dif_len)) {
477		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
478		/* Start reading at aligned_addr + dif_len */
479		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
480			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
481		aligned_addr += 4;
482	}
483
484	/* Read all of the middle dwords as dwords, with auto-increment */
485	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
486	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
487		*(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
488
489	/* Read the last dword (or portion) byte by byte */
490	if (unlikely(num)) {
491		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
492		for (i = 0; num > 0; i++, num--)
493			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
494	}
495}
496
497/* General purpose, no alignment requirement, iterative (multi-byte) write, */
498/*    for area above 1st 4K of SRAM/reg space */
499static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
500				int num)
501{
502	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
503	u32 dif_len = addr - aligned_addr;
504	u32 i;
505
506	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
507
508	if (num <= 0) {
509		return;
510	}
511
512	/* Write the first dword (or portion) byte by byte */
513	if (unlikely(dif_len)) {
514		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
515		/* Start writing at aligned_addr + dif_len */
516		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
517			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
518		aligned_addr += 4;
519	}
520
521	/* Write all of the middle dwords as dwords, with auto-increment */
522	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
523	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
524		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
525
526	/* Write the last dword (or portion) byte by byte */
527	if (unlikely(num)) {
528		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
529		for (i = 0; num > 0; i++, num--, buf++)
530			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
531	}
532}
533
534/* General purpose, no alignment requirement, iterative (multi-byte) write, */
535/*    for 1st 4K of SRAM/regs space */
536static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
537			     int num)
538{
539	memcpy_toio((priv->hw_base + addr), buf, num);
540}
541
542/* Set bit(s) in low 4K of SRAM/regs */
543static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
544{
545	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
546}
547
548/* Clear bit(s) in low 4K of SRAM/regs */
549static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
550{
551	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
552}
553
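/* __ipw_enable_interrupts and __ipw_disable_interrupts expect the caller
 * to hold priv->irq_lock; the ipw_enable_interrupts and
 * ipw_disable_interrupts wrappers below take the lock themselves. */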
554static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
555{
556	if (priv->status & STATUS_INT_ENABLED)
557		return;
558	priv->status |= STATUS_INT_ENABLED;
559	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
560}
561
562static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
563{
564	if (!(priv->status & STATUS_INT_ENABLED))
565		return;
566	priv->status &= ~STATUS_INT_ENABLED;
567	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
568}
569
570static inline void ipw_enable_interrupts(struct ipw_priv *priv)
571{
572	unsigned long flags;
573
574	spin_lock_irqsave(&priv->irq_lock, flags);
575	__ipw_enable_interrupts(priv);
576	spin_unlock_irqrestore(&priv->irq_lock, flags);
577}
578
579static inline void ipw_disable_interrupts(struct ipw_priv *priv)
580{
581	unsigned long flags;
582
583	spin_lock_irqsave(&priv->irq_lock, flags);
584	__ipw_disable_interrupts(priv);
585	spin_unlock_irqrestore(&priv->irq_lock, flags);
586}
587
588static char *ipw_error_desc(u32 val)
589{
590	switch (val) {
591	case IPW_FW_ERROR_OK:
592		return "ERROR_OK";
593	case IPW_FW_ERROR_FAIL:
594		return "ERROR_FAIL";
595	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
596		return "MEMORY_UNDERFLOW";
597	case IPW_FW_ERROR_MEMORY_OVERFLOW:
598		return "MEMORY_OVERFLOW";
599	case IPW_FW_ERROR_BAD_PARAM:
600		return "BAD_PARAM";
601	case IPW_FW_ERROR_BAD_CHECKSUM:
602		return "BAD_CHECKSUM";
603	case IPW_FW_ERROR_NMI_INTERRUPT:
604		return "NMI_INTERRUPT";
605	case IPW_FW_ERROR_BAD_DATABASE:
606		return "BAD_DATABASE";
607	case IPW_FW_ERROR_ALLOC_FAIL:
608		return "ALLOC_FAIL";
609	case IPW_FW_ERROR_DMA_UNDERRUN:
610		return "DMA_UNDERRUN";
611	case IPW_FW_ERROR_DMA_STATUS:
612		return "DMA_STATUS";
613	case IPW_FW_ERROR_DINO_ERROR:
614		return "DINO_ERROR";
615	case IPW_FW_ERROR_EEPROM_ERROR:
616		return "EEPROM_ERROR";
617	case IPW_FW_ERROR_SYSASSERT:
618		return "SYSASSERT";
619	case IPW_FW_ERROR_FATAL_ERROR:
620		return "FATAL_ERROR";
621	default:
622		return "UNKNOWN_ERROR";
623	}
624}
625
626static void ipw_dump_error_log(struct ipw_priv *priv,
627			       struct ipw_fw_error *error)
628{
629	u32 i;
630
631	if (!error) {
632		IPW_ERROR("Error allocating and capturing error log.  "
633			  "Nothing to dump.\n");
634		return;
635	}
636
637	IPW_ERROR("Start IPW Error Log Dump:\n");
638	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
639		  error->status, error->config);
640
641	for (i = 0; i < error->elem_len; i++)
642		IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
643			  ipw_error_desc(error->elem[i].desc),
644			  error->elem[i].time,
645			  error->elem[i].blink1,
646			  error->elem[i].blink2,
647			  error->elem[i].link1,
648			  error->elem[i].link2, error->elem[i].data);
649	for (i = 0; i < error->log_len; i++)
650		IPW_ERROR("%i\t0x%08x\t%i\n",
651			  error->log[i].time,
652			  error->log[i].data, error->log[i].event);
653}
654
655static inline int ipw_is_init(struct ipw_priv *priv)
656{
657	return (priv->status & STATUS_INIT) ? 1 : 0;
658}
659
660static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
661{
662	u32 addr, field_info, field_len, field_count, total_len;
663
664	IPW_DEBUG_ORD("ordinal = %i\n", ord);
665
666	if (!priv || !val || !len) {
667		IPW_DEBUG_ORD("Invalid argument\n");
668		return -EINVAL;
669	}
670
671	/* verify device ordinal tables have been initialized */
672	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
673		IPW_DEBUG_ORD("Access ordinals before initialization\n");
674		return -EINVAL;
675	}
676
677	switch (IPW_ORD_TABLE_ID_MASK & ord) {
678	case IPW_ORD_TABLE_0_MASK:
679		/*
680		 * TABLE 0: Direct access to a table of 32 bit values
681		 *
682		 * This is a very simple table with the data directly
683		 * read from the table
684		 */
685
686		/* remove the table id from the ordinal */
687		ord &= IPW_ORD_TABLE_VALUE_MASK;
688
689		/* boundary check */
690		if (ord > priv->table0_len) {
691			IPW_DEBUG_ORD("ordinal value (%i) longer than "
692				      "max (%i)\n", ord, priv->table0_len);
693			return -EINVAL;
694		}
695
696		/* verify we have enough room to store the value */
697		if (*len < sizeof(u32)) {
698			IPW_DEBUG_ORD("ordinal buffer length too small, "
699				      "need %zd\n", sizeof(u32));
700			return -EINVAL;
701		}
702
703		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
704			      ord, priv->table0_addr + (ord << 2));
705
706		*len = sizeof(u32);
707		ord <<= 2;
708		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
709		break;
710
711	case IPW_ORD_TABLE_1_MASK:
712		/*
713		 * TABLE 1: Indirect access to a table of 32 bit values
714		 *
715		 * This is a fairly large table of u32 values each
716		 * representing starting addr for the data (which is
717		 * also a u32)
718		 */
719
720		/* remove the table id from the ordinal */
721		ord &= IPW_ORD_TABLE_VALUE_MASK;
722
723		/* boundary check */
724		if (ord > priv->table1_len) {
725			IPW_DEBUG_ORD("ordinal value too long\n");
726			return -EINVAL;
727		}
728
729		/* verify we have enough room to store the value */
730		if (*len < sizeof(u32)) {
731			IPW_DEBUG_ORD("ordinal buffer length too small, "
732				      "need %zd\n", sizeof(u32));
733			return -EINVAL;
734		}
735
736		*((u32 *) val) =
737		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
738		*len = sizeof(u32);
739		break;
740
741	case IPW_ORD_TABLE_2_MASK:
742		/*
743		 * TABLE 2: Indirect access to a table of variable sized values
744		 *
745		 * This table consists of six values, each containing
746		 *     - dword containing the starting offset of the data
747		 *     - dword containing the length in the first 16 bits
748		 *       and the count in the second 16 bits
749		 */
750
751		/* remove the table id from the ordinal */
752		ord &= IPW_ORD_TABLE_VALUE_MASK;
753
754		/* boundary check */
755		if (ord > priv->table2_len) {
756			IPW_DEBUG_ORD("ordinal value too long\n");
757			return -EINVAL;
758		}
759
760		/* get the address of statistic */
761		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
762
763		/* get the second DW of statistics;
764		 * two 16-bit words - first is length, second is count */
765		field_info =
766		    ipw_read_reg32(priv,
767				   priv->table2_addr + (ord << 3) +
768				   sizeof(u32));
769
770		/* get each entry length */
771		field_len = *((u16 *) & field_info);
772
773		/* get number of entries */
774		field_count = *(((u16 *) & field_info) + 1);
775
776		/* abort if not enough memory */
777		total_len = field_len * field_count;
778		if (total_len > *len) {
779			*len = total_len;
780			return -EINVAL;
781		}
782
783		*len = total_len;
784		if (!total_len)
785			return 0;
786
787		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
788			      "field_info = 0x%08x\n",
789			      addr, total_len, field_info);
790		ipw_read_indirect(priv, addr, val, total_len);
791		break;
792
793	default:
794		IPW_DEBUG_ORD("Invalid ordinal!\n");
795		return -EINVAL;
796
797	}
798
799	return 0;
800}
801
802static void ipw_init_ordinals(struct ipw_priv *priv)
803{
804	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
805	priv->table0_len = ipw_read32(priv, priv->table0_addr);
806
807	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
808		      priv->table0_addr, priv->table0_len);
809
810	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
811	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
812
813	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
814		      priv->table1_addr, priv->table1_len);
815
816	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
817	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
818	priv->table2_len &= 0x0000ffff;	/* use first two bytes */
819
820	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
821		      priv->table2_addr, priv->table2_len);
822
823}
824
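/* Mask the standby-start and DMA gate control bits out of a value read
 * from IPW_EVENT_REG before the LED code writes it back. */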
825static u32 ipw_register_toggle(u32 reg)
826{
827	reg &= ~IPW_START_STANDBY;
828	if (reg & IPW_GATE_ODMA)
829		reg &= ~IPW_GATE_ODMA;
830	if (reg & IPW_GATE_IDMA)
831		reg &= ~IPW_GATE_IDMA;
832	if (reg & IPW_GATE_ADMA)
833		reg &= ~IPW_GATE_ADMA;
834	return reg;
835}
836
837/*
838 * LED behavior:
839 * - On radio ON, turn on any LEDs that are required to be on during start
840 * - On initialization, start unassociated blink
841 * - On association, disable unassociated blink
842 * - On disassociation, start unassociated blink
843 * - On radio OFF, turn off any LEDs started during radio on
844 *
845 */
846#define LD_TIME_LINK_ON msecs_to_jiffies(300)
847#define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
848#define LD_TIME_ACT_ON msecs_to_jiffies(250)
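/* While unassociated the link LED blinks: on for LD_TIME_LINK_ON (300ms),
 * off for LD_TIME_LINK_OFF (2.7s); the activity LED stays on for
 * LD_TIME_ACT_ON (250ms) after each burst of activity. */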
849
850static void ipw_led_link_on(struct ipw_priv *priv)
851{
852	unsigned long flags;
853	u32 led;
854
855	/* If configured to not use LEDs, or nic_type is 1,
856	 * then we don't toggle a LINK led */
857	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
858		return;
859
860	spin_lock_irqsave(&priv->lock, flags);
861
862	if (!(priv->status & STATUS_RF_KILL_MASK) &&
863	    !(priv->status & STATUS_LED_LINK_ON)) {
864		IPW_DEBUG_LED("Link LED On\n");
865		led = ipw_read_reg32(priv, IPW_EVENT_REG);
866		led |= priv->led_association_on;
867
868		led = ipw_register_toggle(led);
869
870		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
871		ipw_write_reg32(priv, IPW_EVENT_REG, led);
872
873		priv->status |= STATUS_LED_LINK_ON;
874
875		/* If we aren't associated, schedule turning the LED off */
876		if (!(priv->status & STATUS_ASSOCIATED))
877			queue_delayed_work(priv->workqueue,
878					   &priv->led_link_off,
879					   LD_TIME_LINK_ON);
880	}
881
882	spin_unlock_irqrestore(&priv->lock, flags);
883}
884
885static void ipw_bg_led_link_on(struct work_struct *work)
886{
887	struct ipw_priv *priv =
888		container_of(work, struct ipw_priv, led_link_on.work);
889	mutex_lock(&priv->mutex);
890	ipw_led_link_on(priv);
891	mutex_unlock(&priv->mutex);
892}
893
894static void ipw_led_link_off(struct ipw_priv *priv)
895{
896	unsigned long flags;
897	u32 led;
898
899	/* If configured not to use LEDs, or nic type is 1,
900	 * then we don't toggle the LINK led. */
901	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
902		return;
903
904	spin_lock_irqsave(&priv->lock, flags);
905
906	if (priv->status & STATUS_LED_LINK_ON) {
907		led = ipw_read_reg32(priv, IPW_EVENT_REG);
908		led &= priv->led_association_off;
909		led = ipw_register_toggle(led);
910
911		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
912		ipw_write_reg32(priv, IPW_EVENT_REG, led);
913
914		IPW_DEBUG_LED("Link LED Off\n");
915
916		priv->status &= ~STATUS_LED_LINK_ON;
917
918		/* If we aren't associated and the radio is on, schedule
919		 * turning the LED on (blink while unassociated) */
920		if (!(priv->status & STATUS_RF_KILL_MASK) &&
921		    !(priv->status & STATUS_ASSOCIATED))
922			queue_delayed_work(priv->workqueue, &priv->led_link_on,
923					   LD_TIME_LINK_OFF);
924
925	}
926
927	spin_unlock_irqrestore(&priv->lock, flags);
928}
929
930static void ipw_bg_led_link_off(struct work_struct *work)
931{
932	struct ipw_priv *priv =
933		container_of(work, struct ipw_priv, led_link_off.work);
934	mutex_lock(&priv->mutex);
935	ipw_led_link_off(priv);
936	mutex_unlock(&priv->mutex);
937}
938
939static void __ipw_led_activity_on(struct ipw_priv *priv)
940{
941	u32 led;
942
943	if (priv->config & CFG_NO_LED)
944		return;
945
946	if (priv->status & STATUS_RF_KILL_MASK)
947		return;
948
949	if (!(priv->status & STATUS_LED_ACT_ON)) {
950		led = ipw_read_reg32(priv, IPW_EVENT_REG);
951		led |= priv->led_activity_on;
952
953		led = ipw_register_toggle(led);
954
955		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
956		ipw_write_reg32(priv, IPW_EVENT_REG, led);
957
958		IPW_DEBUG_LED("Activity LED On\n");
959
960		priv->status |= STATUS_LED_ACT_ON;
961
962		cancel_delayed_work(&priv->led_act_off);
963		queue_delayed_work(priv->workqueue, &priv->led_act_off,
964				   LD_TIME_ACT_ON);
965	} else {
966		/* Reschedule LED off for full time period */
967		cancel_delayed_work(&priv->led_act_off);
968		queue_delayed_work(priv->workqueue, &priv->led_act_off,
969				   LD_TIME_ACT_ON);
970	}
971}
972
973#if 0
974void ipw_led_activity_on(struct ipw_priv *priv)
975{
976	unsigned long flags;
977	spin_lock_irqsave(&priv->lock, flags);
978	__ipw_led_activity_on(priv);
979	spin_unlock_irqrestore(&priv->lock, flags);
980}
981#endif  /*  0  */
982
983static void ipw_led_activity_off(struct ipw_priv *priv)
984{
985	unsigned long flags;
986	u32 led;
987
988	if (priv->config & CFG_NO_LED)
989		return;
990
991	spin_lock_irqsave(&priv->lock, flags);
992
993	if (priv->status & STATUS_LED_ACT_ON) {
994		led = ipw_read_reg32(priv, IPW_EVENT_REG);
995		led &= priv->led_activity_off;
996
997		led = ipw_register_toggle(led);
998
999		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1000		ipw_write_reg32(priv, IPW_EVENT_REG, led);
1001
1002		IPW_DEBUG_LED("Activity LED Off\n");
1003
1004		priv->status &= ~STATUS_LED_ACT_ON;
1005	}
1006
1007	spin_unlock_irqrestore(&priv->lock, flags);
1008}
1009
1010static void ipw_bg_led_activity_off(struct work_struct *work)
1011{
1012	struct ipw_priv *priv =
1013		container_of(work, struct ipw_priv, led_act_off.work);
1014	mutex_lock(&priv->mutex);
1015	ipw_led_activity_off(priv);
1016	mutex_unlock(&priv->mutex);
1017}
1018
1019static void ipw_led_band_on(struct ipw_priv *priv)
1020{
1021	unsigned long flags;
1022	u32 led;
1023
1024	/* Only nic type 1 supports mode LEDs */
1025	if (priv->config & CFG_NO_LED ||
1026	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1027		return;
1028
1029	spin_lock_irqsave(&priv->lock, flags);
1030
1031	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1032	if (priv->assoc_network->mode == IEEE_A) {
1033		led |= priv->led_ofdm_on;
1034		led &= priv->led_association_off;
1035		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1036	} else if (priv->assoc_network->mode == IEEE_G) {
1037		led |= priv->led_ofdm_on;
1038		led |= priv->led_association_on;
1039		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1040	} else {
1041		led &= priv->led_ofdm_off;
1042		led |= priv->led_association_on;
1043		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1044	}
1045
1046	led = ipw_register_toggle(led);
1047
1048	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1049	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1050
1051	spin_unlock_irqrestore(&priv->lock, flags);
1052}
1053
1054static void ipw_led_band_off(struct ipw_priv *priv)
1055{
1056	unsigned long flags;
1057	u32 led;
1058
1059	/* Only nic type 1 supports mode LEDs */
1060	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1061		return;
1062
1063	spin_lock_irqsave(&priv->lock, flags);
1064
1065	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1066	led &= priv->led_ofdm_off;
1067	led &= priv->led_association_off;
1068
1069	led = ipw_register_toggle(led);
1070
1071	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1072	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1073
1074	spin_unlock_irqrestore(&priv->lock, flags);
1075}
1076
1077static void ipw_led_radio_on(struct ipw_priv *priv)
1078{
1079	ipw_led_link_on(priv);
1080}
1081
1082static void ipw_led_radio_off(struct ipw_priv *priv)
1083{
1084	ipw_led_activity_off(priv);
1085	ipw_led_link_off(priv);
1086}
1087
1088static void ipw_led_link_up(struct ipw_priv *priv)
1089{
1090	/* Set the Link Led on for all nic types */
1091	ipw_led_link_on(priv);
1092}
1093
1094static void ipw_led_link_down(struct ipw_priv *priv)
1095{
1096	ipw_led_activity_off(priv);
1097	ipw_led_link_off(priv);
1098
1099	if (priv->status & STATUS_RF_KILL_MASK)
1100		ipw_led_radio_off(priv);
1101}
1102
1103static void ipw_led_init(struct ipw_priv *priv)
1104{
1105	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1106
1107	/* Set the default PINs for the link and activity leds */
1108	priv->led_activity_on = IPW_ACTIVITY_LED;
1109	priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1110
1111	priv->led_association_on = IPW_ASSOCIATED_LED;
1112	priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1113
1114	/* Set the default PINs for the OFDM leds */
1115	priv->led_ofdm_on = IPW_OFDM_LED;
1116	priv->led_ofdm_off = ~(IPW_OFDM_LED);
1117
1118	switch (priv->nic_type) {
1119	case EEPROM_NIC_TYPE_1:
1120		/* In this NIC type, the LEDs are reversed.... */
1121		priv->led_activity_on = IPW_ASSOCIATED_LED;
1122		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1123		priv->led_association_on = IPW_ACTIVITY_LED;
1124		priv->led_association_off = ~(IPW_ACTIVITY_LED);
1125
1126		if (!(priv->config & CFG_NO_LED))
1127			ipw_led_band_on(priv);
1128
1129		/* And we don't blink link LEDs for this nic, so
1130		 * just return here */
1131		return;
1132
1133	case EEPROM_NIC_TYPE_3:
1134	case EEPROM_NIC_TYPE_2:
1135	case EEPROM_NIC_TYPE_4:
1136	case EEPROM_NIC_TYPE_0:
1137		break;
1138
1139	default:
1140		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1141			       priv->nic_type);
1142		priv->nic_type = EEPROM_NIC_TYPE_0;
1143		break;
1144	}
1145
1146	if (!(priv->config & CFG_NO_LED)) {
1147		if (priv->status & STATUS_ASSOCIATED)
1148			ipw_led_link_on(priv);
1149		else
1150			ipw_led_link_off(priv);
1151	}
1152}
1153
1154static void ipw_led_shutdown(struct ipw_priv *priv)
1155{
1156	ipw_led_activity_off(priv);
1157	ipw_led_link_off(priv);
1158	ipw_led_band_off(priv);
1159	cancel_delayed_work(&priv->led_link_on);
1160	cancel_delayed_work(&priv->led_link_off);
1161	cancel_delayed_work(&priv->led_act_off);
1162}
1163
1164/*
1165 * The following adds a new attribute to the sysfs representation
1166 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1167 * used for controlling the debug level.
1168 *
1169 * See the level definitions in ipw for details.
1170 */
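/*
 * Usage sketch (the sysfs path assumes the driver name used at
 * registration):
 *   # cat /sys/bus/pci/drivers/ipw2200/debug_level
 *   # echo 0x00000040 > /sys/bus/pci/drivers/ipw2200/debug_level
 * Values are parsed as hex when prefixed with "x" or "0x", decimal
 * otherwise.
 */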
1171static ssize_t show_debug_level(struct device_driver *d, char *buf)
1172{
1173	return sprintf(buf, "0x%08X\n", ipw_debug_level);
1174}
1175
1176static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1177				 size_t count)
1178{
1179	char *p = (char *)buf;
1180	u32 val;
1181
1182	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1183		p++;
1184		if (p[0] == 'x' || p[0] == 'X')
1185			p++;
1186		val = simple_strtoul(p, &p, 16);
1187	} else
1188		val = simple_strtoul(p, &p, 10);
1189	if (p == buf)
1190		printk(KERN_INFO DRV_NAME
1191		       ": %s is not in hex or decimal form.\n", buf);
1192	else
1193		ipw_debug_level = val;
1194
1195	return strnlen(buf, count);
1196}
1197
1198static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1199		   show_debug_level, store_debug_level);
1200
1201static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1202{
1203	/* length = 1st dword in log */
1204	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1205}
1206
1207static void ipw_capture_event_log(struct ipw_priv *priv,
1208				  u32 log_len, struct ipw_event *log)
1209{
1210	u32 base;
1211
1212	if (log_len) {
1213		base = ipw_read32(priv, IPW_EVENT_LOG);
1214		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1215				  (u8 *) log, sizeof(*log) * log_len);
1216	}
1217}
1218
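/* Capture the firmware error log in a single allocation: the ipw_fw_error
 * header is followed in ->payload by elem_len error elements and then
 * log_len event-log entries. */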
1219static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1220{
1221	struct ipw_fw_error *error;
1222	u32 log_len = ipw_get_event_log_len(priv);
1223	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1224	u32 elem_len = ipw_read_reg32(priv, base);
1225
1226	error = kmalloc(sizeof(*error) +
1227			sizeof(*error->elem) * elem_len +
1228			sizeof(*error->log) * log_len, GFP_ATOMIC);
1229	if (!error) {
1230		IPW_ERROR("Memory allocation for firmware error log "
1231			  "failed.\n");
1232		return NULL;
1233	}
1234	error->jiffies = jiffies;
1235	error->status = priv->status;
1236	error->config = priv->config;
1237	error->elem_len = elem_len;
1238	error->log_len = log_len;
1239	error->elem = (struct ipw_error_elem *)error->payload;
1240	error->log = (struct ipw_event *)(error->elem + elem_len);
1241
1242	ipw_capture_event_log(priv, log_len, error->log);
1243
1244	if (elem_len)
1245		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1246				  sizeof(*error->elem) * elem_len);
1247
1248	return error;
1249}
1250
1251static ssize_t show_event_log(struct device *d,
1252			      struct device_attribute *attr, char *buf)
1253{
1254	struct ipw_priv *priv = dev_get_drvdata(d);
1255	u32 log_len = ipw_get_event_log_len(priv);
1256	u32 log_size;
1257	struct ipw_event *log;
1258	u32 len = 0, i;
1259
1260	/* not using min() because of its strict type checking */
1261	log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1262			sizeof(*log) * log_len : PAGE_SIZE;
1263	log = kzalloc(log_size, GFP_KERNEL);
1264	if (!log) {
1265		IPW_ERROR("Unable to allocate memory for log\n");
1266		return 0;
1267	}
1268	log_len = log_size / sizeof(*log);
1269	ipw_capture_event_log(priv, log_len, log);
1270
1271	len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1272	for (i = 0; i < log_len; i++)
1273		len += snprintf(buf + len, PAGE_SIZE - len,
1274				"\n%08X%08X%08X",
1275				log[i].time, log[i].event, log[i].data);
1276	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1277	kfree(log);
1278	return len;
1279}
1280
1281static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1282
1283static ssize_t show_error(struct device *d,
1284			  struct device_attribute *attr, char *buf)
1285{
1286	struct ipw_priv *priv = dev_get_drvdata(d);
1287	u32 len = 0, i;
1288	if (!priv->error)
1289		return 0;
1290	len += snprintf(buf + len, PAGE_SIZE - len,
1291			"%08lX%08X%08X%08X",
1292			priv->error->jiffies,
1293			priv->error->status,
1294			priv->error->config, priv->error->elem_len);
1295	for (i = 0; i < priv->error->elem_len; i++)
1296		len += snprintf(buf + len, PAGE_SIZE - len,
1297				"\n%08X%08X%08X%08X%08X%08X%08X",
1298				priv->error->elem[i].time,
1299				priv->error->elem[i].desc,
1300				priv->error->elem[i].blink1,
1301				priv->error->elem[i].blink2,
1302				priv->error->elem[i].link1,
1303				priv->error->elem[i].link2,
1304				priv->error->elem[i].data);
1305
1306	len += snprintf(buf + len, PAGE_SIZE - len,
1307			"\n%08X", priv->error->log_len);
1308	for (i = 0; i < priv->error->log_len; i++)
1309		len += snprintf(buf + len, PAGE_SIZE - len,
1310				"\n%08X%08X%08X",
1311				priv->error->log[i].time,
1312				priv->error->log[i].event,
1313				priv->error->log[i].data);
1314	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1315	return len;
1316}
1317
1318static ssize_t clear_error(struct device *d,
1319			   struct device_attribute *attr,
1320			   const char *buf, size_t count)
1321{
1322	struct ipw_priv *priv = dev_get_drvdata(d);
1323
1324	kfree(priv->error);
1325	priv->error = NULL;
1326	return count;
1327}
1328
1329static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1330
1331static ssize_t show_cmd_log(struct device *d,
1332			    struct device_attribute *attr, char *buf)
1333{
1334	struct ipw_priv *priv = dev_get_drvdata(d);
1335	u32 len = 0, i;
1336	if (!priv->cmdlog)
1337		return 0;
1338	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1339	     (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1340	     i = (i + 1) % priv->cmdlog_len) {
1341		len +=
1342		    snprintf(buf + len, PAGE_SIZE - len,
1343			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1344			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1345			     priv->cmdlog[i].cmd.len);
1346		len +=
1347		    snprintk_buf(buf + len, PAGE_SIZE - len,
1348				 (u8 *) priv->cmdlog[i].cmd.param,
1349				 priv->cmdlog[i].cmd.len);
1350		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1351	}
1352	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1353	return len;
1354}
1355
1356static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1357
1358#ifdef CONFIG_IPW2200_PROMISCUOUS
1359static void ipw_prom_free(struct ipw_priv *priv);
1360static int ipw_prom_alloc(struct ipw_priv *priv);
1361static ssize_t store_rtap_iface(struct device *d,
1362			 struct device_attribute *attr,
1363			 const char *buf, size_t count)
1364{
1365	struct ipw_priv *priv = dev_get_drvdata(d);
1366	int rc = 0;
1367
1368	if (count < 1)
1369		return -EINVAL;
1370
1371	switch (buf[0]) {
1372	case '0':
1373		if (!rtap_iface)
1374			return count;
1375
1376		if (netif_running(priv->prom_net_dev)) {
1377			IPW_WARNING("Interface is up.  Cannot unregister.\n");
1378			return count;
1379		}
1380
1381		ipw_prom_free(priv);
1382		rtap_iface = 0;
1383		break;
1384
1385	case '1':
1386		if (rtap_iface)
1387			return count;
1388
1389		rc = ipw_prom_alloc(priv);
1390		if (!rc)
1391			rtap_iface = 1;
1392		break;
1393
1394	default:
1395		return -EINVAL;
1396	}
1397
1398	if (rc) {
1399		IPW_ERROR("Failed to register promiscuous network "
1400			  "device (error %d).\n", rc);
1401	}
1402
1403	return count;
1404}
1405
1406static ssize_t show_rtap_iface(struct device *d,
1407			struct device_attribute *attr,
1408			char *buf)
1409{
1410	struct ipw_priv *priv = dev_get_drvdata(d);
1411	if (rtap_iface)
1412		return sprintf(buf, "%s", priv->prom_net_dev->name);
1413	else {
1414		buf[0] = '-';
1415		buf[1] = '1';
1416		buf[2] = '\0';
1417		return 3;
1418	}
1419}
1420
1421static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1422		   store_rtap_iface);
1423
1424static ssize_t store_rtap_filter(struct device *d,
1425			 struct device_attribute *attr,
1426			 const char *buf, size_t count)
1427{
1428	struct ipw_priv *priv = dev_get_drvdata(d);
1429
1430	if (!priv->prom_priv) {
1431		IPW_ERROR("Attempting to set filter without "
1432			  "rtap_iface enabled.\n");
1433		return -EPERM;
1434	}
1435
1436	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1437
1438	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1439		       BIT_ARG16(priv->prom_priv->filter));
1440
1441	return count;
1442}
1443
1444static ssize_t show_rtap_filter(struct device *d,
1445			struct device_attribute *attr,
1446			char *buf)
1447{
1448	struct ipw_priv *priv = dev_get_drvdata(d);
1449	return sprintf(buf, "0x%04X",
1450		       priv->prom_priv ? priv->prom_priv->filter : 0);
1451}
1452
1453static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1454		   store_rtap_filter);
1455#endif
1456
1457static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1458			     char *buf)
1459{
1460	struct ipw_priv *priv = dev_get_drvdata(d);
1461	return sprintf(buf, "%d\n", priv->ieee->scan_age);
1462}
1463
1464static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1465			      const char *buf, size_t count)
1466{
1467	struct ipw_priv *priv = dev_get_drvdata(d);
1468	struct net_device *dev = priv->net_dev;
1469	char buffer[] = "00000000";
1470	unsigned long len =
1471	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1472	unsigned long val;
1473	char *p = buffer;
1474
1475	IPW_DEBUG_INFO("enter\n");
1476
1477	strncpy(buffer, buf, len);
1478	buffer[len] = 0;
1479
1480	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1481		p++;
1482		if (p[0] == 'x' || p[0] == 'X')
1483			p++;
1484		val = simple_strtoul(p, &p, 16);
1485	} else
1486		val = simple_strtoul(p, &p, 10);
1487	if (p == buffer) {
1488		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1489	} else {
1490		priv->ieee->scan_age = val;
1491		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1492	}
1493
1494	IPW_DEBUG_INFO("exit\n");
1495	return len;
1496}
1497
1498static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1499
1500static ssize_t show_led(struct device *d, struct device_attribute *attr,
1501			char *buf)
1502{
1503	struct ipw_priv *priv = dev_get_drvdata(d);
1504	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1505}
1506
1507static ssize_t store_led(struct device *d, struct device_attribute *attr,
1508			 const char *buf, size_t count)
1509{
1510	struct ipw_priv *priv = dev_get_drvdata(d);
1511
1512	IPW_DEBUG_INFO("enter\n");
1513
1514	if (count == 0)
1515		return 0;
1516
1517	if (*buf == 0) {
1518		IPW_DEBUG_LED("Disabling LED control.\n");
1519		priv->config |= CFG_NO_LED;
1520		ipw_led_shutdown(priv);
1521	} else {
1522		IPW_DEBUG_LED("Enabling LED control.\n");
1523		priv->config &= ~CFG_NO_LED;
1524		ipw_led_init(priv);
1525	}
1526
1527	IPW_DEBUG_INFO("exit\n");
1528	return count;
1529}
1530
1531static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1532
1533static ssize_t show_status(struct device *d,
1534			   struct device_attribute *attr, char *buf)
1535{
1536	struct ipw_priv *p = dev_get_drvdata(d);
1537	return sprintf(buf, "0x%08x\n", (int)p->status);
1538}
1539
1540static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1541
1542static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1543			char *buf)
1544{
1545	struct ipw_priv *p = dev_get_drvdata(d);
1546	return sprintf(buf, "0x%08x\n", (int)p->config);
1547}
1548
1549static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1550
1551static ssize_t show_nic_type(struct device *d,
1552			     struct device_attribute *attr, char *buf)
1553{
1554	struct ipw_priv *priv = dev_get_drvdata(d);
1555	return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1556}
1557
1558static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1559
1560static ssize_t show_ucode_version(struct device *d,
1561				  struct device_attribute *attr, char *buf)
1562{
1563	u32 len = sizeof(u32), tmp = 0;
1564	struct ipw_priv *p = dev_get_drvdata(d);
1565
1566	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1567		return 0;
1568
1569	return sprintf(buf, "0x%08x\n", tmp);
1570}
1571
1572static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1573
1574static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1575			char *buf)
1576{
1577	u32 len = sizeof(u32), tmp = 0;
1578	struct ipw_priv *p = dev_get_drvdata(d);
1579
1580	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1581		return 0;
1582
1583	return sprintf(buf, "0x%08x\n", tmp);
1584}
1585
1586static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1587
1588/*
1589 * Add a device attribute to view/control the delay between eeprom
1590 * operations.
1591 */
1592static ssize_t show_eeprom_delay(struct device *d,
1593				 struct device_attribute *attr, char *buf)
1594{
1595	struct ipw_priv *p = dev_get_drvdata(d);
1596	int n = p->eeprom_delay;
1597	return sprintf(buf, "%i\n", n);
1598}
1599static ssize_t store_eeprom_delay(struct device *d,
1600				  struct device_attribute *attr,
1601				  const char *buf, size_t count)
1602{
1603	struct ipw_priv *p = dev_get_drvdata(d);
1604	sscanf(buf, "%i", &p->eeprom_delay);
1605	return strnlen(buf, count);
1606}
1607
1608static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1609		   show_eeprom_delay, store_eeprom_delay);
1610
1611static ssize_t show_command_event_reg(struct device *d,
1612				      struct device_attribute *attr, char *buf)
1613{
1614	u32 reg = 0;
1615	struct ipw_priv *p = dev_get_drvdata(d);
1616
1617	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1618	return sprintf(buf, "0x%08x\n", reg);
1619}
1620static ssize_t store_command_event_reg(struct device *d,
1621				       struct device_attribute *attr,
1622				       const char *buf, size_t count)
1623{
1624	u32 reg;
1625	struct ipw_priv *p = dev_get_drvdata(d);
1626
1627	sscanf(buf, "%x", &reg);
1628	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1629	return strnlen(buf, count);
1630}
1631
1632static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1633		   show_command_event_reg, store_command_event_reg);
1634
1635static ssize_t show_mem_gpio_reg(struct device *d,
1636				 struct device_attribute *attr, char *buf)
1637{
1638	u32 reg = 0;
1639	struct ipw_priv *p = dev_get_drvdata(d);
1640
1641	reg = ipw_read_reg32(p, 0x301100);
1642	return sprintf(buf, "0x%08x\n", reg);
1643}
1644static ssize_t store_mem_gpio_reg(struct device *d,
1645				  struct device_attribute *attr,
1646				  const char *buf, size_t count)
1647{
1648	u32 reg;
1649	struct ipw_priv *p = dev_get_drvdata(d);
1650
1651	sscanf(buf, "%x", &reg);
1652	ipw_write_reg32(p, 0x301100, reg);
1653	return strnlen(buf, count);
1654}
1655
1656static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1657		   show_mem_gpio_reg, store_mem_gpio_reg);
1658
1659static ssize_t show_indirect_dword(struct device *d,
1660				   struct device_attribute *attr, char *buf)
1661{
1662	u32 reg = 0;
1663	struct ipw_priv *priv = dev_get_drvdata(d);
1664
1665	if (priv->status & STATUS_INDIRECT_DWORD)
1666		reg = ipw_read_reg32(priv, priv->indirect_dword);
1667	else
1668		reg = 0;
1669
1670	return sprintf(buf, "0x%08x\n", reg);
1671}
1672static ssize_t store_indirect_dword(struct device *d,
1673				    struct device_attribute *attr,
1674				    const char *buf, size_t count)
1675{
1676	struct ipw_priv *priv = dev_get_drvdata(d);
1677
1678	sscanf(buf, "%x", &priv->indirect_dword);
1679	priv->status |= STATUS_INDIRECT_DWORD;
1680	return strnlen(buf, count);
1681}
1682
1683static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1684		   show_indirect_dword, store_indirect_dword);
1685
1686static ssize_t show_indirect_byte(struct device *d,
1687				  struct device_attribute *attr, char *buf)
1688{
1689	u8 reg = 0;
1690	struct ipw_priv *priv = dev_get_drvdata(d);
1691
1692	if (priv->status & STATUS_INDIRECT_BYTE)
1693		reg = ipw_read_reg8(priv, priv->indirect_byte);
1694	else
1695		reg = 0;
1696
1697	return sprintf(buf, "0x%02x\n", reg);
1698}
1699static ssize_t store_indirect_byte(struct device *d,
1700				   struct device_attribute *attr,
1701				   const char *buf, size_t count)
1702{
1703	struct ipw_priv *priv = dev_get_drvdata(d);
1704
1705	sscanf(buf, "%x", &priv->indirect_byte);
1706	priv->status |= STATUS_INDIRECT_BYTE;
1707	return strnlen(buf, count);
1708}
1709
1710static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1711		   show_indirect_byte, store_indirect_byte);
1712
1713static ssize_t show_direct_dword(struct device *d,
1714				 struct device_attribute *attr, char *buf)
1715{
1716	u32 reg = 0;
1717	struct ipw_priv *priv = dev_get_drvdata(d);
1718
1719	if (priv->status & STATUS_DIRECT_DWORD)
1720		reg = ipw_read32(priv, priv->direct_dword);
1721	else
1722		reg = 0;
1723
1724	return sprintf(buf, "0x%08x\n", reg);
1725}
1726static ssize_t store_direct_dword(struct device *d,
1727				  struct device_attribute *attr,
1728				  const char *buf, size_t count)
1729{
1730	struct ipw_priv *priv = dev_get_drvdata(d);
1731
1732	sscanf(buf, "%x", &priv->direct_dword);
1733	priv->status |= STATUS_DIRECT_DWORD;
1734	return strnlen(buf, count);
1735}
1736
1737static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1738		   show_direct_dword, store_direct_dword);
1739
1740static int rf_kill_active(struct ipw_priv *priv)
1741{
1742	if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1743		priv->status |= STATUS_RF_KILL_HW;
1744	else
1745		priv->status &= ~STATUS_RF_KILL_HW;
1746
1747	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1748}
1749
1750static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1751			    char *buf)
1752{
1753	/* 0 - RF kill not enabled
1754	   1 - SW based RF kill active (sysfs)
1755	   2 - HW based RF kill active
1756	   3 - Both HW and SW based RF kill active */
1757	struct ipw_priv *priv = dev_get_drvdata(d);
1758	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1759	    (rf_kill_active(priv) ? 0x2 : 0x0);
1760	return sprintf(buf, "%i\n", val);
1761}
1762
1763static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1764{
1765	if ((disable_radio ? 1 : 0) ==
1766	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1767		return 0;
1768
1769	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
1770			  disable_radio ? "OFF" : "ON");
1771
1772	if (disable_radio) {
1773		priv->status |= STATUS_RF_KILL_SW;
1774
1775		if (priv->workqueue) {
1776			cancel_delayed_work(&priv->request_scan);
1777			cancel_delayed_work(&priv->request_direct_scan);
1778			cancel_delayed_work(&priv->request_passive_scan);
1779			cancel_delayed_work(&priv->scan_event);
1780		}
1781		queue_work(priv->workqueue, &priv->down);
1782	} else {
1783		priv->status &= ~STATUS_RF_KILL_SW;
1784		if (rf_kill_active(priv)) {
1785			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1786					  "disabled by HW switch\n");
1787			/* Make sure the RF_KILL check timer is running */
1788			cancel_delayed_work(&priv->rf_kill);
1789			queue_delayed_work(priv->workqueue, &priv->rf_kill,
1790					   round_jiffies_relative(2 * HZ));
1791		} else
1792			queue_work(priv->workqueue, &priv->up);
1793	}
1794
1795	return 1;
1796}
1797
1798static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1799			     const char *buf, size_t count)
1800{
1801	struct ipw_priv *priv = dev_get_drvdata(d);
1802
1803	ipw_radio_kill_sw(priv, buf[0] == '1');
1804
1805	return count;
1806}
1807
1808static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
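/* Writing '1' to the rf_kill attribute asserts the software RF kill
 * (radio down); any other leading character clears it, subject to the
 * hardware kill switch state reported by rf_kill_active(). */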
1809
1810static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1811			       char *buf)
1812{
1813	struct ipw_priv *priv = dev_get_drvdata(d);
1814	int pos = 0, len = 0;
1815	if (priv->config & CFG_SPEED_SCAN) {
1816		while (priv->speed_scan[pos] != 0)
1817			len += sprintf(&buf[len], "%d ",
1818				       priv->speed_scan[pos++]);
1819		return len + sprintf(&buf[len], "\n");
1820	}
1821
1822	return sprintf(buf, "0\n");
1823}
1824
1825static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1826				const char *buf, size_t count)
1827{
1828	struct ipw_priv *priv = dev_get_drvdata(d);
1829	int channel, pos = 0;
1830	const char *p = buf;
1831
1832	/* list of space separated channels to scan, optionally ending with 0 */
1833	while ((channel = simple_strtol(p, NULL, 0))) {
1834		if (pos == MAX_SPEED_SCAN - 1) {
1835			priv->speed_scan[pos] = 0;
1836			break;
1837		}
1838
1839		if (libipw_is_valid_channel(priv->ieee, channel))
1840			priv->speed_scan[pos++] = channel;
1841		else
1842			IPW_WARNING("Skipping invalid channel request: %d\n",
1843				    channel);
1844		p = strchr(p, ' ');
1845		if (!p)
1846			break;
1847		while (*p == ' ' || *p == '\t')
1848			p++;
1849	}
1850
1851	if (pos == 0)
1852		priv->config &= ~CFG_SPEED_SCAN;
1853	else {
1854		priv->speed_scan_pos = 0;
1855		priv->config |= CFG_SPEED_SCAN;
1856	}
1857
1858	return count;
1859}
1860
1861static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1862		   store_speed_scan);
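
/*
 * Example: speed_scan takes a space-separated channel list, e.g. (sysfs
 * path illustrative only, as above):
 *
 *   # echo "1 6 11" > /sys/bus/pci/devices/0000:02:02.0/speed_scan
 *   # echo "0"      > /sys/bus/pci/devices/0000:02:02.0/speed_scan
 *
 * Invalid channels are skipped with a warning; an empty or zero list
 * clears CFG_SPEED_SCAN again.
 */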
1863
1864static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1865			      char *buf)
1866{
1867	struct ipw_priv *priv = dev_get_drvdata(d);
1868	return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1869}
1870
1871static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1872			       const char *buf, size_t count)
1873{
1874	struct ipw_priv *priv = dev_get_drvdata(d);
1875	if (buf[0] == '1')
1876		priv->config |= CFG_NET_STATS;
1877	else
1878		priv->config &= ~CFG_NET_STATS;
1879
1880	return count;
1881}
1882
1883static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1884		   show_net_stats, store_net_stats);
1885
1886static ssize_t show_channels(struct device *d,
1887			     struct device_attribute *attr,
1888			     char *buf)
1889{
1890	struct ipw_priv *priv = dev_get_drvdata(d);
1891	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1892	int len = 0, i;
1893
1894	len = sprintf(&buf[len],
1895		      "Displaying %d channels in 2.4GHz band "
1896		      "(802.11bg):\n", geo->bg_channels);
1897
1898	for (i = 0; i < geo->bg_channels; i++) {
1899		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1900			       geo->bg[i].channel,
1901			       geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1902			       " (radar spectrum)" : "",
1903			       ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1904				(geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1905			       ? "" : ", IBSS",
1906			       geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1907			       "passive only" : "active/passive",
1908			       geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1909			       "B" : "B/G");
1910	}
1911
1912	len += sprintf(&buf[len],
1913		       "Displaying %d channels in 5.2GHz band "
1914		       "(802.11a):\n", geo->a_channels);
1915	for (i = 0; i < geo->a_channels; i++) {
1916		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1917			       geo->a[i].channel,
1918			       geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1919			       " (radar spectrum)" : "",
1920			       ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1921				(geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1922			       ? "" : ", IBSS",
1923			       geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1924			       "passive only" : "active/passive");
1925	}
1926
1927	return len;
1928}
1929
1930static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
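
/*
 * Example output of the 'channels' attribute (channel set and flags depend
 * on the geography read from the EEPROM, so this is illustrative only):
 *
 *   Displaying 11 channels in 2.4GHz band (802.11bg):
 *   1: BSS, IBSS, active/passive, Band B/G.
 *   ...
 *   Displaying 0 channels in 5.2GHz band (802.11a):
 */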
1931
1932static void notify_wx_assoc_event(struct ipw_priv *priv)
1933{
1934	union iwreq_data wrqu;
1935	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1936	if (priv->status & STATUS_ASSOCIATED)
1937		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1938	else
1939		memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1940	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1941}
1942
1943static void ipw_irq_tasklet(struct ipw_priv *priv)
1944{
1945	u32 inta, inta_mask, handled = 0;
1946	unsigned long flags;
1947	int rc = 0;
1948
1949	spin_lock_irqsave(&priv->irq_lock, flags);
1950
1951	inta = ipw_read32(priv, IPW_INTA_RW);
1952	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1953	inta &= (IPW_INTA_MASK_ALL & inta_mask);
1954
1955	/* Add any cached INTA values that need to be handled */
1956	inta |= priv->isr_inta;
1957
1958	spin_unlock_irqrestore(&priv->irq_lock, flags);
1959
1960	spin_lock_irqsave(&priv->lock, flags);
1961
1962	/* handle all the possible causes of the interrupt */
1963	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1964		ipw_rx(priv);
1965		handled |= IPW_INTA_BIT_RX_TRANSFER;
1966	}
1967
1968	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1969		IPW_DEBUG_HC("Command completed.\n");
1970		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1971		priv->status &= ~STATUS_HCMD_ACTIVE;
1972		wake_up_interruptible(&priv->wait_command_queue);
1973		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1974	}
1975
1976	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1977		IPW_DEBUG_TX("TX_QUEUE_1\n");
1978		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1979		handled |= IPW_INTA_BIT_TX_QUEUE_1;
1980	}
1981
1982	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1983		IPW_DEBUG_TX("TX_QUEUE_2\n");
1984		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1985		handled |= IPW_INTA_BIT_TX_QUEUE_2;
1986	}
1987
1988	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1989		IPW_DEBUG_TX("TX_QUEUE_3\n");
1990		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1991		handled |= IPW_INTA_BIT_TX_QUEUE_3;
1992	}
1993
1994	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1995		IPW_DEBUG_TX("TX_QUEUE_4\n");
1996		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1997		handled |= IPW_INTA_BIT_TX_QUEUE_4;
1998	}
1999
2000	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2001		IPW_WARNING("STATUS_CHANGE\n");
2002		handled |= IPW_INTA_BIT_STATUS_CHANGE;
2003	}
2004
2005	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2006		IPW_WARNING("TX_PERIOD_EXPIRED\n");
2007		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2008	}
2009
2010	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2011		IPW_WARNING("HOST_CMD_DONE\n");
2012		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2013	}
2014
2015	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2016		IPW_WARNING("FW_INITIALIZATION_DONE\n");
2017		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2018	}
2019
2020	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2021		IPW_WARNING("PHY_OFF_DONE\n");
2022		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2023	}
2024
2025	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2026		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2027		priv->status |= STATUS_RF_KILL_HW;
2028		wake_up_interruptible(&priv->wait_command_queue);
2029		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2030		cancel_delayed_work(&priv->request_scan);
2031		cancel_delayed_work(&priv->request_direct_scan);
2032		cancel_delayed_work(&priv->request_passive_scan);
2033		cancel_delayed_work(&priv->scan_event);
2034		schedule_work(&priv->link_down);
2035		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
2036		handled |= IPW_INTA_BIT_RF_KILL_DONE;
2037	}
2038
2039	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2040		IPW_WARNING("Firmware error detected.  Restarting.\n");
2041		if (priv->error) {
2042			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2043			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2044				struct ipw_fw_error *error =
2045				    ipw_alloc_error_log(priv);
2046				ipw_dump_error_log(priv, error);
2047				kfree(error);
2048			}
2049		} else {
2050			priv->error = ipw_alloc_error_log(priv);
2051			if (priv->error)
2052				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2053			else
2054				IPW_DEBUG_FW("Error allocating sysfs 'error' "
2055					     "log.\n");
2056			if (ipw_debug_level & IPW_DL_FW_ERRORS)
2057				ipw_dump_error_log(priv, priv->error);
2058		}
2059
2060		/* XXX: If hardware encryption is for WPA/WPA2,
2061		 * we have to notify the supplicant. */
2062		if (priv->ieee->sec.encrypt) {
2063			priv->status &= ~STATUS_ASSOCIATED;
2064			notify_wx_assoc_event(priv);
2065		}
2066
2067		/* Keep the restart process from trying to send host
2068		 * commands by clearing the INIT status bit */
2069		priv->status &= ~STATUS_INIT;
2070
2071		/* Cancel currently queued command. */
2072		priv->status &= ~STATUS_HCMD_ACTIVE;
2073		wake_up_interruptible(&priv->wait_command_queue);
2074
2075		queue_work(priv->workqueue, &priv->adapter_restart);
2076		handled |= IPW_INTA_BIT_FATAL_ERROR;
2077	}
2078
2079	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2080		IPW_ERROR("Parity error\n");
2081		handled |= IPW_INTA_BIT_PARITY_ERROR;
2082	}
2083
2084	if (handled != inta) {
2085		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2086	}
2087
2088	spin_unlock_irqrestore(&priv->lock, flags);
2089
2090	/* enable all interrupts */
2091	ipw_enable_interrupts(priv);
2092}
2093
2094#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
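/*
 * The macro above pastes and stringifies the command id, so e.g.
 * IPW_CMD(SSID) expands to "case IPW_CMD_SSID : return "SSID";" inside the
 * switch below.
 */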
2095static char *get_cmd_string(u8 cmd)
2096{
2097	switch (cmd) {
2098		IPW_CMD(HOST_COMPLETE);
2099		IPW_CMD(POWER_DOWN);
2100		IPW_CMD(SYSTEM_CONFIG);
2101		IPW_CMD(MULTICAST_ADDRESS);
2102		IPW_CMD(SSID);
2103		IPW_CMD(ADAPTER_ADDRESS);
2104		IPW_CMD(PORT_TYPE);
2105		IPW_CMD(RTS_THRESHOLD);
2106		IPW_CMD(FRAG_THRESHOLD);
2107		IPW_CMD(POWER_MODE);
2108		IPW_CMD(WEP_KEY);
2109		IPW_CMD(TGI_TX_KEY);
2110		IPW_CMD(SCAN_REQUEST);
2111		IPW_CMD(SCAN_REQUEST_EXT);
2112		IPW_CMD(ASSOCIATE);
2113		IPW_CMD(SUPPORTED_RATES);
2114		IPW_CMD(SCAN_ABORT);
2115		IPW_CMD(TX_FLUSH);
2116		IPW_CMD(QOS_PARAMETERS);
2117		IPW_CMD(DINO_CONFIG);
2118		IPW_CMD(RSN_CAPABILITIES);
2119		IPW_CMD(RX_KEY);
2120		IPW_CMD(CARD_DISABLE);
2121		IPW_CMD(SEED_NUMBER);
2122		IPW_CMD(TX_POWER);
2123		IPW_CMD(COUNTRY_INFO);
2124		IPW_CMD(AIRONET_INFO);
2125		IPW_CMD(AP_TX_POWER);
2126		IPW_CMD(CCKM_INFO);
2127		IPW_CMD(CCX_VER_INFO);
2128		IPW_CMD(SET_CALIBRATION);
2129		IPW_CMD(SENSITIVITY_CALIB);
2130		IPW_CMD(RETRY_LIMIT);
2131		IPW_CMD(IPW_PRE_POWER_DOWN);
2132		IPW_CMD(VAP_BEACON_TEMPLATE);
2133		IPW_CMD(VAP_DTIM_PERIOD);
2134		IPW_CMD(EXT_SUPPORTED_RATES);
2135		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2136		IPW_CMD(VAP_QUIET_INTERVALS);
2137		IPW_CMD(VAP_CHANNEL_SWITCH);
2138		IPW_CMD(VAP_MANDATORY_CHANNELS);
2139		IPW_CMD(VAP_CELL_PWR_LIMIT);
2140		IPW_CMD(VAP_CF_PARAM_SET);
2141		IPW_CMD(VAP_SET_BEACONING_STATE);
2142		IPW_CMD(MEASUREMENT);
2143		IPW_CMD(POWER_CAPABILITY);
2144		IPW_CMD(SUPPORTED_CHANNELS);
2145		IPW_CMD(TPC_REPORT);
2146		IPW_CMD(WME_INFO);
2147		IPW_CMD(PRODUCTION_COMMAND);
2148	default:
2149		return "UNKNOWN";
2150	}
2151}
2152
2153#define HOST_COMPLETE_TIMEOUT HZ
2154
2155static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2156{
2157	int rc = 0;
2158	unsigned long flags;
2159
2160	spin_lock_irqsave(&priv->lock, flags);
2161	if (priv->status & STATUS_HCMD_ACTIVE) {
2162		IPW_ERROR("Failed to send %s: Already sending a command.\n",
2163			  get_cmd_string(cmd->cmd));
2164		spin_unlock_irqrestore(&priv->lock, flags);
2165		return -EAGAIN;
2166	}
2167
2168	priv->status |= STATUS_HCMD_ACTIVE;
2169
2170	if (priv->cmdlog) {
2171		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2172		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2173		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2174		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2175		       cmd->len);
2176		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2177	}
2178
2179	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2180		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2181		     priv->status);
2182
2183#ifndef DEBUG_CMD_WEP_KEY
2184	if (cmd->cmd == IPW_CMD_WEP_KEY)
2185		IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2186	else
2187#endif
2188		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2189
2190	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2191	if (rc) {
2192		priv->status &= ~STATUS_HCMD_ACTIVE;
2193		IPW_ERROR("Failed to send %s: Reason %d\n",
2194			  get_cmd_string(cmd->cmd), rc);
2195		spin_unlock_irqrestore(&priv->lock, flags);
2196		goto exit;
2197	}
2198	spin_unlock_irqrestore(&priv->lock, flags);
2199
2200	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2201					      !(priv->
2202						status & STATUS_HCMD_ACTIVE),
2203					      HOST_COMPLETE_TIMEOUT);
2204	if (rc == 0) {
2205		spin_lock_irqsave(&priv->lock, flags);
2206		if (priv->status & STATUS_HCMD_ACTIVE) {
2207			IPW_ERROR("Failed to send %s: Command timed out.\n",
2208				  get_cmd_string(cmd->cmd));
2209			priv->status &= ~STATUS_HCMD_ACTIVE;
2210			spin_unlock_irqrestore(&priv->lock, flags);
2211			rc = -EIO;
2212			goto exit;
2213		}
2214		spin_unlock_irqrestore(&priv->lock, flags);
2215	} else
2216		rc = 0;
2217
2218	if (priv->status & STATUS_RF_KILL_HW) {
2219		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2220			  get_cmd_string(cmd->cmd));
2221		rc = -EIO;
2222		goto exit;
2223	}
2224
2225      exit:
2226	if (priv->cmdlog) {
2227		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2228		priv->cmdlog_pos %= priv->cmdlog_len;
2229	}
2230	return rc;
2231}
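
/*
 * Summary of the handshake above: __ipw_send_cmd() sets STATUS_HCMD_ACTIVE,
 * queues the command on the command TX queue, then sleeps on
 * wait_command_queue until the IPW_INTA_BIT_TX_CMD_QUEUE handler in
 * ipw_irq_tasklet() clears STATUS_HCMD_ACTIVE, or HOST_COMPLETE_TIMEOUT
 * (one second) expires.  The ipw_send_*() helpers below are thin wrappers
 * around this routine.
 */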
2232
2233static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2234{
2235	struct host_cmd cmd = {
2236		.cmd = command,
2237	};
2238
2239	return __ipw_send_cmd(priv, &cmd);
2240}
2241
2242static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2243			    void *data)
2244{
2245	struct host_cmd cmd = {
2246		.cmd = command,
2247		.len = len,
2248		.param = data,
2249	};
2250
2251	return __ipw_send_cmd(priv, &cmd);
2252}
2253
2254static int ipw_send_host_complete(struct ipw_priv *priv)
2255{
2256	if (!priv) {
2257		IPW_ERROR("Invalid args\n");
2258		return -1;
2259	}
2260
2261	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2262}
2263
2264static int ipw_send_system_config(struct ipw_priv *priv)
2265{
2266	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2267				sizeof(priv->sys_config),
2268				&priv->sys_config);
2269}
2270
2271static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2272{
2273	if (!priv || !ssid) {
2274		IPW_ERROR("Invalid args\n");
2275		return -1;
2276	}
2277
2278	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2279				ssid);
2280}
2281
2282static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2283{
2284	if (!priv || !mac) {
2285		IPW_ERROR("Invalid args\n");
2286		return -1;
2287	}
2288
2289	IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2290		       priv->net_dev->name, mac);
2291
2292	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2293}
2294
2295/*
2296 * NOTE: This must be executed from our workqueue as it results in udelay
2297 * being called, which may corrupt the keyboard if executed on the default
2298 * workqueue.
2299 */
2300static void ipw_adapter_restart(void *adapter)
2301{
2302	struct ipw_priv *priv = adapter;
2303
2304	if (priv->status & STATUS_RF_KILL_MASK)
2305		return;
2306
2307	ipw_down(priv);
2308
2309	if (priv->assoc_network &&
2310	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2311		ipw_remove_current_network(priv);
2312
2313	if (ipw_up(priv)) {
2314		IPW_ERROR("Failed to up device\n");
2315		return;
2316	}
2317}
2318
2319static void ipw_bg_adapter_restart(struct work_struct *work)
2320{
2321	struct ipw_priv *priv =
2322		container_of(work, struct ipw_priv, adapter_restart);
2323	mutex_lock(&priv->mutex);
2324	ipw_adapter_restart(priv);
2325	mutex_unlock(&priv->mutex);
2326}
2327
2328#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2329
2330static void ipw_scan_check(void *data)
2331{
2332	struct ipw_priv *priv = data;
2333	if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2334		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2335			       "adapter after (%dms).\n",
2336			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2337		queue_work(priv->workqueue, &priv->adapter_restart);
2338	}
2339}
2340
2341static void ipw_bg_scan_check(struct work_struct *work)
2342{
2343	struct ipw_priv *priv =
2344		container_of(work, struct ipw_priv, scan_check.work);
2345	mutex_lock(&priv->mutex);
2346	ipw_scan_check(priv);
2347	mutex_unlock(&priv->mutex);
2348}
2349
2350static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2351				     struct ipw_scan_request_ext *request)
2352{
2353	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2354				sizeof(*request), request);
2355}
2356
2357static int ipw_send_scan_abort(struct ipw_priv *priv)
2358{
2359	if (!priv) {
2360		IPW_ERROR("Invalid args\n");
2361		return -1;
2362	}
2363
2364	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2365}
2366
2367static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2368{
2369	struct ipw_sensitivity_calib calib = {
2370		.beacon_rssi_raw = cpu_to_le16(sens),
2371	};
2372
2373	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2374				&calib);
2375}
2376
2377static int ipw_send_associate(struct ipw_priv *priv,
2378			      struct ipw_associate *associate)
2379{
2380	if (!priv || !associate) {
2381		IPW_ERROR("Invalid args\n");
2382		return -1;
2383	}
2384
2385	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2386				associate);
2387}
2388
2389static int ipw_send_supported_rates(struct ipw_priv *priv,
2390				    struct ipw_supported_rates *rates)
2391{
2392	if (!priv || !rates) {
2393		IPW_ERROR("Invalid args\n");
2394		return -1;
2395	}
2396
2397	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2398				rates);
2399}
2400
2401static int ipw_set_random_seed(struct ipw_priv *priv)
2402{
2403	u32 val;
2404
2405	if (!priv) {
2406		IPW_ERROR("Invalid args\n");
2407		return -1;
2408	}
2409
2410	get_random_bytes(&val, sizeof(val));
2411
2412	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2413}
2414
2415static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2416{
2417	__le32 v = cpu_to_le32(phy_off);
2418	if (!priv) {
2419		IPW_ERROR("Invalid args\n");
2420		return -1;
2421	}
2422
2423	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2424}
2425
2426static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2427{
2428	if (!priv || !power) {
2429		IPW_ERROR("Invalid args\n");
2430		return -1;
2431	}
2432
2433	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2434}
2435
2436static int ipw_set_tx_power(struct ipw_priv *priv)
2437{
2438	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2439	struct ipw_tx_power tx_power;
2440	s8 max_power;
2441	int i;
2442
2443	memset(&tx_power, 0, sizeof(tx_power));
2444
2445	/* configure device for 'G' band */
2446	tx_power.ieee_mode = IPW_G_MODE;
2447	tx_power.num_channels = geo->bg_channels;
2448	for (i = 0; i < geo->bg_channels; i++) {
2449		max_power = geo->bg[i].max_power;
2450		tx_power.channels_tx_power[i].channel_number =
2451		    geo->bg[i].channel;
2452		tx_power.channels_tx_power[i].tx_power = max_power ?
2453		    min(max_power, priv->tx_power) : priv->tx_power;
2454	}
2455	if (ipw_send_tx_power(priv, &tx_power))
2456		return -EIO;
2457
2458	/* configure device to also handle 'B' band */
2459	tx_power.ieee_mode = IPW_B_MODE;
2460	if (ipw_send_tx_power(priv, &tx_power))
2461		return -EIO;
2462
2463	/* configure device to also handle 'A' band */
2464	if (priv->ieee->abg_true) {
2465		tx_power.ieee_mode = IPW_A_MODE;
2466		tx_power.num_channels = geo->a_channels;
2467		for (i = 0; i < tx_power.num_channels; i++) {
2468			max_power = geo->a[i].max_power;
2469			tx_power.channels_tx_power[i].channel_number =
2470			    geo->a[i].channel;
2471			tx_power.channels_tx_power[i].tx_power = max_power ?
2472			    min(max_power, priv->tx_power) : priv->tx_power;
2473		}
2474		if (ipw_send_tx_power(priv, &tx_power))
2475			return -EIO;
2476	}
2477	return 0;
2478}
2479
2480static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2481{
2482	struct ipw_rts_threshold rts_threshold = {
2483		.rts_threshold = cpu_to_le16(rts),
2484	};
2485
2486	if (!priv) {
2487		IPW_ERROR("Invalid args\n");
2488		return -1;
2489	}
2490
2491	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2492				sizeof(rts_threshold), &rts_threshold);
2493}
2494
2495static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2496{
2497	struct ipw_frag_threshold frag_threshold = {
2498		.frag_threshold = cpu_to_le16(frag),
2499	};
2500
2501	if (!priv) {
2502		IPW_ERROR("Invalid args\n");
2503		return -1;
2504	}
2505
2506	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2507				sizeof(frag_threshold), &frag_threshold);
2508}
2509
2510static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2511{
2512	__le32 param;
2513
2514	if (!priv) {
2515		IPW_ERROR("Invalid args\n");
2516		return -1;
2517	}
2518
2519	/* If on battery, set to power index 3; if on AC, set to CAM;
2520	 * otherwise use the user-requested level */
2521	switch (mode) {
2522	case IPW_POWER_BATTERY:
2523		param = cpu_to_le32(IPW_POWER_INDEX_3);
2524		break;
2525	case IPW_POWER_AC:
2526		param = cpu_to_le32(IPW_POWER_MODE_CAM);
2527		break;
2528	default:
2529		param = cpu_to_le32(mode);
2530		break;
2531	}
2532
2533	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2534				&param);
2535}
2536
2537static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2538{
2539	struct ipw_retry_limit retry_limit = {
2540		.short_retry_limit = slimit,
2541		.long_retry_limit = llimit
2542	};
2543
2544	if (!priv) {
2545		IPW_ERROR("Invalid args\n");
2546		return -1;
2547	}
2548
2549	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2550				&retry_limit);
2551}
2552
2553/*
2554 * The IPW device contains a Microwire compatible EEPROM that stores
2555 * various data like the MAC address.  Usually the firmware has exclusive
2556 * access to the eeprom, but during device initialization (before the
2557 * device driver has sent the HostComplete command to the firmware) the
2558 * device driver has read access to the EEPROM by way of indirect addressing
2559 * through a couple of memory mapped registers.
2560 *
2561 * The following is a simplified implementation for pulling data out of
2562 * the eeprom, along with some helper functions to find information in
2563 * the per device private data's copy of the eeprom.
2564 *
2565 * NOTE: To better understand how these functions work (i.e. what is a chip
2566 *       select and why do we have to keep driving the eeprom clock?), read
2567 *       just about any data sheet for a Microwire compatible EEPROM.
2568 */
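
/*
 * Illustrative shape of a Microwire READ transaction as driven by the
 * helpers below (bit-banged through FW_MEM_REG_EEPROM_ACCESS):
 *
 *   eeprom_op(priv, EEPROM_CMD_READ, addr);  start bit, 2 opcode bits,
 *                                            8 address bits, MSB first
 *   then 16 clock pulses while sampling EEPROM_BIT_DO assemble the 16-bit
 *   word, MSB first -- see eeprom_read_u16().
 */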
2569
2570/* write a 32 bit value into the indirect accessor register */
2571static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2572{
2573	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2574
2575	/* the eeprom requires some time to complete the operation */
2576	udelay(p->eeprom_delay);
2577
2578	return;
2579}
2580
2581/* perform a chip select operation */
2582static void eeprom_cs(struct ipw_priv *priv)
2583{
2584	eeprom_write_reg(priv, 0);
2585	eeprom_write_reg(priv, EEPROM_BIT_CS);
2586	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2587	eeprom_write_reg(priv, EEPROM_BIT_CS);
2588}
2589
2590/* disable the chip select */
2591static void eeprom_disable_cs(struct ipw_priv *priv)
2592{
2593	eeprom_write_reg(priv, EEPROM_BIT_CS);
2594	eeprom_write_reg(priv, 0);
2595	eeprom_write_reg(priv, EEPROM_BIT_SK);
2596}
2597
2598/* push a single bit down to the eeprom */
2599static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2600{
2601	int d = (bit ? EEPROM_BIT_DI : 0);
2602	eeprom_write_reg(p, EEPROM_BIT_CS | d);
2603	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2604}
2605
2606/* push an opcode followed by an address down to the eeprom */
2607static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2608{
2609	int i;
2610
2611	eeprom_cs(priv);
2612	eeprom_write_bit(priv, 1);
2613	eeprom_write_bit(priv, op & 2);
2614	eeprom_write_bit(priv, op & 1);
2615	for (i = 7; i >= 0; i--) {
2616		eeprom_write_bit(priv, addr & (1 << i));
2617	}
2618}
2619
2620/* pull 16 bits off the eeprom, one bit at a time */
2621static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2622{
2623	int i;
2624	u16 r = 0;
2625
2626	/* Send READ Opcode */
2627	eeprom_op(priv, EEPROM_CMD_READ, addr);
2628
2629	/* Send dummy bit */
2630	eeprom_write_reg(priv, EEPROM_BIT_CS);
2631
2632	/* Read the 16-bit word off the eeprom one bit at a time */
2633	for (i = 0; i < 16; i++) {
2634		u32 data = 0;
2635		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2636		eeprom_write_reg(priv, EEPROM_BIT_CS);
2637		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2638		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2639	}
2640
2641	/* Send another dummy bit */
2642	eeprom_write_reg(priv, 0);
2643	eeprom_disable_cs(priv);
2644
2645	return r;
2646}
2647
2648/* helper function for pulling the mac address out of the private */
2649/* data's copy of the eeprom data                                 */
2650static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2651{
2652	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2653}
2654
2655/*
2656 * Either the device driver (i.e. the host) or the firmware can
2657 * load eeprom data into the designated region in SRAM.  If neither
2658 * happens then the FW will shutdown with a fatal error.
2659 *
2660 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2661 * word in the shared SRAM region needs to be non-zero.
2662 */
2663static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2664{
2665	int i;
2666	__le16 *eeprom = (__le16 *) priv->eeprom;
2667
2668	IPW_DEBUG_TRACE(">>\n");
2669
2670	/* read entire contents of eeprom into private buffer */
2671	for (i = 0; i < 128; i++)
2672		eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2673
2674	/*
2675	   If the data looks correct, then copy it to our private
2676	   copy.  Otherwise let the firmware know to perform the operation
2677	   on its own.
2678	 */
2679	if (priv->eeprom[EEPROM_VERSION] != 0) {
2680		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2681
2682		/* write the eeprom data to sram */
2683		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2684			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2685
2686		/* Do not load eeprom data on fatal error or suspend */
2687		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2688	} else {
2689		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2690
2691		/* Load eeprom data on fatal error or suspend */
2692		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2693	}
2694
2695	IPW_DEBUG_TRACE("<<\n");
2696}
2697
2698static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2699{
2700	count >>= 2;
2701	if (!count)
2702		return;
2703	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2704	while (count--)
2705		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2706}
2707
2708static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2709{
2710	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2711			CB_NUMBER_OF_ELEMENTS_SMALL *
2712			sizeof(struct command_block));
2713}
2714
2715static int ipw_fw_dma_enable(struct ipw_priv *priv)
2716{				/* start dma engine but no transfers yet */
2717
2718	IPW_DEBUG_FW(">> : \n");
2719
2720	/* Start the dma */
2721	ipw_fw_dma_reset_command_blocks(priv);
2722
2723	/* Write CB base address */
2724	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2725
2726	IPW_DEBUG_FW("<< : \n");
2727	return 0;
2728}
2729
2730static void ipw_fw_dma_abort(struct ipw_priv *priv)
2731{
2732	u32 control = 0;
2733
2734	IPW_DEBUG_FW(">> :\n");
2735
2736	/* set the Stop and Abort bit */
2737	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2738	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2739	priv->sram_desc.last_cb_index = 0;
2740
2741	IPW_DEBUG_FW("<< \n");
2742}
2743
2744static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2745					  struct command_block *cb)
2746{
2747	u32 address =
2748	    IPW_SHARED_SRAM_DMA_CONTROL +
2749	    (sizeof(struct command_block) * index);
2750	IPW_DEBUG_FW(">> :\n");
2751
2752	ipw_write_indirect(priv, address, (u8 *) cb,
2753			   (int)sizeof(struct command_block));
2754
2755	IPW_DEBUG_FW("<< :\n");
2756	return 0;
2757
2758}
2759
2760static int ipw_fw_dma_kick(struct ipw_priv *priv)
2761{
2762	u32 control = 0;
2763	u32 index = 0;
2764
2765	IPW_DEBUG_FW(">> :\n");
2766
2767	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2768		ipw_fw_dma_write_command_block(priv, index,
2769					       &priv->sram_desc.cb_list[index]);
2770
2771	/* Enable the DMA in the CSR register */
2772	ipw_clear_bit(priv, IPW_RESET_REG,
2773		      IPW_RESET_REG_MASTER_DISABLED |
2774		      IPW_RESET_REG_STOP_MASTER);
2775
2776	/* Set the Start bit. */
2777	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2778	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2779
2780	IPW_DEBUG_FW("<< :\n");
2781	return 0;
2782}
2783
2784static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2785{
2786	u32 address;
2787	u32 register_value = 0;
2788	u32 cb_fields_address = 0;
2789
2790	IPW_DEBUG_FW(">> :\n");
2791	address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2792	IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2793
2794	/* Read the DMA Controller register */
2795	register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2796	IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2797
2798	/* Print the CB values */
2799	cb_fields_address = address;
2800	register_value = ipw_read_reg32(priv, cb_fields_address);
2801	IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2802
2803	cb_fields_address += sizeof(u32);
2804	register_value = ipw_read_reg32(priv, cb_fields_address);
2805	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2806
2807	cb_fields_address += sizeof(u32);
2808	register_value = ipw_read_reg32(priv, cb_fields_address);
2809	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2810			  register_value);
2811
2812	cb_fields_address += sizeof(u32);
2813	register_value = ipw_read_reg32(priv, cb_fields_address);
2814	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2815
2816	IPW_DEBUG_FW("<< :\n");
2817}
2818
2819static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2820{
2821	u32 current_cb_address = 0;
2822	u32 current_cb_index = 0;
2823
2824	IPW_DEBUG_FW(">> :\n");
2825	current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2826
2827	current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2828	    sizeof(struct command_block);
2829
2830	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2831			  current_cb_index, current_cb_address);
2832
2833	IPW_DEBUG_FW("<< :\n");
2834	return current_cb_index;
2835
2836}
2837
2838static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2839					u32 src_address,
2840					u32 dest_address,
2841					u32 length,
2842					int interrupt_enabled, int is_last)
2843{
2844
2845	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2846	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2847	    CB_DEST_SIZE_LONG;
2848	struct command_block *cb;
2849	u32 last_cb_element = 0;
2850
2851	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2852			  src_address, dest_address, length);
2853
2854	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2855		return -1;
2856
2857	last_cb_element = priv->sram_desc.last_cb_index;
2858	cb = &priv->sram_desc.cb_list[last_cb_element];
2859	priv->sram_desc.last_cb_index++;
2860
2861	/* Calculate the new CB control word */
2862	if (interrupt_enabled)
2863		control |= CB_INT_ENABLED;
2864
2865	if (is_last)
2866		control |= CB_LAST_VALID;
2867
2868	control |= length;
2869
2870	/* Calculate the CB Element's checksum value */
2871	cb->status = control ^ src_address ^ dest_address;
2872
2873	/* Copy the Source and Destination addresses */
2874	cb->dest_addr = dest_address;
2875	cb->source_addr = src_address;
2876
2877	/* Copy the Control Word last */
2878	cb->control = control;
2879
2880	return 0;
2881}
2882
2883static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2884				 int nr, u32 dest_address, u32 len)
2885{
2886	int ret, i;
2887	u32 size;
2888
2889	IPW_DEBUG_FW(">> \n");
2890	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2891			  nr, dest_address, len);
2892
2893	for (i = 0; i < nr; i++) {
2894		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2895		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2896						   dest_address +
2897						   i * CB_MAX_LENGTH, size,
2898						   0, 0);
2899		if (ret) {
2900			IPW_DEBUG_FW_INFO(": Failed\n");
2901			return -1;
2902		} else
2903			IPW_DEBUG_FW_INFO(": Added new cb\n");
2904	}
2905
2906	IPW_DEBUG_FW("<< \n");
2907	return 0;
2908}
2909
2910static int ipw_fw_dma_wait(struct ipw_priv *priv)
2911{
2912	u32 current_index = 0, previous_index;
2913	u32 watchdog = 0;
2914
2915	IPW_DEBUG_FW(">> : \n");
2916
2917	current_index = ipw_fw_dma_command_block_index(priv);
2918	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2919			  (int)priv->sram_desc.last_cb_index);
2920
2921	while (current_index < priv->sram_desc.last_cb_index) {
2922		udelay(50);
2923		previous_index = current_index;
2924		current_index = ipw_fw_dma_command_block_index(priv);
2925
2926		if (previous_index < current_index) {
2927			watchdog = 0;
2928			continue;
2929		}
2930		if (++watchdog > 400) {
2931			IPW_DEBUG_FW_INFO("Timeout\n");
2932			ipw_fw_dma_dump_command_block(priv);
2933			ipw_fw_dma_abort(priv);
2934			return -1;
2935		}
2936	}
2937
2938	ipw_fw_dma_abort(priv);
2939
2940	/* Disable the DMA in the CSR register */
2941	ipw_set_bit(priv, IPW_RESET_REG,
2942		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2943
2944	IPW_DEBUG_FW("<< dmaWaitSync \n");
2945	return 0;
2946}
2947
2948static void ipw_remove_current_network(struct ipw_priv *priv)
2949{
2950	struct list_head *element, *safe;
2951	struct libipw_network *network = NULL;
2952	unsigned long flags;
2953
2954	spin_lock_irqsave(&priv->ieee->lock, flags);
2955	list_for_each_safe(element, safe, &priv->ieee->network_list) {
2956		network = list_entry(element, struct libipw_network, list);
2957		if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2958			list_del(element);
2959			list_add_tail(&network->list,
2960				      &priv->ieee->network_free_list);
2961		}
2962	}
2963	spin_unlock_irqrestore(&priv->ieee->lock, flags);
2964}
2965
2966/**
2967 * Check that card is still alive.
2968 * Reads debug register from domain0.
2969 * If card is present, pre-defined value should
2970 * be found there.
2971 *
2972 * @param priv
2973 * @return 1 if card is present, 0 otherwise
2974 */
2975static inline int ipw_alive(struct ipw_priv *priv)
2976{
2977	return ipw_read32(priv, 0x90) == 0xd55555d5;
2978}
2979
2980/* timeout in msec, attempted in 10-msec quanta */
2981static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2982			       int timeout)
2983{
2984	int i = 0;
2985
2986	do {
2987		if ((ipw_read32(priv, addr) & mask) == mask)
2988			return i;
2989		mdelay(10);
2990		i += 10;
2991	} while (i < timeout);
2992
2993	return -ETIME;
2994}
2995
2996/* These functions load the firmware and micro code for the operation of
2997 * the ipw hardware.  They assume the buffer has all the bits for the
2998 * image and that the caller handles the memory allocation and cleanup.
2999 */
3000
3001static int ipw_stop_master(struct ipw_priv *priv)
3002{
3003	int rc;
3004
3005	IPW_DEBUG_TRACE(">> \n");
3006	/* stop master. typical delay - 0 */
3007	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3008
3009	/* timeout is in msec, polled in 10-msec quanta */
3010	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3011			  IPW_RESET_REG_MASTER_DISABLED, 100);
3012	if (rc < 0) {
3013		IPW_ERROR("wait for stop master failed after 100ms\n");
3014		return -1;
3015	}
3016
3017	IPW_DEBUG_INFO("stop master %dms\n", rc);
3018
3019	return rc;
3020}
3021
3022static void ipw_arc_release(struct ipw_priv *priv)
3023{
3024	IPW_DEBUG_TRACE(">> \n");
3025	mdelay(5);
3026
3027	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3028
3029	/* no one knows timing, for safety add some delay */
3030	mdelay(5);
3031}
3032
3033struct fw_chunk {
3034	__le32 address;
3035	__le32 length;
3036};
3037
3038static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3039{
3040	int rc = 0, i, addr;
3041	u8 cr = 0;
3042	__le16 *image;
3043
3044	image = (__le16 *) data;
3045
3046	IPW_DEBUG_TRACE(">> \n");
3047
3048	rc = ipw_stop_master(priv);
3049
3050	if (rc < 0)
3051		return rc;
3052
3053	for (addr = IPW_SHARED_LOWER_BOUND;
3054	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3055		ipw_write32(priv, addr, 0);
3056	}
3057
3058	/* no ucode (yet) */
3059	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3060	/* destroy DMA queues */
3061	/* reset sequence */
3062
3063	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3064	ipw_arc_release(priv);
3065	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3066	mdelay(1);
3067
3068	/* reset PHY */
3069	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3070	mdelay(1);
3071
3072	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3073	mdelay(1);
3074
3075	/* enable ucode store */
3076	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3077	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3078	mdelay(1);
3079
3080	/* write ucode */
3081	/**
3082	 * @bug
3083	 * Do NOT set indirect address register once and then
3084	 * store data to indirect data register in the loop.
3085	 * It seems very reasonable, but in this case DINO does not
3086	 * accept the ucode. It is essential to set the address each time.
3087	 */
3088	/* load new ipw uCode */
3089	for (i = 0; i < len / 2; i++)
3090		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3091				le16_to_cpu(image[i]));
3092
3093	/* enable DINO */
3094	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3095	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3096
3097	/* this is where the igx / win driver deviates from the VAP driver. */
3098
3099	/* wait for alive response */
3100	for (i = 0; i < 100; i++) {
3101		/* poll for incoming data */
3102		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3103		if (cr & DINO_RXFIFO_DATA)
3104			break;
3105		mdelay(1);
3106	}
3107
3108	if (cr & DINO_RXFIFO_DATA) {
3109		/* alive_command_responce size is NOT a multiple of 4 */
3110		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3111
3112		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3113			response_buffer[i] =
3114			    cpu_to_le32(ipw_read_reg32(priv,
3115						       IPW_BASEBAND_RX_FIFO_READ));
3116		memcpy(&priv->dino_alive, response_buffer,
3117		       sizeof(priv->dino_alive));
3118		if (priv->dino_alive.alive_command == 1
3119		    && priv->dino_alive.ucode_valid == 1) {
3120			rc = 0;
3121			IPW_DEBUG_INFO
3122			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3123			     "of %02d/%02d/%02d %02d:%02d\n",
3124			     priv->dino_alive.software_revision,
3125			     priv->dino_alive.software_revision,
3126			     priv->dino_alive.device_identifier,
3127			     priv->dino_alive.device_identifier,
3128			     priv->dino_alive.time_stamp[0],
3129			     priv->dino_alive.time_stamp[1],
3130			     priv->dino_alive.time_stamp[2],
3131			     priv->dino_alive.time_stamp[3],
3132			     priv->dino_alive.time_stamp[4]);
3133		} else {
3134			IPW_DEBUG_INFO("Microcode is not alive\n");
3135			rc = -EINVAL;
3136		}
3137	} else {
3138		IPW_DEBUG_INFO("No alive response from DINO\n");
3139		rc = -ETIME;
3140	}
3141
3142	/* disable DINO, otherwise for some reason
3143	   the firmware has problems getting the alive resp. */
3144	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3145
3146	return rc;
3147}
3148
3149static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3150{
3151	int ret = -1;
3152	int offset = 0;
3153	struct fw_chunk *chunk;
3154	int total_nr = 0;
3155	int i;
3156	struct pci_pool *pool;
3157	u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
3158	dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];
3159
3160	IPW_DEBUG_TRACE("<< : \n");
3161
3162	pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3163	if (!pool) {
3164		IPW_ERROR("pci_pool_create failed\n");
3165		return -ENOMEM;
3166	}
3167
3168	/* Start the Dma */
3169	ret = ipw_fw_dma_enable(priv);
3170
3171	/* if the DMA is already ready, this would be a bug. */
3172	BUG_ON(priv->sram_desc.last_cb_index > 0);
3173
3174	do {
3175		u32 chunk_len;
3176		u8 *start;
3177		int size;
3178		int nr = 0;
3179
3180		chunk = (struct fw_chunk *)(data + offset);
3181		offset += sizeof(struct fw_chunk);
3182		chunk_len = le32_to_cpu(chunk->length);
3183		start = data + offset;
3184
3185		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3186		for (i = 0; i < nr; i++) {
3187			virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3188							 &phys[total_nr]);
3189			if (!virts[total_nr]) {
3190				ret = -ENOMEM;
3191				goto out;
3192			}
3193			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3194				     CB_MAX_LENGTH);
3195			memcpy(virts[total_nr], start, size);
3196			start += size;
3197			total_nr++;
3198			/* We don't support fw chunks larger than 64*8K */
3199			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3200		}
3201
3202		/* build DMA packet and queue up for sending */
3203		/* dma to chunk->address, the chunk->length bytes from data +
3204		 * offset */
3205		/* Dma loading */
3206		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3207					    nr, le32_to_cpu(chunk->address),
3208					    chunk_len);
3209		if (ret) {
3210			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3211			goto out;
3212		}
3213
3214		offset += chunk_len;
3215	} while (offset < len);
3216
3217	/* Run the DMA and wait for the answer */
3218	ret = ipw_fw_dma_kick(priv);
3219	if (ret) {
3220		IPW_ERROR("dmaKick Failed\n");
3221		goto out;
3222	}
3223
3224	ret = ipw_fw_dma_wait(priv);
3225	if (ret) {
3226		IPW_ERROR("dmaWaitSync Failed\n");
3227		goto out;
3228	}
3229 out:
3230	for (i = 0; i < total_nr; i++)
3231		pci_pool_free(pool, virts[i], phys[i]);
3232
3233	pci_pool_destroy(pool);
3234
3235	return ret;
3236}
3237
3238/* stop nic */
3239static int ipw_stop_nic(struct ipw_priv *priv)
3240{
3241	int rc = 0;
3242
3243	/* stop */
3244	ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3245
3246	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3247			  IPW_RESET_REG_MASTER_DISABLED, 500);
3248	if (rc < 0) {
3249		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3250		return rc;
3251	}
3252
3253	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3254
3255	return rc;
3256}
3257
3258static void ipw_start_nic(struct ipw_priv *priv)
3259{
3260	IPW_DEBUG_TRACE(">>\n");
3261
3262	/* prvHwStartNic  release ARC */
3263	ipw_clear_bit(priv, IPW_RESET_REG,
3264		      IPW_RESET_REG_MASTER_DISABLED |
3265		      IPW_RESET_REG_STOP_MASTER |
3266		      CBD_RESET_REG_PRINCETON_RESET);
3267
3268	/* enable power management */
3269	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3270		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3271
3272	IPW_DEBUG_TRACE("<<\n");
3273}
3274
3275static int ipw_init_nic(struct ipw_priv *priv)
3276{
3277	int rc;
3278
3279	IPW_DEBUG_TRACE(">>\n");
3280	/* reset */
3281	/*prvHwInitNic */
3282	/* set "initialization complete" bit to move adapter to D0 state */
3283	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3284
3285	/* low-level PLL activation */
3286	ipw_write32(priv, IPW_READ_INT_REGISTER,
3287		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3288
3289	/* wait for clock stabilization */
3290	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3291			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3292	if (rc < 0)
3293		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3294
3295	/* assert SW reset */
3296	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3297
3298	udelay(10);
3299
3300	/* set "initialization complete" bit to move adapter to D0 state */
3301	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3302
3303	IPW_DEBUG_TRACE("<<\n");
3304	return 0;
3305}
3306
3307/* Call this function from process context; it will sleep in request_firmware.
3308 * Probe is an ok place to call this from.
3309 */
3310static int ipw_reset_nic(struct ipw_priv *priv)
3311{
3312	int rc = 0;
3313	unsigned long flags;
3314
3315	IPW_DEBUG_TRACE(">>\n");
3316
3317	rc = ipw_init_nic(priv);
3318
3319	spin_lock_irqsave(&priv->lock, flags);
3320	/* Clear the 'host command active' bit... */
3321	priv->status &= ~STATUS_HCMD_ACTIVE;
3322	wake_up_interruptible(&priv->wait_command_queue);
3323	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3324	wake_up_interruptible(&priv->wait_state);
3325	spin_unlock_irqrestore(&priv->lock, flags);
3326
3327	IPW_DEBUG_TRACE("<<\n");
3328	return rc;
3329}
3330
3331
3332struct ipw_fw {
3333	__le32 ver;
3334	__le32 boot_size;
3335	__le32 ucode_size;
3336	__le32 fw_size;
3337	u8 data[0];
3338};
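
/*
 * Illustration: the firmware image is this header followed by three
 * concatenated blobs, which ipw_load() slices up roughly as
 *
 *   boot_img  = &fw->data[0];
 *   ucode_img = &fw->data[boot_size];
 *   fw_img    = &fw->data[boot_size + ucode_size];
 *
 * with the *_size fields stored little-endian in the header.
 */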
3339
3340static int ipw_get_fw(struct ipw_priv *priv,
3341		      const struct firmware **raw, const char *name)
3342{
3343	struct ipw_fw *fw;
3344	int rc;
3345
3346	/* ask firmware_class module to get the boot firmware off disk */
3347	rc = request_firmware(raw, name, &priv->pci_dev->dev);
3348	if (rc < 0) {
3349		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3350		return rc;
3351	}
3352
3353	if ((*raw)->size < sizeof(*fw)) {
3354		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3355		return -EINVAL;
3356	}
3357
3358	fw = (void *)(*raw)->data;
3359
3360	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3361	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3362		IPW_ERROR("%s is too small or corrupt (%zd)\n",
3363			  name, (*raw)->size);
3364		return -EINVAL;
3365	}
3366
3367	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3368		       name,
3369		       le32_to_cpu(fw->ver) >> 16,
3370		       le32_to_cpu(fw->ver) & 0xff,
3371		       (*raw)->size - sizeof(*fw));
3372	return 0;
3373}
3374
3375#define IPW_RX_BUF_SIZE (3000)
3376
3377static void ipw_rx_queue_reset(struct ipw_priv *priv,
3378				      struct ipw_rx_queue *rxq)
3379{
3380	unsigned long flags;
3381	int i;
3382
3383	spin_lock_irqsave(&rxq->lock, flags);
3384
3385	INIT_LIST_HEAD(&rxq->rx_free);
3386	INIT_LIST_HEAD(&rxq->rx_used);
3387
3388	/* Fill the rx_used queue with _all_ of the Rx buffers */
3389	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3390		/* In the reset function, these buffers may have been allocated
3391		 * to an SKB, so we need to unmap and free potential storage */
3392		if (rxq->pool[i].skb != NULL) {
3393			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3394					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3395			dev_kfree_skb(rxq->pool[i].skb);
3396			rxq->pool[i].skb = NULL;
3397		}
3398		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3399	}
3400
3401	/* Set us so that we have processed and used all buffers, but have
3402	 * not restocked the Rx queue with fresh buffers */
3403	rxq->read = rxq->write = 0;
3404	rxq->free_count = 0;
3405	spin_unlock_irqrestore(&rxq->lock, flags);
3406}
3407
3408#ifdef CONFIG_PM
3409static int fw_loaded = 0;
3410static const struct firmware *raw = NULL;
3411
3412static void free_firmware(void)
3413{
3414	if (fw_loaded) {
3415		release_firmware(raw);
3416		raw = NULL;
3417		fw_loaded = 0;
3418	}
3419}
3420#else
3421#define free_firmware() do {} while (0)
3422#endif
3423
3424static int ipw_load(struct ipw_priv *priv)
3425{
3426#ifndef CONFIG_PM
3427	const struct firmware *raw = NULL;
3428#endif
3429	struct ipw_fw *fw;
3430	u8 *boot_img, *ucode_img, *fw_img;
3431	u8 *name = NULL;
3432	int rc = 0, retries = 3;
3433
3434	switch (priv->ieee->iw_mode) {
3435	case IW_MODE_ADHOC:
3436		name = "ipw2200-ibss.fw";
3437		break;
3438#ifdef CONFIG_IPW2200_MONITOR
3439	case IW_MODE_MONITOR:
3440		name = "ipw2200-sniffer.fw";
3441		break;
3442#endif
3443	case IW_MODE_INFRA:
3444		name = "ipw2200-bss.fw";
3445		break;
3446	}
3447
3448	if (!name) {
3449		rc = -EINVAL;
3450		goto error;
3451	}
3452
3453#ifdef CONFIG_PM
3454	if (!fw_loaded) {
3455#endif
3456		rc = ipw_get_fw(priv, &raw, name);
3457		if (rc < 0)
3458			goto error;
3459#ifdef CONFIG_PM
3460	}
3461#endif
3462
3463	fw = (void *)raw->data;
3464	boot_img = &fw->data[0];
3465	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3466	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3467			   le32_to_cpu(fw->ucode_size)];
3468
3469	if (rc < 0)
3470		goto error;
3471
3472	if (!priv->rxq)
3473		priv->rxq = ipw_rx_queue_alloc(priv);
3474	else
3475		ipw_rx_queue_reset(priv, priv->rxq);
3476	if (!priv->rxq) {
3477		IPW_ERROR("Unable to initialize Rx queue\n");
3478		goto error;
3479	}
3480
3481      retry:
3482	/* Ensure interrupts are disabled */
3483	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3484	priv->status &= ~STATUS_INT_ENABLED;
3485
3486	/* ack pending interrupts */
3487	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3488
3489	ipw_stop_nic(priv);
3490
3491	rc = ipw_reset_nic(priv);
3492	if (rc < 0) {
3493		IPW_ERROR("Unable to reset NIC\n");
3494		goto error;
3495	}
3496
3497	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3498			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3499
3500	/* DMA the initial boot firmware into the device */
3501	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3502	if (rc < 0) {
3503		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3504		goto error;
3505	}
3506
3507	/* kick start the device */
3508	ipw_start_nic(priv);
3509
3510	/* wait for the device to finish its initial startup sequence */
3511	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3512			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3513	if (rc < 0) {
3514		IPW_ERROR("device failed to boot initial fw image\n");
3515		goto error;
3516	}
3517	IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3518
3519	/* ack fw init done interrupt */
3520	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3521
3522	/* DMA the ucode into the device */
3523	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3524	if (rc < 0) {
3525		IPW_ERROR("Unable to load ucode: %d\n", rc);
3526		goto error;
3527	}
3528
3529	/* stop nic */
3530	ipw_stop_nic(priv);
3531
3532	/* DMA bss firmware into the device */
3533	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3534	if (rc < 0) {
3535		IPW_ERROR("Unable to load firmware: %d\n", rc);
3536		goto error;
3537	}
3538#ifdef CONFIG_PM
3539	fw_loaded = 1;
3540#endif
3541
3542	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3543
3544	rc = ipw_queue_reset(priv);
3545	if (rc < 0) {
3546		IPW_ERROR("Unable to initialize queues\n");
3547		goto error;
3548	}
3549
3550	/* Ensure interrupts are disabled */
3551	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3552	/* ack pending interrupts */
3553	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3554
3555	/* kick start the device */
3556	ipw_start_nic(priv);
3557
3558	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3559		if (retries > 0) {
3560			IPW_WARNING("Parity error.  Retrying init.\n");
3561			retries--;
3562			goto retry;
3563		}
3564
3565		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3566		rc = -EIO;
3567		goto error;
3568	}
3569
3570	/* wait for the device */
3571	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3572			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3573	if (rc < 0) {
3574		IPW_ERROR("device failed to start within 500ms\n");
3575		goto error;
3576	}
3577	IPW_DEBUG_INFO("device response after %dms\n", rc);
3578
3579	/* ack fw init done interrupt */
3580	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3581
3582	/* read eeprom data and initialize the eeprom region of sram */
3583	priv->eeprom_delay = 1;
3584	ipw_eeprom_init_sram(priv);
3585
3586	/* enable interrupts */
3587	ipw_enable_interrupts(priv);
3588
3589	/* Ensure our queue has valid packets */
3590	ipw_rx_queue_replenish(priv);
3591
3592	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3593
3594	/* ack pending interrupts */
3595	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3596
3597#ifndef CONFIG_PM
3598	release_firmware(raw);
3599#endif
3600	return 0;
3601
3602      error:
3603	if (priv->rxq) {
3604		ipw_rx_queue_free(priv, priv->rxq);
3605		priv->rxq = NULL;
3606	}
3607	ipw_tx_queue_free(priv);
3608	if (raw)
3609		release_firmware(raw);
3610#ifdef CONFIG_PM
3611	fw_loaded = 0;
3612	raw = NULL;
3613#endif
3614
3615	return rc;
3616}
3617
3618/**
3619 * DMA services
3620 *
3621 * Theory of operation
3622 *
3623 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3624 * 2 empty entries are always kept in the buffer to protect from overflow.
3625 *
3626 * For the Tx queues, there are low mark and high mark limits. If, after
3627 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
3628 * is stopped. When reclaiming packets (on the 'tx done' IRQ), if the free
3629 * space becomes > high mark, the Tx queue is resumed.
3630 *
3631 * The IPW operates with six queues, one receive queue in the device's
3632 * sram, one transmit queue for sending commands to the device firmware,
3633 * and four transmit queues for data.
3634 *
3635 * The four transmit queues allow for performing quality of service (qos)
3636 * transmissions as per the 802.11 protocol.  Currently Linux does not
3637 * provide a mechanism to the user for utilizing prioritized queues, so
3638 * we only utilize the first data transmit queue (queue1).
3639 */
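
/*
 * Worked example (numbers illustrative): with a TX queue of n_bd = 64
 * entries, ipw_queue_init() sets low_mark = 64/4 = 16 and high_mark =
 * 64/8 = 8.  Free space is computed modulo n_bd with two entries held
 * back, so first_empty = 60, last_used = 10 gives
 * ipw_tx_queue_space() = (10 - 60 + 64) - 2 = 12 free slots.
 */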
3640
3641/**
3642 * Driver allocates buffers of this size for Rx
3643 */
3644
3645/**
3646 * ipw_rx_queue_space - Return number of free slots available in queue.
3647 */
3648static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3649{
3650	int s = q->read - q->write;
3651	if (s <= 0)
3652		s += RX_QUEUE_SIZE;
3653	/* keep some buffer to not confuse full and empty queue */
3654	s -= 2;
3655	if (s < 0)
3656		s = 0;
3657	return s;
3658}
3659
3660static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3661{
3662	int s = q->last_used - q->first_empty;
3663	if (s <= 0)
3664		s += q->n_bd;
3665	s -= 2;			/* keep some reserve to not confuse empty and full situations */
3666	if (s < 0)
3667		s = 0;
3668	return s;
3669}
3670
3671static inline int ipw_queue_inc_wrap(int index, int n_bd)
3672{
3673	return (++index == n_bd) ? 0 : index;
3674}
3675
3676/**
3677 * Initialize common DMA queue structure
3678 *
3679 * @param q                queue to init
3680 * @param count            Number of BD's to allocate. Should be power of 2
3681 * @param read_register    Address for 'read' register
3682 *                         (not offset within BAR, full address)
3683 * @param write_register   Address for 'write' register
3684 *                         (not offset within BAR, full address)
3685 * @param base_register    Address for 'base' register
3686 *                         (not offset within BAR, full address)
3687 * @param size             Address for 'size' register
3688 *                         (not offset within BAR, full address)
3689 */
3690static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3691			   int count, u32 read, u32 write, u32 base, u32 size)
3692{
3693	q->n_bd = count;
3694
3695	q->low_mark = q->n_bd / 4;
3696	if (q->low_mark < 4)
3697		q->low_mark = 4;
3698
3699	q->high_mark = q->n_bd / 8;
3700	if (q->high_mark < 2)
3701		q->high_mark = 2;
3702
3703	q->first_empty = q->last_used = 0;
3704	q->reg_r = read;
3705	q->reg_w = write;
3706
3707	ipw_write32(priv, base, q->dma_addr);
3708	ipw_write32(priv, size, count);
3709	ipw_write32(priv, read, 0);
3710	ipw_write32(priv, write, 0);
3711
3712	_ipw_read32(priv, 0x90);
3713}
3714
3715static int ipw_queue_tx_init(struct ipw_priv *priv,
3716			     struct clx2_tx_queue *q,
3717			     int count, u32 read, u32 write, u32 base, u32 size)
3718{
3719	struct pci_dev *dev = priv->pci_dev;
3720
3721	q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3722	if (!q->txb) {
3723		IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3724		return -ENOMEM;
3725	}
3726
3727	q->bd =
3728	    pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3729	if (!q->bd) {
3730		IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3731			  sizeof(q->bd[0]) * count);
3732		kfree(q->txb);
3733		q->txb = NULL;
3734		return -ENOMEM;
3735	}
3736
3737	ipw_queue_init(priv, &q->q, count, read, write, base, size);
3738	return 0;
3739}
3740
3741/**
3742 * Free one TFD, the one at index [txq->q.last_used].
3743 * Do NOT advance any indexes
3744 *
3745 * @param dev
3746 * @param txq
3747 */
3748static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3749				  struct clx2_tx_queue *txq)
3750{
3751	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3752	struct pci_dev *dev = priv->pci_dev;
3753	int i;
3754
3755	/* classify bd */
3756	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3757		/* nothing to clean up after for host commands */
3758		return;
3759
3760	/* sanity check */
3761	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3762		IPW_ERROR("Too many chunks: %i\n",
3763			  le32_to_cpu(bd->u.data.num_chunks));
3764		/** @todo issue fatal error, it is quite a serious situation */
3765		return;
3766	}
3767
3768	/* unmap chunks if any */
3769	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3770		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3771				 le16_to_cpu(bd->u.data.chunk_len[i]),
3772				 PCI_DMA_TODEVICE);
3773		if (txq->txb[txq->q.last_used]) {
3774			libipw_txb_free(txq->txb[txq->q.last_used]);
3775			txq->txb[txq->q.last_used] = NULL;
3776		}
3777	}
3778}
3779
3780/**
3781 * Deallocate DMA queue.
3782 *
3783 * Empty queue by removing and destroying all BD's.
3784 * Free all buffers.
3785 *
3786 * @param dev
3787 * @param q
3788 */
3789static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3790{
3791	struct clx2_queue *q = &txq->q;
3792	struct pci_dev *dev = priv->pci_dev;
3793
3794	if (q->n_bd == 0)
3795		return;
3796
3797	/* first, empty all BD's */
3798	for (; q->first_empty != q->last_used;
3799	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3800		ipw_queue_tx_free_tfd(priv, txq);
3801	}
3802
3803	/* free buffers belonging to queue itself */
3804	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3805			    q->dma_addr);
3806	kfree(txq->txb);
3807
3808	/* 0 fill whole structure */
3809	memset(txq, 0, sizeof(*txq));
3810}
3811
3812/**
3813 * Destroy all DMA queues and structures
3814 *
3815 * @param priv
3816 */
3817static void ipw_tx_queue_free(struct ipw_priv *priv)
3818{
3819	/* Tx CMD queue */
3820	ipw_queue_tx_free(priv, &priv->txq_cmd);
3821
3822	/* Tx queues */
3823	ipw_queue_tx_free(priv, &priv->txq[0]);
3824	ipw_queue_tx_free(priv, &priv->txq[1]);
3825	ipw_queue_tx_free(priv, &priv->txq[2]);
3826	ipw_queue_tx_free(priv, &priv->txq[3]);
3827}
3828
3829static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3830{
3831	/* First 3 bytes are the manufacturer's OUI */
3832	bssid[0] = priv->mac_addr[0];
3833	bssid[1] = priv->mac_addr[1];
3834	bssid[2] = priv->mac_addr[2];
3835
3836	/* Last bytes are random */
3837	get_random_bytes(&bssid[3], ETH_ALEN - 3);
3838
3839	bssid[0] &= 0xfe;	/* clear multicast bit */
3840	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
3841}
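
/* Example (illustrative only; the MAC address below is made up): for an
 * adapter MAC of 00:16:6f:aa:bb:cc the generated ad-hoc BSSID keeps the
 * 00:16:6f OUI, fills the last three bytes with random data, clears the
 * multicast bit and sets the locally-administered bit in the first byte,
 * giving something like 02:16:6f:xx:yy:zz. */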
3842
3843static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3844{
3845	struct ipw_station_entry entry;
3846	int i;
3847
3848	for (i = 0; i < priv->num_stations; i++) {
3849		if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3850			/* Another node is active in network */
3851			priv->missed_adhoc_beacons = 0;
3852			if (!(priv->config & CFG_STATIC_CHANNEL))
3853				/* when other nodes drop out, we drop out */
3854				priv->config &= ~CFG_ADHOC_PERSIST;
3855
3856			return i;
3857		}
3858	}
3859
3860	if (i == MAX_STATIONS)
3861		return IPW_INVALID_STATION;
3862
3863	IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3864
3865	entry.reserved = 0;
3866	entry.support_mode = 0;
3867	memcpy(entry.mac_addr, bssid, ETH_ALEN);
3868	memcpy(priv->stations[i], bssid, ETH_ALEN);
3869	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3870			 &entry, sizeof(entry));
3871	priv->num_stations++;
3872
3873	return i;
3874}
3875
3876static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3877{
3878	int i;
3879
3880	for (i = 0; i < priv->num_stations; i++)
3881		if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3882			return i;
3883
3884	return IPW_INVALID_STATION;
3885}
3886
3887static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3888{
3889	int err;
3890
3891	if (priv->status & STATUS_ASSOCIATING) {
3892		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3893		queue_work(priv->workqueue, &priv->disassociate);
3894		return;
3895	}
3896
3897	if (!(priv->status & STATUS_ASSOCIATED)) {
3898		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3899		return;
3900	}
3901
3902	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3903			"on channel %d.\n",
3904			priv->assoc_request.bssid,
3905			priv->assoc_request.channel);
3906
3907	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3908	priv->status |= STATUS_DISASSOCIATING;
3909
3910	if (quiet)
3911		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3912	else
3913		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3914
3915	err = ipw_send_associate(priv, &priv->assoc_request);
3916	if (err) {
3917		IPW_DEBUG_HC("Attempt to send [dis]associate command "
3918			     "failed.\n");
3919		return;
3920	}
3921
3922}
3923
3924static int ipw_disassociate(void *data)
3925{
3926	struct ipw_priv *priv = data;
3927	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3928		return 0;
3929	ipw_send_disassociate(data, 0);
3930	netif_carrier_off(priv->net_dev);
3931	return 1;
3932}
3933
3934static void ipw_bg_disassociate(struct work_struct *work)
3935{
3936	struct ipw_priv *priv =
3937		container_of(work, struct ipw_priv, disassociate);
3938	mutex_lock(&priv->mutex);
3939	ipw_disassociate(priv);
3940	mutex_unlock(&priv->mutex);
3941}
3942
3943static void ipw_system_config(struct work_struct *work)
3944{
3945	struct ipw_priv *priv =
3946		container_of(work, struct ipw_priv, system_config);
3947
3948#ifdef CONFIG_IPW2200_PROMISCUOUS
3949	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3950		priv->sys_config.accept_all_data_frames = 1;
3951		priv->sys_config.accept_non_directed_frames = 1;
3952		priv->sys_config.accept_all_mgmt_bcpr = 1;
3953		priv->sys_config.accept_all_mgmt_frames = 1;
3954	}
3955#endif
3956
3957	ipw_send_system_config(priv);
3958}
3959
3960struct ipw_status_code {
3961	u16 status;
3962	const char *reason;
3963};
3964
3965static const struct ipw_status_code ipw_status_codes[] = {
3966	{0x00, "Successful"},
3967	{0x01, "Unspecified failure"},
3968	{0x0A, "Cannot support all requested capabilities in the "
3969	 "Capability information field"},
3970	{0x0B, "Reassociation denied due to inability to confirm that "
3971	 "association exists"},
3972	{0x0C, "Association denied due to reason outside the scope of this "
3973	 "standard"},
3974	{0x0D,
3975	 "Responding station does not support the specified authentication "
3976	 "algorithm"},
3977	{0x0E,
3978	 "Received an Authentication frame with authentication sequence "
3979	 "transaction sequence number out of expected sequence"},
3980	{0x0F, "Authentication rejected because of challenge failure"},
3981	{0x10, "Authentication rejected due to timeout waiting for next "
3982	 "frame in sequence"},
3983	{0x11, "Association denied because AP is unable to handle additional "
3984	 "associated stations"},
3985	{0x12,
3986	 "Association denied due to requesting station not supporting all "
3987	 "of the datarates in the BSSBasicServiceSet Parameter"},
3988	{0x13,
3989	 "Association denied due to requesting station not supporting "
3990	 "short preamble operation"},
3991	{0x14,
3992	 "Association denied due to requesting station not supporting "
3993	 "PBCC encoding"},
3994	{0x15,
3995	 "Association denied due to requesting station not supporting "
3996	 "channel agility"},
3997	{0x19,
3998	 "Association denied due to requesting station not supporting "
3999	 "short slot operation"},
4000	{0x1A,
4001	 "Association denied due to requesting station not supporting "
4002	 "DSSS-OFDM operation"},
4003	{0x28, "Invalid Information Element"},
4004	{0x29, "Group Cipher is not valid"},
4005	{0x2A, "Pairwise Cipher is not valid"},
4006	{0x2B, "AKMP is not valid"},
4007	{0x2C, "Unsupported RSN IE version"},
4008	{0x2D, "Invalid RSN IE Capabilities"},
4009	{0x2E, "Cipher suite is rejected per security policy"},
4010};
4011
4012static const char *ipw_get_status_code(u16 status)
4013{
4014	int i;
4015	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4016		if (ipw_status_codes[i].status == (status & 0xff))
4017			return ipw_status_codes[i].reason;
4018	return "Unknown status value.";
4019}
4020
4021static inline void average_init(struct average *avg)
4022{
4023	memset(avg, 0, sizeof(*avg));
4024}
4025
4026#define DEPTH_RSSI 8
4027#define DEPTH_NOISE 16
4028static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4029{
4030	return ((depth-1)*prev_avg +  val)/depth;
4031}
4032
4033static void average_add(struct average *avg, s16 val)
4034{
4035	avg->sum -= avg->entries[avg->pos];
4036	avg->sum += val;
4037	avg->entries[avg->pos++] = val;
4038	if (unlikely(avg->pos == AVG_ENTRIES)) {
4039		avg->init = 1;
4040		avg->pos = 0;
4041	}
4042}
4043
4044static s16 average_value(struct average *avg)
4045{
4046	if (unlikely(!avg->init)) {
4047		if (avg->pos)
4048			return avg->sum / avg->pos;
4049		return 0;
4050	}
4051
4052	return avg->sum / AVG_ENTRIES;
4053}
4054
4055static void ipw_reset_stats(struct ipw_priv *priv)
4056{
4057	u32 len = sizeof(u32);
4058
4059	priv->quality = 0;
4060
4061	average_init(&priv->average_missed_beacons);
4062	priv->exp_avg_rssi = -60;
4063	priv->exp_avg_noise = -85 + 0x100;
4064
4065	priv->last_rate = 0;
4066	priv->last_missed_beacons = 0;
4067	priv->last_rx_packets = 0;
4068	priv->last_tx_packets = 0;
4069	priv->last_tx_failures = 0;
4070
4071	/* Firmware managed, reset only when NIC is restarted, so we have to
4072	 * normalize on the current value */
4073	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4074			&priv->last_rx_err, &len);
4075	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4076			&priv->last_tx_failures, &len);
4077
4078	/* Driver managed, reset with each association */
4079	priv->missed_adhoc_beacons = 0;
4080	priv->missed_beacons = 0;
4081	priv->tx_packets = 0;
4082	priv->rx_packets = 0;
4083
4084}
4085
4086static u32 ipw_get_max_rate(struct ipw_priv *priv)
4087{
4088	u32 i = 0x80000000;
4089	u32 mask = priv->rates_mask;
4090	/* If currently associated in B mode, restrict the maximum
4091	 * rate match to B rates */
4092	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4093		mask &= LIBIPW_CCK_RATES_MASK;
4094
4095	/* TODO: Verify that the rate is supported by the current rates
4096	 * list. */
4097
4098	while (i && !(mask & i))
4099		i >>= 1;
4100	switch (i) {
4101	case LIBIPW_CCK_RATE_1MB_MASK:
4102		return 1000000;
4103	case LIBIPW_CCK_RATE_2MB_MASK:
4104		return 2000000;
4105	case LIBIPW_CCK_RATE_5MB_MASK:
4106		return 5500000;
4107	case LIBIPW_OFDM_RATE_6MB_MASK:
4108		return 6000000;
4109	case LIBIPW_OFDM_RATE_9MB_MASK:
4110		return 9000000;
4111	case LIBIPW_CCK_RATE_11MB_MASK:
4112		return 11000000;
4113	case LIBIPW_OFDM_RATE_12MB_MASK:
4114		return 12000000;
4115	case LIBIPW_OFDM_RATE_18MB_MASK:
4116		return 18000000;
4117	case LIBIPW_OFDM_RATE_24MB_MASK:
4118		return 24000000;
4119	case LIBIPW_OFDM_RATE_36MB_MASK:
4120		return 36000000;
4121	case LIBIPW_OFDM_RATE_48MB_MASK:
4122		return 48000000;
4123	case LIBIPW_OFDM_RATE_54MB_MASK:
4124		return 54000000;
4125	}
4126
4127	if (priv->ieee->mode == IEEE_B)
4128		return 11000000;
4129	else
4130		return 54000000;
4131}
4132
4133static u32 ipw_get_current_rate(struct ipw_priv *priv)
4134{
4135	u32 rate, len = sizeof(rate);
4136	int err;
4137
4138	if (!(priv->status & STATUS_ASSOCIATED))
4139		return 0;
4140
4141	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4142		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4143				      &len);
4144		if (err) {
4145			IPW_DEBUG_INFO("failed querying ordinals.\n");
4146			return 0;
4147		}
4148	} else
4149		return ipw_get_max_rate(priv);
4150
4151	switch (rate) {
4152	case IPW_TX_RATE_1MB:
4153		return 1000000;
4154	case IPW_TX_RATE_2MB:
4155		return 2000000;
4156	case IPW_TX_RATE_5MB:
4157		return 5500000;
4158	case IPW_TX_RATE_6MB:
4159		return 6000000;
4160	case IPW_TX_RATE_9MB:
4161		return 9000000;
4162	case IPW_TX_RATE_11MB:
4163		return 11000000;
4164	case IPW_TX_RATE_12MB:
4165		return 12000000;
4166	case IPW_TX_RATE_18MB:
4167		return 18000000;
4168	case IPW_TX_RATE_24MB:
4169		return 24000000;
4170	case IPW_TX_RATE_36MB:
4171		return 36000000;
4172	case IPW_TX_RATE_48MB:
4173		return 48000000;
4174	case IPW_TX_RATE_54MB:
4175		return 54000000;
4176	}
4177
4178	return 0;
4179}
4180
4181#define IPW_STATS_INTERVAL (2 * HZ)
4182static void ipw_gather_stats(struct ipw_priv *priv)
4183{
4184	u32 rx_err, rx_err_delta, rx_packets_delta;
4185	u32 tx_failures, tx_failures_delta, tx_packets_delta;
4186	u32 missed_beacons_percent, missed_beacons_delta;
4187	u32 quality = 0;
4188	u32 len = sizeof(u32);
4189	s16 rssi;
4190	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4191	    rate_quality;
4192	u32 max_rate;
4193
4194	if (!(priv->status & STATUS_ASSOCIATED)) {
4195		priv->quality = 0;
4196		return;
4197	}
4198
4199	/* Update the statistics */
4200	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4201			&priv->missed_beacons, &len);
4202	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4203	priv->last_missed_beacons = priv->missed_beacons;
4204	if (priv->assoc_request.beacon_interval) {
4205		missed_beacons_percent = missed_beacons_delta *
4206		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4207		    (IPW_STATS_INTERVAL * 10);
4208	} else {
4209		missed_beacons_percent = 0;
4210	}
4211	average_add(&priv->average_missed_beacons, missed_beacons_percent);
4212
4213	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4214	rx_err_delta = rx_err - priv->last_rx_err;
4215	priv->last_rx_err = rx_err;
4216
4217	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4218	tx_failures_delta = tx_failures - priv->last_tx_failures;
4219	priv->last_tx_failures = tx_failures;
4220
4221	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4222	priv->last_rx_packets = priv->rx_packets;
4223
4224	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4225	priv->last_tx_packets = priv->tx_packets;
4226
4227	/* Calculate quality based on the following:
4228	 *
4229	 * Missed beacon: 100% = 0, 0% = 70% missed
4230	 * Rate: 60% = 1Mbs, 100% = Max
4231	 * Rx and Tx errors represent a straight % of total Rx/Tx
4232	 * RSSI: 100% = > -50,  0% = < -80
4233	 * Rx errors: 100% = 0, 0% = 50% missed
4234	 *
4235	 * The lowest computed quality is used.
4236	 *
4237	 */
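	/* Illustrative numbers (not measured data): with the 5% threshold
	 * defined just below, missed_beacons_percent = 10 gives
	 * beacon_quality = 100 - 10 = 90, rescaled to
	 * (90 - 5) * 100 / (100 - 5) = 89; missed percentages above 95%
	 * clamp beacon_quality to 0.  The smallest of the partial qualities
	 * computed below becomes priv->quality. */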
4238#define BEACON_THRESHOLD 5
4239	beacon_quality = 100 - missed_beacons_percent;
4240	if (beacon_quality < BEACON_THRESHOLD)
4241		beacon_quality = 0;
4242	else
4243		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4244		    (100 - BEACON_THRESHOLD);
4245	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4246			beacon_quality, missed_beacons_percent);
4247
4248	priv->last_rate = ipw_get_current_rate(priv);
4249	max_rate = ipw_get_max_rate(priv);
4250	rate_quality = priv->last_rate * 40 / max_rate + 60;
4251	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4252			rate_quality, priv->last_rate / 1000000);
4253
4254	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4255		rx_quality = 100 - (rx_err_delta * 100) /
4256		    (rx_packets_delta + rx_err_delta);
4257	else
4258		rx_quality = 100;
4259	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
4260			rx_quality, rx_err_delta, rx_packets_delta);
4261
4262	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4263		tx_quality = 100 - (tx_failures_delta * 100) /
4264		    (tx_packets_delta + tx_failures_delta);
4265	else
4266		tx_quality = 100;
4267	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
4268			tx_quality, tx_failures_delta, tx_packets_delta);
4269
4270	rssi = priv->exp_avg_rssi;
4271	signal_quality =
4272	    (100 *
4273	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4274	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4275	     (priv->ieee->perfect_rssi - rssi) *
4276	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4277	      62 * (priv->ieee->perfect_rssi - rssi))) /
4278	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4279	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4280	if (signal_quality > 100)
4281		signal_quality = 100;
4282	else if (signal_quality < 1)
4283		signal_quality = 0;
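
	/* Worked example (illustrative; the perfect/worst RSSI bounds are
	 * assumed here to be -20 and -85 dBm, the actual values come from
	 * the ieee configuration): with rssi = -60 the formula gives
	 * (100 * 65 * 65 - 40 * (15 * 65 + 62 * 40)) / (65 * 65)
	 * = (422500 - 138200) / 4225 = 67, which the clamping above leaves
	 * unchanged. */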
4284
4285	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4286			signal_quality, rssi);
4287
4288	quality = min(rx_quality, signal_quality);
4289	quality = min(tx_quality, quality);
4290	quality = min(rate_quality, quality);
4291	quality = min(beacon_quality, quality);
4292	if (quality == beacon_quality)
4293		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4294				quality);
4295	if (quality == rate_quality)
4296		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4297				quality);
4298	if (quality == tx_quality)
4299		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4300				quality);
4301	if (quality == rx_quality)
4302		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4303				quality);
4304	if (quality == signal_quality)
4305		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4306				quality);
4307
4308	priv->quality = quality;
4309
4310	queue_delayed_work(priv->workqueue, &priv->gather_stats,
4311			   IPW_STATS_INTERVAL);
4312}
4313
4314static void ipw_bg_gather_stats(struct work_struct *work)
4315{
4316	struct ipw_priv *priv =
4317		container_of(work, struct ipw_priv, gather_stats.work);
4318	mutex_lock(&priv->mutex);
4319	ipw_gather_stats(priv);
4320	mutex_unlock(&priv->mutex);
4321}
4322
4323/* Missed beacon behavior:
4324 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4325 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4326 * Above disassociate threshold, give up and stop scanning.
4327 * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
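/* Illustrative timeline (the threshold values below are only example
 * numbers; the real ones live in priv->roaming_threshold and
 * priv->disassociate_threshold): with roaming_threshold = 8 and
 * disassociate_threshold = 24, missed counts 1..8 are only logged,
 * 9..24 set STATUS_ROAMING and trigger scans for a better AP, and
 * anything above 24 aborts scanning and queues the disassociate work. */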
4328static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4329					    int missed_count)
4330{
4331	priv->notif_missed_beacons = missed_count;
4332
4333	if (missed_count > priv->disassociate_threshold &&
4334	    priv->status & STATUS_ASSOCIATED) {
4335		/* If associated and we've hit the missed
4336		 * beacon threshold, disassociate, turn
4337		 * off roaming, and abort any active scans */
4338		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4339			  IPW_DL_STATE | IPW_DL_ASSOC,
4340			  "Missed beacon: %d - disassociate\n", missed_count);
4341		priv->status &= ~STATUS_ROAMING;
4342		if (priv->status & STATUS_SCANNING) {
4343			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4344				  IPW_DL_STATE,
4345				  "Aborting scan with missed beacon.\n");
4346			queue_work(priv->workqueue, &priv->abort_scan);
4347		}
4348
4349		queue_work(priv->workqueue, &priv->disassociate);
4350		return;
4351	}
4352
4353	if (priv->status & STATUS_ROAMING) {
4354		/* If we are currently roaming, then just
4355		 * print a debug statement... */
4356		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4357			  "Missed beacon: %d - roam in progress\n",
4358			  missed_count);
4359		return;
4360	}
4361
4362	if (roaming &&
4363	    (missed_count > priv->roaming_threshold &&
4364	     missed_count <= priv->disassociate_threshold)) {
4365		/* If we are not already roaming, set the ROAM
4366		 * bit in the status and kick off a scan.
4367		 * This can happen several times before we reach
4368		 * disassociate_threshold. */
4369		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4370			  "Missed beacon: %d - initiate "
4371			  "roaming\n", missed_count);
4372		if (!(priv->status & STATUS_ROAMING)) {
4373			priv->status |= STATUS_ROAMING;
4374			if (!(priv->status & STATUS_SCANNING))
4375				queue_delayed_work(priv->workqueue,
4376						   &priv->request_scan, 0);
4377		}
4378		return;
4379	}
4380
4381	if (priv->status & STATUS_SCANNING &&
4382	    missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4383		/* Stop scan to keep fw from getting
4384		 * stuck (only if we aren't roaming --
4385		 * otherwise we'll never scan more than 2 or 3
4386		 * channels..) */
4387		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4388			  "Aborting scan with missed beacon.\n");
4389		queue_work(priv->workqueue, &priv->abort_scan);
4390	}
4391
4392	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4393}
4394
4395static void ipw_scan_event(struct work_struct *work)
4396{
4397	union iwreq_data wrqu;
4398
4399	struct ipw_priv *priv =
4400		container_of(work, struct ipw_priv, scan_event.work);
4401
4402	wrqu.data.length = 0;
4403	wrqu.data.flags = 0;
4404	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4405}
4406
4407static void handle_scan_event(struct ipw_priv *priv)
4408{
4409	/* Only userspace-requested scan completion events go out immediately */
4410	if (!priv->user_requested_scan) {
4411		if (!delayed_work_pending(&priv->scan_event))
4412			queue_delayed_work(priv->workqueue, &priv->scan_event,
4413					 round_jiffies_relative(msecs_to_jiffies(4000)));
4414	} else {
4415		union iwreq_data wrqu;
4416
4417		priv->user_requested_scan = 0;
4418		cancel_delayed_work(&priv->scan_event);
4419
4420		wrqu.data.length = 0;
4421		wrqu.data.flags = 0;
4422		wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4423	}
4424}
4425
4426/**
4427 * Handle host notification packet.
4428 * Called from interrupt routine
4429 */
4430static void ipw_rx_notification(struct ipw_priv *priv,
4431				       struct ipw_rx_notification *notif)
4432{
4433	DECLARE_SSID_BUF(ssid);
4434	u16 size = le16_to_cpu(notif->size);
4435
4436	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4437
4438	switch (notif->subtype) {
4439	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4440			struct notif_association *assoc = &notif->u.assoc;
4441
4442			switch (assoc->state) {
4443			case CMAS_ASSOCIATED:{
4444					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4445						  IPW_DL_ASSOC,
4446						  "associated: '%s' %pM \n",
4447						  print_ssid(ssid, priv->essid,
4448							     priv->essid_len),
4449						  priv->bssid);
4450
4451					switch (priv->ieee->iw_mode) {
4452					case IW_MODE_INFRA:
4453						memcpy(priv->ieee->bssid,
4454						       priv->bssid, ETH_ALEN);
4455						break;
4456
4457					case IW_MODE_ADHOC:
4458						memcpy(priv->ieee->bssid,
4459						       priv->bssid, ETH_ALEN);
4460
4461						/* clear out the station table */
4462						priv->num_stations = 0;
4463
4464						IPW_DEBUG_ASSOC
4465						    ("queueing adhoc check\n");
4466						queue_delayed_work(priv->
4467								   workqueue,
4468								   &priv->
4469								   adhoc_check,
4470								   le16_to_cpu(priv->
4471								   assoc_request.
4472								   beacon_interval));
4473						break;
4474					}
4475
4476					priv->status &= ~STATUS_ASSOCIATING;
4477					priv->status |= STATUS_ASSOCIATED;
4478					queue_work(priv->workqueue,
4479						   &priv->system_config);
4480
4481#ifdef CONFIG_IPW2200_QOS
4482#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4483			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4484					if ((priv->status & STATUS_AUTH) &&
4485					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
4486					     == IEEE80211_STYPE_ASSOC_RESP)) {
4487						if ((sizeof
4488						     (struct
4489						      libipw_assoc_response)
4490						     <= size)
4491						    && (size <= 2314)) {
4492							struct
4493							libipw_rx_stats
4494							    stats = {
4495								.len = size - 1,
4496							};
4497
4498							IPW_DEBUG_QOS
4499							    ("QoS Associate "
4500							     "size %d\n", size);
4501							libipw_rx_mgt(priv->
4502									 ieee,
4503									 (struct
4504									  libipw_hdr_4addr
4505									  *)
4506									 &notif->u.raw, &stats);
4507						}
4508					}
4509#endif
4510
4511					schedule_work(&priv->link_up);
4512
4513					break;
4514				}
4515
4516			case CMAS_AUTHENTICATED:{
4517					if (priv->
4518					    status & (STATUS_ASSOCIATED |
4519						      STATUS_AUTH)) {
4520						struct notif_authenticate *auth
4521						    = &notif->u.auth;
4522						IPW_DEBUG(IPW_DL_NOTIF |
4523							  IPW_DL_STATE |
4524							  IPW_DL_ASSOC,
4525							  "deauthenticated: '%s' "
4526							  "%pM"
4527							  ": (0x%04X) - %s \n",
4528							  print_ssid(ssid,
4529								     priv->
4530								     essid,
4531								     priv->
4532								     essid_len),
4533							  priv->bssid,
4534							  le16_to_cpu(auth->status),
4535							  ipw_get_status_code
4536							  (le16_to_cpu
4537							   (auth->status)));
4538
4539						priv->status &=
4540						    ~(STATUS_ASSOCIATING |
4541						      STATUS_AUTH |
4542						      STATUS_ASSOCIATED);
4543
4544						schedule_work(&priv->link_down);
4545						break;
4546					}
4547
4548					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4549						  IPW_DL_ASSOC,
4550						  "authenticated: '%s' %pM\n",
4551						  print_ssid(ssid, priv->essid,
4552							     priv->essid_len),
4553						  priv->bssid);
4554					break;
4555				}
4556
4557			case CMAS_INIT:{
4558					if (priv->status & STATUS_AUTH) {
4559						struct
4560						    libipw_assoc_response
4561						*resp;
4562						resp =
4563						    (struct
4564						     libipw_assoc_response
4565						     *)&notif->u.raw;
4566						IPW_DEBUG(IPW_DL_NOTIF |
4567							  IPW_DL_STATE |
4568							  IPW_DL_ASSOC,
4569							  "association failed (0x%04X): %s\n",
4570							  le16_to_cpu(resp->status),
4571							  ipw_get_status_code
4572							  (le16_to_cpu
4573							   (resp->status)));
4574					}
4575
4576					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4577						  IPW_DL_ASSOC,
4578						  "disassociated: '%s' %pM \n",
4579						  print_ssid(ssid, priv->essid,
4580							     priv->essid_len),
4581						  priv->bssid);
4582
4583					priv->status &=
4584					    ~(STATUS_DISASSOCIATING |
4585					      STATUS_ASSOCIATING |
4586					      STATUS_ASSOCIATED | STATUS_AUTH);
4587					if (priv->assoc_network
4588					    && (priv->assoc_network->
4589						capability &
4590						WLAN_CAPABILITY_IBSS))
4591						ipw_remove_current_network
4592						    (priv);
4593
4594					schedule_work(&priv->link_down);
4595
4596					break;
4597				}
4598
4599			case CMAS_RX_ASSOC_RESP:
4600				break;
4601
4602			default:
4603				IPW_ERROR("assoc: unknown (%d)\n",
4604					  assoc->state);
4605				break;
4606			}
4607
4608			break;
4609		}
4610
4611	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4612			struct notif_authenticate *auth = &notif->u.auth;
4613			switch (auth->state) {
4614			case CMAS_AUTHENTICATED:
4615				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4616					  "authenticated: '%s' %pM \n",
4617					  print_ssid(ssid, priv->essid,
4618						     priv->essid_len),
4619					  priv->bssid);
4620				priv->status |= STATUS_AUTH;
4621				break;
4622
4623			case CMAS_INIT:
4624				if (priv->status & STATUS_AUTH) {
4625					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4626						  IPW_DL_ASSOC,
4627						  "authentication failed (0x%04X): %s\n",
4628						  le16_to_cpu(auth->status),
4629						  ipw_get_status_code(le16_to_cpu
4630								      (auth->
4631								       status)));
4632				}
4633				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4634					  IPW_DL_ASSOC,
4635					  "deauthenticated: '%s' %pM\n",
4636					  print_ssid(ssid, priv->essid,
4637						     priv->essid_len),
4638					  priv->bssid);
4639
4640				priv->status &= ~(STATUS_ASSOCIATING |
4641						  STATUS_AUTH |
4642						  STATUS_ASSOCIATED);
4643
4644				schedule_work(&priv->link_down);
4645				break;
4646
4647			case CMAS_TX_AUTH_SEQ_1:
4648				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4649					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4650				break;
4651			case CMAS_RX_AUTH_SEQ_2:
4652				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4653					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4654				break;
4655			case CMAS_AUTH_SEQ_1_PASS:
4656				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4657					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4658				break;
4659			case CMAS_AUTH_SEQ_1_FAIL:
4660				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4661					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4662				break;
4663			case CMAS_TX_AUTH_SEQ_3:
4664				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4665					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4666				break;
4667			case CMAS_RX_AUTH_SEQ_4:
4668				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4669					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4670				break;
4671			case CMAS_AUTH_SEQ_2_PASS:
4672				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4673					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4674				break;
4675			case CMAS_AUTH_SEQ_2_FAIL:
4676				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4677					  IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4678				break;
4679			case CMAS_TX_ASSOC:
4680				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4681					  IPW_DL_ASSOC, "TX_ASSOC\n");
4682				break;
4683			case CMAS_RX_ASSOC_RESP:
4684				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4685					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4686
4687				break;
4688			case CMAS_ASSOCIATED:
4689				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4690					  IPW_DL_ASSOC, "ASSOCIATED\n");
4691				break;
4692			default:
4693				IPW_DEBUG_NOTIF("auth: failure - %d\n",
4694						auth->state);
4695				break;
4696			}
4697			break;
4698		}
4699
4700	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4701			struct notif_channel_result *x =
4702			    &notif->u.channel_result;
4703
4704			if (size == sizeof(*x)) {
4705				IPW_DEBUG_SCAN("Scan result for channel %d\n",
4706					       x->channel_num);
4707			} else {
4708				IPW_DEBUG_SCAN("Scan result of wrong size %d "
4709					       "(should be %zd)\n",
4710					       size, sizeof(*x));
4711			}
4712			break;
4713		}
4714
4715	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4716			struct notif_scan_complete *x = &notif->u.scan_complete;
4717			if (size == sizeof(*x)) {
4718				IPW_DEBUG_SCAN
4719				    ("Scan completed: type %d, %d channels, "
4720				     "%d status\n", x->scan_type,
4721				     x->num_channels, x->status);
4722			} else {
4723				IPW_ERROR("Scan completed of wrong size %d "
4724					  "(should be %zd)\n",
4725					  size, sizeof(*x));
4726			}
4727
4728			priv->status &=
4729			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4730
4731			wake_up_interruptible(&priv->wait_state);
4732			cancel_delayed_work(&priv->scan_check);
4733
4734			if (priv->status & STATUS_EXIT_PENDING)
4735				break;
4736
4737			priv->ieee->scans++;
4738
4739#ifdef CONFIG_IPW2200_MONITOR
4740			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4741				priv->status |= STATUS_SCAN_FORCED;
4742				queue_delayed_work(priv->workqueue,
4743						   &priv->request_scan, 0);
4744				break;
4745			}
4746			priv->status &= ~STATUS_SCAN_FORCED;
4747#endif				/* CONFIG_IPW2200_MONITOR */
4748
4749			/* Do queued direct scans first */
4750			if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
4751				queue_delayed_work(priv->workqueue,
4752						   &priv->request_direct_scan, 0);
4753			}
4754
4755			if (!(priv->status & (STATUS_ASSOCIATED |
4756					      STATUS_ASSOCIATING |
4757					      STATUS_ROAMING |
4758					      STATUS_DISASSOCIATING)))
4759				queue_work(priv->workqueue, &priv->associate);
4760			else if (priv->status & STATUS_ROAMING) {
4761				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4762					/* If a scan completed and we are in roam mode, then
4763					 * the scan that completed was the one requested as a
4764					 * result of entering roam... so, schedule the
4765					 * roam work */
4766					queue_work(priv->workqueue,
4767						   &priv->roam);
4768				else
4769					/* Don't schedule if we aborted the scan */
4770					priv->status &= ~STATUS_ROAMING;
4771			} else if (priv->status & STATUS_SCAN_PENDING)
4772				queue_delayed_work(priv->workqueue,
4773						   &priv->request_scan, 0);
4774			else if (priv->config & CFG_BACKGROUND_SCAN
4775				 && priv->status & STATUS_ASSOCIATED)
4776				queue_delayed_work(priv->workqueue,
4777						   &priv->request_scan,
4778						   round_jiffies_relative(HZ));
4779
4780			/* Send an empty event to user space.
4781			 * We don't send the received data on the event because
4782			 * it would require us to do complex transcoding, and
4783			 * we want to minimise the work done in the irq handler.
4784			 * Use a request to extract the data.
4785			 * Also, we generate this event for any scan, regardless
4786			 * of how the scan was initiated. User space can just
4787			 * sync on periodic scan to get fresh data...
4788			 * Jean II */
4789			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4790				handle_scan_event(priv);
4791			break;
4792		}
4793
4794	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4795			struct notif_frag_length *x = &notif->u.frag_len;
4796
4797			if (size == sizeof(*x))
4798				IPW_ERROR("Frag length: %d\n",
4799					  le16_to_cpu(x->frag_length));
4800			else
4801				IPW_ERROR("Frag length of wrong size %d "
4802					  "(should be %zd)\n",
4803					  size, sizeof(*x));
4804			break;
4805		}
4806
4807	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4808			struct notif_link_deterioration *x =
4809			    &notif->u.link_deterioration;
4810
4811			if (size == sizeof(*x)) {
4812				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4813					"link deterioration: type %d, cnt %d\n",
4814					x->silence_notification_type,
4815					x->silence_count);
4816				memcpy(&priv->last_link_deterioration, x,
4817				       sizeof(*x));
4818			} else {
4819				IPW_ERROR("Link Deterioration of wrong size %d "
4820					  "(should be %zd)\n",
4821					  size, sizeof(*x));
4822			}
4823			break;
4824		}
4825
4826	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4827			IPW_ERROR("Dino config\n");
4828			if (priv->hcmd
4829			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4830				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4831
4832			break;
4833		}
4834
4835	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4836			struct notif_beacon_state *x = &notif->u.beacon_state;
4837			if (size != sizeof(*x)) {
4838				IPW_ERROR
4839				    ("Beacon state of wrong size %d (should "
4840				     "be %zd)\n", size, sizeof(*x));
4841				break;
4842			}
4843
4844			if (le32_to_cpu(x->state) ==
4845			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4846				ipw_handle_missed_beacon(priv,
4847							 le32_to_cpu(x->
4848								     number));
4849
4850			break;
4851		}
4852
4853	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4854			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4855			if (size == sizeof(*x)) {
4856				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4857					  "0x%02x station %d\n",
4858					  x->key_state, x->security_type,
4859					  x->station_index);
4860				break;
4861			}
4862
4863			IPW_ERROR
4864			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
4865			     size, sizeof(*x));
4866			break;
4867		}
4868
4869	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4870			struct notif_calibration *x = &notif->u.calibration;
4871
4872			if (size == sizeof(*x)) {
4873				memcpy(&priv->calib, x, sizeof(*x));
4874				IPW_DEBUG_INFO("TODO: Calibration\n");
4875				break;
4876			}
4877
4878			IPW_ERROR
4879			    ("Calibration of wrong size %d (should be %zd)\n",
4880			     size, sizeof(*x));
4881			break;
4882		}
4883
4884	case HOST_NOTIFICATION_NOISE_STATS:{
4885			if (size == sizeof(u32)) {
4886				priv->exp_avg_noise =
4887				    exponential_average(priv->exp_avg_noise,
4888				    (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4889				    DEPTH_NOISE);
4890				break;
4891			}
4892
4893			IPW_ERROR
4894			    ("Noise stat is wrong size %d (should be %zd)\n",
4895			     size, sizeof(u32));
4896			break;
4897		}
4898
4899	default:
4900		IPW_DEBUG_NOTIF("Unknown notification: "
4901				"subtype=%d,flags=0x%02x,size=%d\n",
4902				notif->subtype, notif->flags, size);
4903	}
4904}
4905
4906/**
4907 * Destroys all DMA structures and initialises them again
4908 *
4909 * @param priv
4910 * @return error code
4911 */
4912static int ipw_queue_reset(struct ipw_priv *priv)
4913{
4914	int rc = 0;
4915	/** @todo customize queue sizes */
4916	int nTx = 64, nTxCmd = 8;
4917	ipw_tx_queue_free(priv);
4918	/* Tx CMD queue */
4919	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4920			       IPW_TX_CMD_QUEUE_READ_INDEX,
4921			       IPW_TX_CMD_QUEUE_WRITE_INDEX,
4922			       IPW_TX_CMD_QUEUE_BD_BASE,
4923			       IPW_TX_CMD_QUEUE_BD_SIZE);
4924	if (rc) {
4925		IPW_ERROR("Tx Cmd queue init failed\n");
4926		goto error;
4927	}
4928	/* Tx queue(s) */
4929	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4930			       IPW_TX_QUEUE_0_READ_INDEX,
4931			       IPW_TX_QUEUE_0_WRITE_INDEX,
4932			       IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4933	if (rc) {
4934		IPW_ERROR("Tx 0 queue init failed\n");
4935		goto error;
4936	}
4937	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4938			       IPW_TX_QUEUE_1_READ_INDEX,
4939			       IPW_TX_QUEUE_1_WRITE_INDEX,
4940			       IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4941	if (rc) {
4942		IPW_ERROR("Tx 1 queue init failed\n");
4943		goto error;
4944	}
4945	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4946			       IPW_TX_QUEUE_2_READ_INDEX,
4947			       IPW_TX_QUEUE_2_WRITE_INDEX,
4948			       IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4949	if (rc) {
4950		IPW_ERROR("Tx 2 queue init failed\n");
4951		goto error;
4952	}
4953	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4954			       IPW_TX_QUEUE_3_READ_INDEX,
4955			       IPW_TX_QUEUE_3_WRITE_INDEX,
4956			       IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4957	if (rc) {
4958		IPW_ERROR("Tx 3 queue init failed\n");
4959		goto error;
4960	}
4961	/* statistics */
4962	priv->rx_bufs_min = 0;
4963	priv->rx_pend_max = 0;
4964	return rc;
4965
4966      error:
4967	ipw_tx_queue_free(priv);
4968	return rc;
4969}
4970
4971/**
4972 * Reclaim Tx queue entries no longer used by the NIC.
4973 *
4974 * When the FW advances the 'R' index, all entries between the old and
4975 * new 'R' index need to be reclaimed. As a result, some free space becomes
4976 * available. If there is enough free space (> low mark), wake the Tx queue.
4977 *
4978 * @note Need to protect against garbage in 'R' index
4979 * @param priv
4980 * @param txq
4981 * @param qindex
4982 * @return Number of used entries remains in the queue
4983 * @return Number of used entries remaining in the queue
4984static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4985				struct clx2_tx_queue *txq, int qindex)
4986{
4987	u32 hw_tail;
4988	int used;
4989	struct clx2_queue *q = &txq->q;
4990
4991	hw_tail = ipw_read32(priv, q->reg_r);
4992	if (hw_tail >= q->n_bd) {
4993		IPW_ERROR
4994		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4995		     hw_tail, q->n_bd);
4996		goto done;
4997	}
4998	for (; q->last_used != hw_tail;
4999	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5000		ipw_queue_tx_free_tfd(priv, txq);
5001		priv->tx_packets++;
5002	}
5003      done:
5004	if ((ipw_tx_queue_space(q) > q->low_mark) &&
5005	    (qindex >= 0))
5006		netif_wake_queue(priv->net_dev);
5007	used = q->first_empty - q->last_used;
5008	if (used < 0)
5009		used += q->n_bd;
5010
5011	return used;
5012}
5013
5014static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5015			     int len, int sync)
5016{
5017	struct clx2_tx_queue *txq = &priv->txq_cmd;
5018	struct clx2_queue *q = &txq->q;
5019	struct tfd_frame *tfd;
5020
5021	if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5022		IPW_ERROR("No space for Tx\n");
5023		return -EBUSY;
5024	}
5025
5026	tfd = &txq->bd[q->first_empty];
5027	txq->txb[q->first_empty] = NULL;
5028
5029	memset(tfd, 0, sizeof(*tfd));
5030	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5031	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5032	priv->hcmd_seq++;
5033	tfd->u.cmd.index = hcmd;
5034	tfd->u.cmd.length = len;
5035	memcpy(tfd->u.cmd.payload, buf, len);
5036	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5037	ipw_write32(priv, q->reg_w, q->first_empty);
5038	_ipw_read32(priv, 0x90);
5039
5040	return 0;
5041}
5042
5043/*
5044 * Rx theory of operation
5045 *
5046 * The host allocates 32 DMA target addresses and passes the host address
5047 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5048 * 0 to 31
5049 *
5050 * Rx Queue Indexes
5051 * The host/firmware share two index registers for managing the Rx buffers.
5052 *
5053 * The READ index maps to the first position that the firmware may be writing
5054 * to -- the driver can read up to (but not including) this position and get
5055 * good data.
5056 * The READ index is managed by the firmware once the card is enabled.
5057 *
5058 * The WRITE index maps to the last position the driver has read from -- the
5059 * position preceding WRITE is the last slot the firmware can place a packet.
5060 * position preceding WRITE is the last slot in which the firmware can place a packet.
5061 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5062 * WRITE = READ.
5063 *
5064 * During initialization the host sets up the READ queue position to the first
5065 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5066 *
5067 * When the firmware places a packet in a buffer it will advance the READ index
5068 * and fire the RX interrupt.  The driver can then query the READ index and
5069 * process as many packets as possible, moving the WRITE index forward as it
5070 * resets the Rx queue buffers with new memory.
5071 *
5072 * The management in the driver is as follows:
5073 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
5074 *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5075 *   to replensish the ipw->rxq->rx_free.
5076 *   to replenish the ipw->rxq->rx_free.
5077 *   ipw->rxq is replenished and the READ INDEX is updated (updating the
5078 *   'processed' and 'read' driver indexes as well)
5079 * + A received packet is processed and handed to the kernel network stack,
5080 *   detached from the ipw->rxq.  The driver 'processed' index is updated.
5081 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5082 *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5083 *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
5084 *   were enough free buffers and RX_STALLED is set it is cleared.
5085 *
5086 *
5087 * Driver sequence:
5088 *
5089 * ipw_rx_queue_alloc()       Allocates rx_free
5090 * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
5091 *                            ipw_rx_queue_restock
5092 * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
5093 *                            queue, updates firmware pointers, and updates
5094 *                            the WRITE index.  If insufficient rx_free buffers
5095 *                            are available, schedules ipw_rx_queue_replenish
5096 *
5097 * -- enable interrupts --
5098 * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
5099 *                            READ INDEX, detaching the SKB from the pool.
5100 *                            Moves the packet buffer from queue to rx_used.
5101 *                            Calls ipw_rx_queue_restock to refill any empty
5102 *                            slots.
5103 * ...
5104 *
5105 */
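
/*
 * Concrete illustration of the indexes above (added for clarity): with the
 * 32-slot table, a freshly initialized queue has READ = 0 and WRITE = 31,
 * i.e. WRITE == READ - 1 modulo 32, the "empty" state.  The firmware
 * advances READ as it fills buffers; the driver advances WRITE as it
 * restocks them in ipw_rx_queue_restock().  WRITE == READ is the "full"
 * state; the restock loop actually stops a couple of slots short of it
 * because of the 2-slot reserve kept by ipw_rx_queue_space().
 */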
5106
5107/*
5108 * If there are slots in the RX queue that  need to be restocked,
5109 * If there are slots in the RX queue that need to be restocked,
5110 * as we can pulling from rx_free.
5111 *
5112 * This moves the 'write' index forward to catch up with 'processed', and
5113 * also updates the memory address in the firmware to reference the new
5114 * target buffer.
5115 */
5116static void ipw_rx_queue_restock(struct ipw_priv *priv)
5117{
5118	struct ipw_rx_queue *rxq = priv->rxq;
5119	struct list_head *element;
5120	struct ipw_rx_mem_buffer *rxb;
5121	unsigned long flags;
5122	int write;
5123
5124	spin_lock_irqsave(&rxq->lock, flags);
5125	write = rxq->write;
5126	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5127		element = rxq->rx_free.next;
5128		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5129		list_del(element);
5130
5131		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5132			    rxb->dma_addr);
5133		rxq->queue[rxq->write] = rxb;
5134		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5135		rxq->free_count--;
5136	}
5137	spin_unlock_irqrestore(&rxq->lock, flags);
5138
5139	/* If the pre-allocated buffer pool is dropping low, schedule to
5140	 * refill it */
5141	if (rxq->free_count <= RX_LOW_WATERMARK)
5142		queue_work(priv->workqueue, &priv->rx_replenish);
5143
5144	/* If we've added more space for the firmware to place data, tell it */
5145	if (write != rxq->write)
5146		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5147}
5148
5149/*
5150 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5151 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5152 *
5153 * This is called as a scheduled work item (except for during intialization)
5154 * This is called as a scheduled work item (except for during initialization)
5155static void ipw_rx_queue_replenish(void *data)
5156{
5157	struct ipw_priv *priv = data;
5158	struct ipw_rx_queue *rxq = priv->rxq;
5159	struct list_head *element;
5160	struct ipw_rx_mem_buffer *rxb;
5161	unsigned long flags;
5162
5163	spin_lock_irqsave(&rxq->lock, flags);
5164	while (!list_empty(&rxq->rx_used)) {
5165		element = rxq->rx_used.next;
5166		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5167		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5168		if (!rxb->skb) {
5169			printk(KERN_CRIT "%s: Cannot allocate SKB buffers.\n",
5170			       priv->net_dev->name);
5171			/* We don't reschedule replenish work here -- we will
5172			 * call the restock method and if it still needs
5173			 * more buffers it will schedule replenish */
5174			break;
5175		}
5176		list_del(element);
5177
5178		rxb->dma_addr =
5179		    pci_map_single(priv->pci_dev, rxb->skb->data,
5180				   IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5181
5182		list_add_tail(&rxb->list, &rxq->rx_free);
5183		rxq->free_count++;
5184	}
5185	spin_unlock_irqrestore(&rxq->lock, flags);
5186
5187	ipw_rx_queue_restock(priv);
5188}
5189
5190static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5191{
5192	struct ipw_priv *priv =
5193		container_of(work, struct ipw_priv, rx_replenish);
5194	mutex_lock(&priv->mutex);
5195	ipw_rx_queue_replenish(priv);
5196	mutex_unlock(&priv->mutex);
5197}
5198
5199/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5200 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5201 * This free routine walks the list of POOL entries and, if the SKB is
5202 * non-NULL, unmaps and frees it.
5203 */
5204static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5205{
5206	int i;
5207
5208	if (!rxq)
5209		return;
5210
5211	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5212		if (rxq->pool[i].skb != NULL) {
5213			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5214					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5215			dev_kfree_skb(rxq->pool[i].skb);
5216		}
5217	}
5218
5219	kfree(rxq);
5220}
5221
5222static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5223{
5224	struct ipw_rx_queue *rxq;
5225	int i;
5226
5227	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5228	if (unlikely(!rxq)) {
5229		IPW_ERROR("memory allocation failed\n");
5230		return NULL;
5231	}
5232	spin_lock_init(&rxq->lock);
5233	INIT_LIST_HEAD(&rxq->rx_free);
5234	INIT_LIST_HEAD(&rxq->rx_used);
5235
5236	/* Fill the rx_used queue with _all_ of the Rx buffers */
5237	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5238		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5239
5240	/* Set us so that we have processed and used all buffers, but have
5241	 * not restocked the Rx queue with fresh buffers */
5242	rxq->read = rxq->write = 0;
5243	rxq->free_count = 0;
5244
5245	return rxq;
5246}
5247
5248static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5249{
5250	rate &= ~LIBIPW_BASIC_RATE_MASK;
5251	if (ieee_mode == IEEE_A) {
5252		switch (rate) {
5253		case LIBIPW_OFDM_RATE_6MB:
5254			return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5255			    1 : 0;
5256		case LIBIPW_OFDM_RATE_9MB:
5257			return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5258			    1 : 0;
5259		case LIBIPW_OFDM_RATE_12MB:
5260			return priv->
5261			    rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5262		case LIBIPW_OFDM_RATE_18MB:
5263			return priv->
5264			    rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5265		case LIBIPW_OFDM_RATE_24MB:
5266			return priv->
5267			    rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5268		case LIBIPW_OFDM_RATE_36MB:
5269			return priv->
5270			    rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5271		case LIBIPW_OFDM_RATE_48MB:
5272			return priv->
5273			    rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5274		case LIBIPW_OFDM_RATE_54MB:
5275			return priv->
5276			    rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5277		default:
5278			return 0;
5279		}
5280	}
5281
5282	/* B and G mixed */
5283	switch (rate) {
5284	case LIBIPW_CCK_RATE_1MB:
5285		return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5286	case LIBIPW_CCK_RATE_2MB:
5287		return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5288	case LIBIPW_CCK_RATE_5MB:
5289		return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5290	case LIBIPW_CCK_RATE_11MB:
5291		return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5292	}
5293
5294	/* If we are limited to B modulations, bail at this point */
5295	if (ieee_mode == IEEE_B)
5296		return 0;
5297
5298	/* G */
5299	switch (rate) {
5300	case LIBIPW_OFDM_RATE_6MB:
5301		return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5302	case LIBIPW_OFDM_RATE_9MB:
5303		return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5304	case LIBIPW_OFDM_RATE_12MB:
5305		return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5306	case LIBIPW_OFDM_RATE_18MB:
5307		return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5308	case LIBIPW_OFDM_RATE_24MB:
5309		return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5310	case LIBIPW_OFDM_RATE_36MB:
5311		return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5312	case LIBIPW_OFDM_RATE_48MB:
5313		return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5314	case LIBIPW_OFDM_RATE_54MB:
5315		return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5316	}
5317
5318	return 0;
5319}
5320
5321static int ipw_compatible_rates(struct ipw_priv *priv,
5322				const struct libipw_network *network,
5323				struct ipw_supported_rates *rates)
5324{
5325	int num_rates, i;
5326
5327	memset(rates, 0, sizeof(*rates));
5328	num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5329	rates->num_rates = 0;
5330	for (i = 0; i < num_rates; i++) {
5331		if (!ipw_is_rate_in_mask(priv, network->mode,
5332					 network->rates[i])) {
5333
5334			if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5335				IPW_DEBUG_SCAN("Adding masked mandatory "
5336					       "rate %02X\n",
5337					       network->rates[i]);
5338				rates->supported_rates[rates->num_rates++] =
5339				    network->rates[i];
5340				continue;
5341			}
5342
5343			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5344				       network->rates[i], priv->rates_mask);
5345			continue;
5346		}
5347
5348		rates->supported_rates[rates->num_rates++] = network->rates[i];
5349	}
5350
5351	num_rates = min(network->rates_ex_len,
5352			(u8) (IPW_MAX_RATES - num_rates));
5353	for (i = 0; i < num_rates; i++) {
5354		if (!ipw_is_rate_in_mask(priv, network->mode,
5355					 network->rates_ex[i])) {
5356			if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5357				IPW_DEBUG_SCAN("Adding masked mandatory "
5358					       "rate %02X\n",
5359					       network->rates_ex[i]);
5360				rates->supported_rates[rates->num_rates++] =
5361				    network->rates_ex[i];
5362				continue;
5363			}
5364
5365			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5366				       network->rates_ex[i], priv->rates_mask);
5367			continue;
5368		}
5369
5370		rates->supported_rates[rates->num_rates++] =
5371		    network->rates_ex[i];
5372	}
5373
5374	return 1;
5375}
5376
5377static void ipw_copy_rates(struct ipw_supported_rates *dest,
5378				  const struct ipw_supported_rates *src)
5379{
5380	u8 i;
5381	for (i = 0; i < src->num_rates; i++)
5382		dest->supported_rates[i] = src->supported_rates[i];
5383	dest->num_rates = src->num_rates;
5384}
5385
5386/* TODO: Look at sniffed packets in the air to determine if the basic rate
5387 * mask should ever be used -- right now all callers to add the scan rates are
5388 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5389static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5390				   u8 modulation, u32 rate_mask)
5391{
5392	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5393	    LIBIPW_BASIC_RATE_MASK : 0;
5394
5395	if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5396		rates->supported_rates[rates->num_rates++] =
5397		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5398
5399	if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5400		rates->supported_rates[rates->num_rates++] =
5401		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5402
5403	if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5404		rates->supported_rates[rates->num_rates++] = basic_mask |
5405		    LIBIPW_CCK_RATE_5MB;
5406
5407	if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5408		rates->supported_rates[rates->num_rates++] = basic_mask |
5409		    LIBIPW_CCK_RATE_11MB;
5410}
5411
5412static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5413				    u8 modulation, u32 rate_mask)
5414{
5415	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5416	    LIBIPW_BASIC_RATE_MASK : 0;
5417
5418	if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5419		rates->supported_rates[rates->num_rates++] = basic_mask |
5420		    LIBIPW_OFDM_RATE_6MB;
5421
5422	if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5423		rates->supported_rates[rates->num_rates++] =
5424		    LIBIPW_OFDM_RATE_9MB;
5425
5426	if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5427		rates->supported_rates[rates->num_rates++] = basic_mask |
5428		    LIBIPW_OFDM_RATE_12MB;
5429
5430	if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5431		rates->supported_rates[rates->num_rates++] =
5432		    LIBIPW_OFDM_RATE_18MB;
5433
5434	if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5435		rates->supported_rates[rates->num_rates++] = basic_mask |
5436		    LIBIPW_OFDM_RATE_24MB;
5437
5438	if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5439		rates->supported_rates[rates->num_rates++] =
5440		    LIBIPW_OFDM_RATE_36MB;
5441
5442	if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5443		rates->supported_rates[rates->num_rates++] =
5444		    LIBIPW_OFDM_RATE_48MB;
5445
5446	if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5447		rates->supported_rates[rates->num_rates++] =
5448		    LIBIPW_OFDM_RATE_54MB;
5449}
5450
5451struct ipw_network_match {
5452	struct libipw_network *network;
5453	struct ipw_supported_rates rates;
5454};
5455
5456static int ipw_find_adhoc_network(struct ipw_priv *priv,
5457				  struct ipw_network_match *match,
5458				  struct libipw_network *network,
5459				  int roaming)
5460{
5461	struct ipw_supported_rates rates;
5462	DECLARE_SSID_BUF(ssid);
5463
5464	/* Verify that this network's capability is compatible with the
5465	 * current mode (AdHoc or Infrastructure) */
5466	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5467	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5468		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5469				"capability mismatch.\n",
5470				print_ssid(ssid, network->ssid,
5471					   network->ssid_len),
5472				network->bssid);
5473		return 0;
5474	}
5475
5476	if (unlikely(roaming)) {
5477		/* If we are roaming, check whether this is a valid
5478		 * network to try to roam to */
5479		if ((network->ssid_len != match->network->ssid_len) ||
5480		    memcmp(network->ssid, match->network->ssid,
5481			   network->ssid_len)) {
5482			IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5483					"because of non-network ESSID.\n",
5484					print_ssid(ssid, network->ssid,
5485						   network->ssid_len),
5486					network->bssid);
5487			return 0;
5488		}
5489	} else {
5490		/* If an ESSID has been configured then compare the broadcast
5491		 * ESSID to ours */
5492		if ((priv->config & CFG_STATIC_ESSID) &&
5493		    ((network->ssid_len != priv->essid_len) ||
5494		     memcmp(network->ssid, priv->essid,
5495			    min(network->ssid_len, priv->essid_len)))) {
5496			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5497
5498			strncpy(escaped,
5499				print_ssid(ssid, network->ssid,
5500					   network->ssid_len),
5501				sizeof(escaped));
5502			IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5503					"because of ESSID mismatch: '%s'.\n",
5504					escaped, network->bssid,
5505					print_ssid(ssid, priv->essid,
5506						   priv->essid_len));
5507			return 0;
5508		}
5509	}
5510
5511	/* If the current match has a newer timestamp than this network,
5512	 * don't bother testing everything else. */
5513
5514	if (network->time_stamp[0] < match->network->time_stamp[0]) {
5515		IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5516				"current network.\n",
5517				print_ssid(ssid, match->network->ssid,
5518					   match->network->ssid_len));
5519		return 0;
5520	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5521		IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5522				"current network.\n",
5523				print_ssid(ssid, match->network->ssid,
5524					   match->network->ssid_len));
5525		return 0;
5526	}
5527
5528	/* Now go through and see if the requested network is valid... */
5529	if (priv->ieee->scan_age != 0 &&
5530	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5531		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5532				"because of age: %ums.\n",
5533				print_ssid(ssid, network->ssid,
5534					   network->ssid_len),
5535				network->bssid,
5536				jiffies_to_msecs(jiffies -
5537						 network->last_scanned));
5538		return 0;
5539	}
5540
5541	if ((priv->config & CFG_STATIC_CHANNEL) &&
5542	    (network->channel != priv->channel)) {
5543		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5544				"because of channel mismatch: %d != %d.\n",
5545				print_ssid(ssid, network->ssid,
5546					   network->ssid_len),
5547				network->bssid,
5548				network->channel, priv->channel);
5549		return 0;
5550	}
5551
5552	/* Verify privacy compatibility */
5553	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5554	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5555		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5556				"because of privacy mismatch: %s != %s.\n",
5557				print_ssid(ssid, network->ssid,
5558					   network->ssid_len),
5559				network->bssid,
5560				priv->
5561				capability & CAP_PRIVACY_ON ? "on" : "off",
5562				network->
5563				capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5564				"off");
5565		return 0;
5566	}
5567
5568	if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5569		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5570				"because it has the same BSSID as the current network: %pM"
5571				".\n", print_ssid(ssid, network->ssid,
5572						  network->ssid_len),
5573				network->bssid,
5574				priv->bssid);
5575		return 0;
5576	}
5577
5578	/* Filter out any incompatible freq / mode combinations */
5579	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5580		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5581				"because of invalid frequency/mode "
5582				"combination.\n",
5583				print_ssid(ssid, network->ssid,
5584					   network->ssid_len),
5585				network->bssid);
5586		return 0;
5587	}
5588
5589	/* Ensure that the rates supported by the driver are compatible with
5590	 * this AP, including verification of basic rates (mandatory) */
5591	if (!ipw_compatible_rates(priv, network, &rates)) {
5592		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5593				"because configured rate mask excludes "
5594				"AP mandatory rate.\n",
5595				print_ssid(ssid, network->ssid,
5596					   network->ssid_len),
5597				network->bssid);
5598		return 0;
5599	}
5600
5601	if (rates.num_rates == 0) {
5602		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5603				"because of no compatible rates.\n",
5604				print_ssid(ssid, network->ssid,
5605					   network->ssid_len),
5606				network->bssid);
5607		return 0;
5608	}
5609
5610	/* TODO: Perform any further minimal comparative tests.  We do not
5611	 * want to put too much policy logic here; intelligent scan selection
5612	 * should occur within a generic IEEE 802.11 user space tool.  */
5613
5614	/* Set up 'new' AP to this network */
5615	ipw_copy_rates(&match->rates, &rates);
5616	match->network = network;
5617	IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5618			print_ssid(ssid, network->ssid, network->ssid_len),
5619			network->bssid);
5620
5621	return 1;
5622}
5623
5624static void ipw_merge_adhoc_network(struct work_struct *work)
5625{
5626	DECLARE_SSID_BUF(ssid);
5627	struct ipw_priv *priv =
5628		container_of(work, struct ipw_priv, merge_networks);
5629	struct libipw_network *network = NULL;
5630	struct ipw_network_match match = {
5631		.network = priv->assoc_network
5632	};
5633
5634	if ((priv->status & STATUS_ASSOCIATED) &&
5635	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5636		/* First pass through ROAM process -- look for a better
5637		 * network */
5638		unsigned long flags;
5639
5640		spin_lock_irqsave(&priv->ieee->lock, flags);
5641		list_for_each_entry(network, &priv->ieee->network_list, list) {
5642			if (network != priv->assoc_network)
5643				ipw_find_adhoc_network(priv, &match, network,
5644						       1);
5645		}
5646		spin_unlock_irqrestore(&priv->ieee->lock, flags);
5647
5648		if (match.network == priv->assoc_network) {
5649			IPW_DEBUG_MERGE("No better ADHOC in this network to "
5650					"merge to.\n");
5651			return;
5652		}
5653
5654		mutex_lock(&priv->mutex);
5655		if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5656			IPW_DEBUG_MERGE("remove network %s\n",
5657					print_ssid(ssid, priv->essid,
5658						   priv->essid_len));
5659			ipw_remove_current_network(priv);
5660		}
5661
5662		ipw_disassociate(priv);
5663		priv->assoc_network = match.network;
5664		mutex_unlock(&priv->mutex);
5665		return;
5666	}
5667}
5668
5669static int ipw_best_network(struct ipw_priv *priv,
5670			    struct ipw_network_match *match,
5671			    struct libipw_network *network, int roaming)
5672{
5673	struct ipw_supported_rates rates;
5674	DECLARE_SSID_BUF(ssid);
5675
5676	/* Verify that this network's capability is compatible with the
5677	 * current mode (AdHoc or Infrastructure) */
5678	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5679	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
5680	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5681	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5682		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5683				"capability mismatch.\n",
5684				print_ssid(ssid, network->ssid,
5685					   network->ssid_len),
5686				network->bssid);
5687		return 0;
5688	}
5689
5690	if (unlikely(roaming)) {
5691		/* If we are roaming, then check whether this is a valid
5692		 * network to try to roam to */
5693		if ((network->ssid_len != match->network->ssid_len) ||
5694		    memcmp(network->ssid, match->network->ssid,
5695			   network->ssid_len)) {
5696			IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5697					"because of non-network ESSID.\n",
5698					print_ssid(ssid, network->ssid,
5699						   network->ssid_len),
5700					network->bssid);
5701			return 0;
5702		}
5703	} else {
5704		/* If an ESSID has been configured then compare the broadcast
5705		 * ESSID to ours */
5706		if ((priv->config & CFG_STATIC_ESSID) &&
5707		    ((network->ssid_len != priv->essid_len) ||
5708		     memcmp(network->ssid, priv->essid,
5709			    min(network->ssid_len, priv->essid_len)))) {
5710			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5711			strncpy(escaped,
5712				print_ssid(ssid, network->ssid,
5713					   network->ssid_len),
5714				sizeof(escaped));
5715			IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5716					"because of ESSID mismatch: '%s'.\n",
5717					escaped, network->bssid,
5718					print_ssid(ssid, priv->essid,
5719						   priv->essid_len));
5720			return 0;
5721		}
5722	}
5723
5724	/* If the old network rate is better than this one, don't bother
5725	 * testing everything else. */
5726	if (match->network && match->network->stats.rssi > network->stats.rssi) {
5727		char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5728		strncpy(escaped,
5729			print_ssid(ssid, network->ssid, network->ssid_len),
5730			sizeof(escaped));
5731		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5732				"'%s (%pM)' has a stronger signal.\n",
5733				escaped, network->bssid,
5734				print_ssid(ssid, match->network->ssid,
5735					   match->network->ssid_len),
5736				match->network->bssid);
5737		return 0;
5738	}
5739
5740	/* If this network has already had an association attempt within the
5741	 * last 3 seconds, do not try and associate again... */
5742	if (network->last_associate &&
5743	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5744		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5745				"because of storming (%ums since last "
5746				"assoc attempt).\n",
5747				print_ssid(ssid, network->ssid,
5748					   network->ssid_len),
5749				network->bssid,
5750				jiffies_to_msecs(jiffies -
5751						 network->last_associate));
5752		return 0;
5753	}
5754
5755	/* Now go through and see if the requested network is valid... */
5756	if (priv->ieee->scan_age != 0 &&
5757	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5758		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5759				"because of age: %ums.\n",
5760				print_ssid(ssid, network->ssid,
5761					   network->ssid_len),
5762				network->bssid,
5763				jiffies_to_msecs(jiffies -
5764						 network->last_scanned));
5765		return 0;
5766	}
5767
5768	if ((priv->config & CFG_STATIC_CHANNEL) &&
5769	    (network->channel != priv->channel)) {
5770		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5771				"because of channel mismatch: %d != %d.\n",
5772				print_ssid(ssid, network->ssid,
5773					   network->ssid_len),
5774				network->bssid,
5775				network->channel, priv->channel);
5776		return 0;
5777	}
5778
5779	/* Verify privacy compatibility */
5780	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5781	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5782		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5783				"because of privacy mismatch: %s != %s.\n",
5784				print_ssid(ssid, network->ssid,
5785					   network->ssid_len),
5786				network->bssid,
5787				priv->capability & CAP_PRIVACY_ON ? "on" :
5788				"off",
5789				network->capability &
5790				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5791		return 0;
5792	}
5793
5794	if ((priv->config & CFG_STATIC_BSSID) &&
5795	    memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5796		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5797				"because of BSSID mismatch: %pM.\n",
5798				print_ssid(ssid, network->ssid,
5799					   network->ssid_len),
5800				network->bssid, priv->bssid);
5801		return 0;
5802	}
5803
5804	/* Filter out any incompatible freq / mode combinations */
5805	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5806		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5807				"because of invalid frequency/mode "
5808				"combination.\n",
5809				print_ssid(ssid, network->ssid,
5810					   network->ssid_len),
5811				network->bssid);
5812		return 0;
5813	}
5814
5815	/* Filter out invalid channel in current GEO */
5816	if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5817		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5818				"because of invalid channel in current GEO\n",
5819				print_ssid(ssid, network->ssid,
5820					   network->ssid_len),
5821				network->bssid);
5822		return 0;
5823	}
5824
5825	/* Ensure that the rates supported by the driver are compatible with
5826	 * this AP, including verification of basic rates (mandatory) */
5827	if (!ipw_compatible_rates(priv, network, &rates)) {
5828		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5829				"because configured rate mask excludes "
5830				"AP mandatory rate.\n",
5831				print_ssid(ssid, network->ssid,
5832					   network->ssid_len),
5833				network->bssid);
5834		return 0;
5835	}
5836
5837	if (rates.num_rates == 0) {
5838		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5839				"because of no compatible rates.\n",
5840				print_ssid(ssid, network->ssid,
5841					   network->ssid_len),
5842				network->bssid);
5843		return 0;
5844	}
5845
5846	/* TODO: Perform any further minimal comparative tests.  We do not
5847	 * want to put too much policy logic here; intelligent scan selection
5848	 * should occur within a generic IEEE 802.11 user space tool.  */
5849
5850	/* Set up 'new' AP to this network */
5851	ipw_copy_rates(&match->rates, &rates);
5852	match->network = network;
5853
5854	IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5855			print_ssid(ssid, network->ssid, network->ssid_len),
5856			network->bssid);
5857
5858	return 1;
5859}
5860
5861static void ipw_adhoc_create(struct ipw_priv *priv,
5862			     struct libipw_network *network)
5863{
5864	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5865	int i;
5866
5867	/*
5868	 * For the purposes of scanning, we can set our wireless mode
5869	 * to trigger scans across combinations of bands, but when it
5870	 * comes to creating a new ad-hoc network, we have to tell the FW
5871	 * exactly which band to use.
5872	 *
5873	 * We also have the possibility of an invalid channel for the
5874	 * chosen band.  Attempting to create a new ad-hoc network
5875	 * with an invalid channel for the wireless mode will trigger a
5876	 * FW fatal error.
5877	 *
5878	 */
5879	switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5880	case LIBIPW_52GHZ_BAND:
5881		network->mode = IEEE_A;
5882		i = libipw_channel_to_index(priv->ieee, priv->channel);
5883		BUG_ON(i == -1);
5884		if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5885			IPW_WARNING("Overriding invalid channel\n");
5886			priv->channel = geo->a[0].channel;
5887		}
5888		break;
5889
5890	case LIBIPW_24GHZ_BAND:
5891		if (priv->ieee->mode & IEEE_G)
5892			network->mode = IEEE_G;
5893		else
5894			network->mode = IEEE_B;
5895		i = libipw_channel_to_index(priv->ieee, priv->channel);
5896		BUG_ON(i == -1);
5897		if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5898			IPW_WARNING("Overriding invalid channel\n");
5899			priv->channel = geo->bg[0].channel;
5900		}
5901		break;
5902
5903	default:
5904		IPW_WARNING("Overriding invalid channel\n");
5905		if (priv->ieee->mode & IEEE_A) {
5906			network->mode = IEEE_A;
5907			priv->channel = geo->a[0].channel;
5908		} else if (priv->ieee->mode & IEEE_G) {
5909			network->mode = IEEE_G;
5910			priv->channel = geo->bg[0].channel;
5911		} else {
5912			network->mode = IEEE_B;
5913			priv->channel = geo->bg[0].channel;
5914		}
5915		break;
5916	}
5917
5918	network->channel = priv->channel;
5919	priv->config |= CFG_ADHOC_PERSIST;
5920	ipw_create_bssid(priv, network->bssid);
5921	network->ssid_len = priv->essid_len;
5922	memcpy(network->ssid, priv->essid, priv->essid_len);
5923	memset(&network->stats, 0, sizeof(network->stats));
5924	network->capability = WLAN_CAPABILITY_IBSS;
5925	if (!(priv->config & CFG_PREAMBLE_LONG))
5926		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5927	if (priv->capability & CAP_PRIVACY_ON)
5928		network->capability |= WLAN_CAPABILITY_PRIVACY;
5929	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5930	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5931	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5932	memcpy(network->rates_ex,
5933	       &priv->rates.supported_rates[network->rates_len],
5934	       network->rates_ex_len);
5935	network->last_scanned = 0;
5936	network->flags = 0;
5937	network->last_associate = 0;
5938	network->time_stamp[0] = 0;
5939	network->time_stamp[1] = 0;
5940	network->beacon_interval = 100;	/* Default */
5941	network->listen_interval = 10;	/* Default */
5942	network->atim_window = 0;	/* Default */
5943	network->wpa_ie_len = 0;
5944	network->rsn_ie_len = 0;
5945}
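
/*
 * The values filled in above (beacon interval 100, listen interval 10,
 * ATIM window 0, zeroed timestamps) are defaults for a locally created
 * IBSS; there is no received beacon to take them from yet.
 */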
5946
5947static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5948{
5949	struct ipw_tgi_tx_key key;
5950
5951	if (!(priv->ieee->sec.flags & (1 << index)))
5952		return;
5953
5954	key.key_id = index;
5955	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5956	key.security_type = type;
5957	key.station_index = 0;	/* always 0 for BSS */
5958	key.flags = 0;
5959	/* 0 for new key; previous value of counter (after fatal error) */
5960	key.tx_counter[0] = cpu_to_le32(0);
5961	key.tx_counter[1] = cpu_to_le32(0);
5962
5963	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5964}
5965
5966static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5967{
5968	struct ipw_wep_key key;
5969	int i;
5970
5971	key.cmd_id = DINO_CMD_WEP_KEY;
5972	key.seq_num = 0;
5973
5974	/* Note: AES keys cannot be set multiple times.
5975	 * Only set them the first time. */
5976	for (i = 0; i < 4; i++) {
5977		key.key_index = i | type;
5978		if (!(priv->ieee->sec.flags & (1 << i))) {
5979			key.key_size = 0;
5980			continue;
5981		}
5982
5983		key.key_size = priv->ieee->sec.key_sizes[i];
5984		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5985
5986		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5987	}
5988}
5989
5990static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5991{
5992	if (priv->ieee->host_encrypt)
5993		return;
5994
5995	switch (level) {
5996	case SEC_LEVEL_3:
5997		priv->sys_config.disable_unicast_decryption = 0;
5998		priv->ieee->host_decrypt = 0;
5999		break;
6000	case SEC_LEVEL_2:
6001		priv->sys_config.disable_unicast_decryption = 1;
6002		priv->ieee->host_decrypt = 1;
6003		break;
6004	case SEC_LEVEL_1:
6005		priv->sys_config.disable_unicast_decryption = 0;
6006		priv->ieee->host_decrypt = 0;
6007		break;
6008	case SEC_LEVEL_0:
6009		priv->sys_config.disable_unicast_decryption = 1;
6010		break;
6011	default:
6012		break;
6013	}
6014}
6015
6016static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6017{
6018	if (priv->ieee->host_encrypt)
6019		return;
6020
6021	switch (level) {
6022	case SEC_LEVEL_3:
6023		priv->sys_config.disable_multicast_decryption = 0;
6024		break;
6025	case SEC_LEVEL_2:
6026		priv->sys_config.disable_multicast_decryption = 1;
6027		break;
6028	case SEC_LEVEL_1:
6029		priv->sys_config.disable_multicast_decryption = 0;
6030		break;
6031	case SEC_LEVEL_0:
6032		priv->sys_config.disable_multicast_decryption = 1;
6033		break;
6034	default:
6035		break;
6036	}
6037}
6038
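/*
 * Push the configured keys to the firmware for the active security
 * level.  SEC_LEVEL_3 corresponds to CCMP, SEC_LEVEL_2 to TKIP and
 * SEC_LEVEL_1 to WEP (the same mapping wext_cipher2level() uses below).
 */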
6039static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6040{
6041	switch (priv->ieee->sec.level) {
6042	case SEC_LEVEL_3:
6043		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6044			ipw_send_tgi_tx_key(priv,
6045					    DCT_FLAG_EXT_SECURITY_CCM,
6046					    priv->ieee->sec.active_key);
6047
6048		if (!priv->ieee->host_mc_decrypt)
6049			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6050		break;
6051	case SEC_LEVEL_2:
6052		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6053			ipw_send_tgi_tx_key(priv,
6054					    DCT_FLAG_EXT_SECURITY_TKIP,
6055					    priv->ieee->sec.active_key);
6056		break;
6057	case SEC_LEVEL_1:
6058		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6059		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6060		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6061		break;
6062	case SEC_LEVEL_0:
6063	default:
6064		break;
6065	}
6066}
6067
6068static void ipw_adhoc_check(void *data)
6069{
6070	struct ipw_priv *priv = data;
6071
6072	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6073	    !(priv->config & CFG_ADHOC_PERSIST)) {
6074		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6075			  IPW_DL_STATE | IPW_DL_ASSOC,
6076			  "Missed beacon: %d - disassociate\n",
6077			  priv->missed_adhoc_beacons);
6078		ipw_remove_current_network(priv);
6079		ipw_disassociate(priv);
6080		return;
6081	}
6082
6083	queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6084			   le16_to_cpu(priv->assoc_request.beacon_interval));
6085}
6086
6087static void ipw_bg_adhoc_check(struct work_struct *work)
6088{
6089	struct ipw_priv *priv =
6090		container_of(work, struct ipw_priv, adhoc_check.work);
6091	mutex_lock(&priv->mutex);
6092	ipw_adhoc_check(priv);
6093	mutex_unlock(&priv->mutex);
6094}
6095
6096static void ipw_debug_config(struct ipw_priv *priv)
6097{
6098	DECLARE_SSID_BUF(ssid);
6099	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6100		       "[CFG 0x%08X]\n", priv->config);
6101	if (priv->config & CFG_STATIC_CHANNEL)
6102		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6103	else
6104		IPW_DEBUG_INFO("Channel unlocked.\n");
6105	if (priv->config & CFG_STATIC_ESSID)
6106		IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6107			       print_ssid(ssid, priv->essid, priv->essid_len));
6108	else
6109		IPW_DEBUG_INFO("ESSID unlocked.\n");
6110	if (priv->config & CFG_STATIC_BSSID)
6111		IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6112	else
6113		IPW_DEBUG_INFO("BSSID unlocked.\n");
6114	if (priv->capability & CAP_PRIVACY_ON)
6115		IPW_DEBUG_INFO("PRIVACY on\n");
6116	else
6117		IPW_DEBUG_INFO("PRIVACY off\n");
6118	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6119}
6120
6121static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6122{
6123	/* TODO: Verify that this works... */
6124	struct ipw_fixed_rate fr;
6125	u32 reg;
6126	u16 mask = 0;
6127	u16 new_tx_rates = priv->rates_mask;
6128
6129	/* Identify 'current FW band' and match it with the fixed
6130	 * Tx rates */
6131
6132	switch (priv->ieee->freq_band) {
6133	case LIBIPW_52GHZ_BAND:	/* A only */
6134		/* IEEE_A */
6135		if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6136			/* Invalid fixed rate mask */
6137			IPW_DEBUG_WX
6138			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6139			new_tx_rates = 0;
6140			break;
6141		}
6142
6143		new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6144		break;
6145
6146	default:		/* 2.4Ghz or Mixed */
6147		/* IEEE_B */
6148		if (mode == IEEE_B) {
6149			if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6150				/* Invalid fixed rate mask */
6151				IPW_DEBUG_WX
6152				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6153				new_tx_rates = 0;
6154			}
6155			break;
6156		}
6157
6158		/* IEEE_G */
6159		if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6160				    LIBIPW_OFDM_RATES_MASK)) {
6161			/* Invalid fixed rate mask */
6162			IPW_DEBUG_WX
6163			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6164			new_tx_rates = 0;
6165			break;
6166		}
6167
6168		if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6169			mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6170			new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6171		}
6172
6173		if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6174			mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6175			new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6176		}
6177
6178		if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6179			mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6180			new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6181		}
6182
6183		new_tx_rates |= mask;
6184		break;
6185	}
6186
6187	fr.tx_rates = cpu_to_le16(new_tx_rates);
6188
6189	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6190	ipw_write_reg32(priv, reg, *(u32 *) & fr);
6191}
6192
6193static void ipw_abort_scan(struct ipw_priv *priv)
6194{
6195	int err;
6196
6197	if (priv->status & STATUS_SCAN_ABORTING) {
6198		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6199		return;
6200	}
6201	priv->status |= STATUS_SCAN_ABORTING;
6202
6203	err = ipw_send_scan_abort(priv);
6204	if (err)
6205		IPW_DEBUG_HC("Request to abort scan failed.\n");
6206}
6207
6208static void ipw_add_scan_channels(struct ipw_priv *priv,
6209				  struct ipw_scan_request_ext *scan,
6210				  int scan_type)
6211{
6212	int channel_index = 0;
6213	const struct libipw_geo *geo;
6214	int i;
6215
6216	geo = libipw_get_geo(priv->ieee);
6217
6218	if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6219		int start = channel_index;
6220		for (i = 0; i < geo->a_channels; i++) {
6221			if ((priv->status & STATUS_ASSOCIATED) &&
6222			    geo->a[i].channel == priv->channel)
6223				continue;
6224			channel_index++;
6225			scan->channels_list[channel_index] = geo->a[i].channel;
6226			ipw_set_scan_type(scan, channel_index,
6227					  geo->a[i].
6228					  flags & LIBIPW_CH_PASSIVE_ONLY ?
6229					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6230					  scan_type);
6231		}
6232
6233		if (start != channel_index) {
6234			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6235			    (channel_index - start);
6236			channel_index++;
6237		}
6238	}
6239
6240	if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6241		int start = channel_index;
6242		if (priv->config & CFG_SPEED_SCAN) {
6243			int index;
6244			u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6245				/* nop out the list */
6246				[0] = 0
6247			};
6248
6249			u8 channel;
6250			while (channel_index < IPW_SCAN_CHANNELS - 1) {
6251				channel =
6252				    priv->speed_scan[priv->speed_scan_pos];
6253				if (channel == 0) {
6254					priv->speed_scan_pos = 0;
6255					channel = priv->speed_scan[0];
6256				}
6257				if ((priv->status & STATUS_ASSOCIATED) &&
6258				    channel == priv->channel) {
6259					priv->speed_scan_pos++;
6260					continue;
6261				}
6262
6263				/* If this channel has already been
6264				 * added to this scan, break out of the
6265				 * loop so it becomes the first channel
6266				 * of the next scan.
6267				 */
6268				if (channels[channel - 1] != 0)
6269					break;
6270
6271				channels[channel - 1] = 1;
6272				priv->speed_scan_pos++;
6273				channel_index++;
6274				scan->channels_list[channel_index] = channel;
6275				index =
6276				    libipw_channel_to_index(priv->ieee, channel);
6277				ipw_set_scan_type(scan, channel_index,
6278						  geo->bg[index].
6279						  flags &
6280						  LIBIPW_CH_PASSIVE_ONLY ?
6281						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6282						  : scan_type);
6283			}
6284		} else {
6285			for (i = 0; i < geo->bg_channels; i++) {
6286				if ((priv->status & STATUS_ASSOCIATED) &&
6287				    geo->bg[i].channel == priv->channel)
6288					continue;
6289				channel_index++;
6290				scan->channels_list[channel_index] =
6291				    geo->bg[i].channel;
6292				ipw_set_scan_type(scan, channel_index,
6293						  geo->bg[i].
6294						  flags &
6295						  LIBIPW_CH_PASSIVE_ONLY ?
6296						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6297						  : scan_type);
6298			}
6299		}
6300
6301		if (start != channel_index) {
6302			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6303			    (channel_index - start);
6304		}
6305	}
6306}
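
/*
 * Illustrative sketch, not part of the driver: ipw_add_scan_channels()
 * above packs each band's channels behind a one-byte group header of
 * the form (band_mode << 6) | count, where count is the number of
 * channel entries that follow.  The hypothetical helper below only
 * documents that encoding and is compiled out.
 */
#if 0
static void example_decode_scan_group(u8 header, u8 *band_mode, u8 *count)
{
	*band_mode = header >> 6;	/* e.g. IPW_A_MODE or IPW_B_MODE */
	*count = header & 0x3f;		/* channels following this header */
}
#endif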
6307
6308static int ipw_passive_dwell_time(struct ipw_priv *priv)
6309{
6310	/* staying on passive channels longer than the DTIM interval during a
6311	 * scan, while associated, causes the firmware to cancel the scan
6312	 * without notification. Hence, don't stay on passive channels longer
6313	 * than the beacon interval.
6314	 */
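	/* For example, assuming the common 100 TU beacon interval, an
	 * associated passive dwell works out to 100 - 10 = 90; scans
	 * while unassociated fall back to the 120 default below. */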
6315	if (priv->status & STATUS_ASSOCIATED
6316	    && priv->assoc_network->beacon_interval > 10)
6317		return priv->assoc_network->beacon_interval - 10;
6318	else
6319		return 120;
6320}
6321
6322static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6323{
6324	struct ipw_scan_request_ext scan;
6325	int err = 0, scan_type;
6326
6327	if (!(priv->status & STATUS_INIT) ||
6328	    (priv->status & STATUS_EXIT_PENDING))
6329		return 0;
6330
6331	mutex_lock(&priv->mutex);
6332
6333	if (direct && (priv->direct_scan_ssid_len == 0)) {
6334		IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6335		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6336		goto done;
6337	}
6338
6339	if (priv->status & STATUS_SCANNING) {
6340		IPW_DEBUG_HC("Concurrent scan requested.  Queuing.\n");
6341		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6342					STATUS_SCAN_PENDING;
6343		goto done;
6344	}
6345
6346	if (!(priv->status & STATUS_SCAN_FORCED) &&
6347	    priv->status & STATUS_SCAN_ABORTING) {
6348		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
6349		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6350					STATUS_SCAN_PENDING;
6351		goto done;
6352	}
6353
6354	if (priv->status & STATUS_RF_KILL_MASK) {
6355		IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6356		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6357					STATUS_SCAN_PENDING;
6358		goto done;
6359	}
6360
6361	memset(&scan, 0, sizeof(scan));
6362	scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6363
6364	if (type == IW_SCAN_TYPE_PASSIVE) {
6365		IPW_DEBUG_WX("use passive scanning\n");
6366		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6367		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6368			cpu_to_le16(ipw_passive_dwell_time(priv));
6369		ipw_add_scan_channels(priv, &scan, scan_type);
6370		goto send_request;
6371	}
6372
6373	/* Use active scan by default. */
6374	if (priv->config & CFG_SPEED_SCAN)
6375		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6376			cpu_to_le16(30);
6377	else
6378		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6379			cpu_to_le16(20);
6380
6381	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6382		cpu_to_le16(20);
6383
6384	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6385		cpu_to_le16(ipw_passive_dwell_time(priv));
6386	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6387
6388#ifdef CONFIG_IPW2200_MONITOR
6389	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6390		u8 channel;
6391		u8 band = 0;
6392
6393		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6394		case LIBIPW_52GHZ_BAND:
6395			band = (u8) (IPW_A_MODE << 6) | 1;
6396			channel = priv->channel;
6397			break;
6398
6399		case LIBIPW_24GHZ_BAND:
6400			band = (u8) (IPW_B_MODE << 6) | 1;
6401			channel = priv->channel;
6402			break;
6403
6404		default:
6405			band = (u8) (IPW_B_MODE << 6) | 1;
6406			channel = 9;
6407			break;
6408		}
6409
6410		scan.channels_list[0] = band;
6411		scan.channels_list[1] = channel;
6412		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6413
6414		/* NOTE:  The card will sit on this channel for this time
6415		 * period.  Scan aborts are timing sensitive and frequently
6416		 * result in firmware restarts.  As such, it is best to
6417		 * set a small dwell_time here and just keep re-issuing
6418		 * scans.  Otherwise fast channel hopping will not actually
6419		 * hop channels.
6420		 *
6421		 * TODO: Move SPEED SCAN support to all modes and bands */
6422		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6423			cpu_to_le16(2000);
6424	} else {
6425#endif				/* CONFIG_IPW2200_MONITOR */
6426		/* Honor direct scans first, otherwise if we are roaming make
6427		 * this a direct scan for the current network.  Finally,
6428		 * ensure that every other scan is a fast channel hop scan */
6429		if (direct) {
6430			err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6431			                    priv->direct_scan_ssid_len);
6432			if (err) {
6433				IPW_DEBUG_HC("Attempt to send SSID command "
6434					     "failed\n");
6435				goto done;
6436			}
6437
6438			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6439		} else if ((priv->status & STATUS_ROAMING)
6440			   || (!(priv->status & STATUS_ASSOCIATED)
6441			       && (priv->config & CFG_STATIC_ESSID)
6442			       && (le32_to_cpu(scan.full_scan_index) % 2))) {
6443			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6444			if (err) {
6445				IPW_DEBUG_HC("Attempt to send SSID command "
6446					     "failed.\n");
6447				goto done;
6448			}
6449
6450			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6451		} else
6452			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6453
6454		ipw_add_scan_channels(priv, &scan, scan_type);
6455#ifdef CONFIG_IPW2200_MONITOR
6456	}
6457#endif
6458
6459send_request:
6460	err = ipw_send_scan_request_ext(priv, &scan);
6461	if (err) {
6462		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6463		goto done;
6464	}
6465
6466	priv->status |= STATUS_SCANNING;
6467	if (direct) {
6468		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6469		priv->direct_scan_ssid_len = 0;
6470	} else
6471		priv->status &= ~STATUS_SCAN_PENDING;
6472
6473	queue_delayed_work(priv->workqueue, &priv->scan_check,
6474			   IPW_SCAN_CHECK_WATCHDOG);
6475done:
6476	mutex_unlock(&priv->mutex);
6477	return err;
6478}
6479
6480static void ipw_request_passive_scan(struct work_struct *work)
6481{
6482	struct ipw_priv *priv =
6483		container_of(work, struct ipw_priv, request_passive_scan.work);
6484	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6485}
6486
6487static void ipw_request_scan(struct work_struct *work)
6488{
6489	struct ipw_priv *priv =
6490		container_of(work, struct ipw_priv, request_scan.work);
6491	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6492}
6493
6494static void ipw_request_direct_scan(struct work_struct *work)
6495{
6496	struct ipw_priv *priv =
6497		container_of(work, struct ipw_priv, request_direct_scan.work);
6498	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6499}
6500
6501static void ipw_bg_abort_scan(struct work_struct *work)
6502{
6503	struct ipw_priv *priv =
6504		container_of(work, struct ipw_priv, abort_scan);
6505	mutex_lock(&priv->mutex);
6506	ipw_abort_scan(priv);
6507	mutex_unlock(&priv->mutex);
6508}
6509
6510static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6511{
6512	/* This is called when wpa_supplicant loads and closes the driver
6513	 * interface. */
6514	priv->ieee->wpa_enabled = value;
6515	return 0;
6516}
6517
6518static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6519{
6520	struct libipw_device *ieee = priv->ieee;
6521	struct libipw_security sec = {
6522		.flags = SEC_AUTH_MODE,
6523	};
6524	int ret = 0;
6525
6526	if (value & IW_AUTH_ALG_SHARED_KEY) {
6527		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6528		ieee->open_wep = 0;
6529	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6530		sec.auth_mode = WLAN_AUTH_OPEN;
6531		ieee->open_wep = 1;
6532	} else if (value & IW_AUTH_ALG_LEAP) {
6533		sec.auth_mode = WLAN_AUTH_LEAP;
6534		ieee->open_wep = 1;
6535	} else
6536		return -EINVAL;
6537
6538	if (ieee->set_security)
6539		ieee->set_security(ieee->dev, &sec);
6540	else
6541		ret = -EOPNOTSUPP;
6542
6543	return ret;
6544}
6545
6546static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6547				int wpa_ie_len)
6548{
6549	/* make sure WPA is enabled */
6550	ipw_wpa_enable(priv, 1);
6551}
6552
6553static int ipw_set_rsn_capa(struct ipw_priv *priv,
6554			    char *capabilities, int length)
6555{
6556	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6557
6558	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6559				capabilities);
6560}
6561
6562/*
6563 * WE-18 support
6564 */
6565
6566/* SIOCSIWGENIE */
6567static int ipw_wx_set_genie(struct net_device *dev,
6568			    struct iw_request_info *info,
6569			    union iwreq_data *wrqu, char *extra)
6570{
6571	struct ipw_priv *priv = libipw_priv(dev);
6572	struct libipw_device *ieee = priv->ieee;
6573	u8 *buf;
6574	int err = 0;
6575
6576	if (wrqu->data.length > MAX_WPA_IE_LEN ||
6577	    (wrqu->data.length && extra == NULL))
6578		return -EINVAL;
6579
6580	if (wrqu->data.length) {
6581		buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6582		if (buf == NULL) {
6583			err = -ENOMEM;
6584			goto out;
6585		}
6586
6587		memcpy(buf, extra, wrqu->data.length);
6588		kfree(ieee->wpa_ie);
6589		ieee->wpa_ie = buf;
6590		ieee->wpa_ie_len = wrqu->data.length;
6591	} else {
6592		kfree(ieee->wpa_ie);
6593		ieee->wpa_ie = NULL;
6594		ieee->wpa_ie_len = 0;
6595	}
6596
6597	ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6598      out:
6599	return err;
6600}
6601
6602/* SIOCGIWGENIE */
6603static int ipw_wx_get_genie(struct net_device *dev,
6604			    struct iw_request_info *info,
6605			    union iwreq_data *wrqu, char *extra)
6606{
6607	struct ipw_priv *priv = libipw_priv(dev);
6608	struct libipw_device *ieee = priv->ieee;
6609	int err = 0;
6610
6611	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6612		wrqu->data.length = 0;
6613		goto out;
6614	}
6615
6616	if (wrqu->data.length < ieee->wpa_ie_len) {
6617		err = -E2BIG;
6618		goto out;
6619	}
6620
6621	wrqu->data.length = ieee->wpa_ie_len;
6622	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6623
6624      out:
6625	return err;
6626}
6627
6628static int wext_cipher2level(int cipher)
6629{
6630	switch (cipher) {
6631	case IW_AUTH_CIPHER_NONE:
6632		return SEC_LEVEL_0;
6633	case IW_AUTH_CIPHER_WEP40:
6634	case IW_AUTH_CIPHER_WEP104:
6635		return SEC_LEVEL_1;
6636	case IW_AUTH_CIPHER_TKIP:
6637		return SEC_LEVEL_2;
6638	case IW_AUTH_CIPHER_CCMP:
6639		return SEC_LEVEL_3;
6640	default:
6641		return -1;
6642	}
6643}
6644
6645/* SIOCSIWAUTH */
6646static int ipw_wx_set_auth(struct net_device *dev,
6647			   struct iw_request_info *info,
6648			   union iwreq_data *wrqu, char *extra)
6649{
6650	struct ipw_priv *priv = libipw_priv(dev);
6651	struct libipw_device *ieee = priv->ieee;
6652	struct iw_param *param = &wrqu->param;
6653	struct lib80211_crypt_data *crypt;
6654	unsigned long flags;
6655	int ret = 0;
6656
6657	switch (param->flags & IW_AUTH_INDEX) {
6658	case IW_AUTH_WPA_VERSION:
6659		break;
6660	case IW_AUTH_CIPHER_PAIRWISE:
6661		ipw_set_hw_decrypt_unicast(priv,
6662					   wext_cipher2level(param->value));
6663		break;
6664	case IW_AUTH_CIPHER_GROUP:
6665		ipw_set_hw_decrypt_multicast(priv,
6666					     wext_cipher2level(param->value));
6667		break;
6668	case IW_AUTH_KEY_MGMT:
6669		/*
6670		 * ipw2200 does not use these parameters
6671		 */
6672		break;
6673
6674	case IW_AUTH_TKIP_COUNTERMEASURES:
6675		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6676		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6677			break;
6678
6679		flags = crypt->ops->get_flags(crypt->priv);
6680
6681		if (param->value)
6682			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6683		else
6684			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6685
6686		crypt->ops->set_flags(flags, crypt->priv);
6687
6688		break;
6689
6690	case IW_AUTH_DROP_UNENCRYPTED:{
6691			/* HACK:
6692			 *
6693			 * wpa_supplicant calls set_wpa_enabled when the driver
6694			 * is loaded and unloaded, regardless of whether WPA is
6695			 * being used.  No other calls are made that could be used
6696			 * to determine whether encryption will be used before an
6697			 * association is expected.  If encryption is not being
6698			 * used, drop_unencrypted is set to false, else true -- we
6699			 * can use this to determine whether the CAP_PRIVACY_ON
6700			 * bit should be set.
6701			 */
6702			struct libipw_security sec = {
6703				.flags = SEC_ENABLED,
6704				.enabled = param->value,
6705			};
6706			priv->ieee->drop_unencrypted = param->value;
6707			/* We only change SEC_LEVEL for open mode. Others
6708			 * are set by ipw_wpa_set_encryption.
6709			 */
6710			if (!param->value) {
6711				sec.flags |= SEC_LEVEL;
6712				sec.level = SEC_LEVEL_0;
6713			} else {
6714				sec.flags |= SEC_LEVEL;
6715				sec.level = SEC_LEVEL_1;
6716			}
6717			if (priv->ieee->set_security)
6718				priv->ieee->set_security(priv->ieee->dev, &sec);
6719			break;
6720		}
6721
6722	case IW_AUTH_80211_AUTH_ALG:
6723		ret = ipw_wpa_set_auth_algs(priv, param->value);
6724		break;
6725
6726	case IW_AUTH_WPA_ENABLED:
6727		ret = ipw_wpa_enable(priv, param->value);
6728		ipw_disassociate(priv);
6729		break;
6730
6731	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6732		ieee->ieee802_1x = param->value;
6733		break;
6734
6735	case IW_AUTH_PRIVACY_INVOKED:
6736		ieee->privacy_invoked = param->value;
6737		break;
6738
6739	default:
6740		return -EOPNOTSUPP;
6741	}
6742	return ret;
6743}
6744
6745/* SIOCGIWAUTH */
6746static int ipw_wx_get_auth(struct net_device *dev,
6747			   struct iw_request_info *info,
6748			   union iwreq_data *wrqu, char *extra)
6749{
6750	struct ipw_priv *priv = libipw_priv(dev);
6751	struct libipw_device *ieee = priv->ieee;
6752	struct lib80211_crypt_data *crypt;
6753	struct iw_param *param = &wrqu->param;
6754	int ret = 0;
6755
6756	switch (param->flags & IW_AUTH_INDEX) {
6757	case IW_AUTH_WPA_VERSION:
6758	case IW_AUTH_CIPHER_PAIRWISE:
6759	case IW_AUTH_CIPHER_GROUP:
6760	case IW_AUTH_KEY_MGMT:
6761		/*
6762		 * wpa_supplicant will control these internally
6763		 */
6764		ret = -EOPNOTSUPP;
6765		break;
6766
6767	case IW_AUTH_TKIP_COUNTERMEASURES:
6768		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6769		if (!crypt || !crypt->ops->get_flags)
6770			break;
6771
6772		param->value = (crypt->ops->get_flags(crypt->priv) &
6773				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6774
6775		break;
6776
6777	case IW_AUTH_DROP_UNENCRYPTED:
6778		param->value = ieee->drop_unencrypted;
6779		break;
6780
6781	case IW_AUTH_80211_AUTH_ALG:
6782		param->value = ieee->sec.auth_mode;
6783		break;
6784
6785	case IW_AUTH_WPA_ENABLED:
6786		param->value = ieee->wpa_enabled;
6787		break;
6788
6789	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6790		param->value = ieee->ieee802_1x;
6791		break;
6792
6793	case IW_AUTH_ROAMING_CONTROL:
6794	case IW_AUTH_PRIVACY_INVOKED:
6795		param->value = ieee->privacy_invoked;
6796		break;
6797
6798	default:
6799		return -EOPNOTSUPP;
6800	}
6801	return 0;
6802}
6803
6804/* SIOCSIWENCODEEXT */
6805static int ipw_wx_set_encodeext(struct net_device *dev,
6806				struct iw_request_info *info,
6807				union iwreq_data *wrqu, char *extra)
6808{
6809	struct ipw_priv *priv = libipw_priv(dev);
6810	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6811
6812	if (hwcrypto) {
6813		if (ext->alg == IW_ENCODE_ALG_TKIP) {
6814			/* IPW HW can't build TKIP MIC,
6815			   host decryption still needed */
6816			if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6817				priv->ieee->host_mc_decrypt = 1;
6818			else {
6819				priv->ieee->host_encrypt = 0;
6820				priv->ieee->host_encrypt_msdu = 1;
6821				priv->ieee->host_decrypt = 1;
6822			}
6823		} else {
6824			priv->ieee->host_encrypt = 0;
6825			priv->ieee->host_encrypt_msdu = 0;
6826			priv->ieee->host_decrypt = 0;
6827			priv->ieee->host_mc_decrypt = 0;
6828		}
6829	}
6830
6831	return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6832}
6833
6834/* SIOCGIWENCODEEXT */
6835static int ipw_wx_get_encodeext(struct net_device *dev,
6836				struct iw_request_info *info,
6837				union iwreq_data *wrqu, char *extra)
6838{
6839	struct ipw_priv *priv = libipw_priv(dev);
6840	return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6841}
6842
6843/* SIOCSIWMLME */
6844static int ipw_wx_set_mlme(struct net_device *dev,
6845			   struct iw_request_info *info,
6846			   union iwreq_data *wrqu, char *extra)
6847{
6848	struct ipw_priv *priv = libipw_priv(dev);
6849	struct iw_mlme *mlme = (struct iw_mlme *)extra;
6850	__le16 reason;
6851
6852	reason = cpu_to_le16(mlme->reason_code);
6853
6854	switch (mlme->cmd) {
6855	case IW_MLME_DEAUTH:
6856		/* silently ignore */
6857		break;
6858
6859	case IW_MLME_DISASSOC:
6860		ipw_disassociate(priv);
6861		break;
6862
6863	default:
6864		return -EOPNOTSUPP;
6865	}
6866	return 0;
6867}
6868
6869#ifdef CONFIG_IPW2200_QOS
6870
6871/* QoS */
6872/*
6873* Get the modulation type of the current network or
6874* the card's current mode
6875*/
6876static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6877{
6878	u8 mode = 0;
6879
6880	if (priv->status & STATUS_ASSOCIATED) {
6881		unsigned long flags;
6882
6883		spin_lock_irqsave(&priv->ieee->lock, flags);
6884		mode = priv->assoc_network->mode;
6885		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6886	} else {
6887		mode = priv->ieee->mode;
6888	}
6889	IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6890	return mode;
6891}
6892
6893/*
6894* Handle beacon and probe response management frames
6895*/
6896static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6897					 int active_network,
6898					 struct libipw_network *network)
6899{
6900	u32 size = sizeof(struct libipw_qos_parameters);
6901
6902	if (network->capability & WLAN_CAPABILITY_IBSS)
6903		network->qos_data.active = network->qos_data.supported;
6904
6905	if (network->flags & NETWORK_HAS_QOS_MASK) {
6906		if (active_network &&
6907		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6908			network->qos_data.active = network->qos_data.supported;
6909
6910		if ((network->qos_data.active == 1) && (active_network == 1) &&
6911		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6912		    (network->qos_data.old_param_count !=
6913		     network->qos_data.param_count)) {
6914			network->qos_data.old_param_count =
6915			    network->qos_data.param_count;
6916			schedule_work(&priv->qos_activate);
6917			IPW_DEBUG_QOS("QoS parameters changed, calling "
6918				      "qos_activate\n");
6919		}
6920	} else {
6921		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6922			memcpy(&network->qos_data.parameters,
6923			       &def_parameters_CCK, size);
6924		else
6925			memcpy(&network->qos_data.parameters,
6926			       &def_parameters_OFDM, size);
6927
6928		if ((network->qos_data.active == 1) && (active_network == 1)) {
6929			IPW_DEBUG_QOS("QoS was disabled, calling qos_activate\n");
6930			schedule_work(&priv->qos_activate);
6931		}
6932
6933		network->qos_data.active = 0;
6934		network->qos_data.supported = 0;
6935	}
6936	if ((priv->status & STATUS_ASSOCIATED) &&
6937	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6938		if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6939			if (network->capability & WLAN_CAPABILITY_IBSS)
6940				if ((network->ssid_len ==
6941				     priv->assoc_network->ssid_len) &&
6942				    !memcmp(network->ssid,
6943					    priv->assoc_network->ssid,
6944					    network->ssid_len)) {
6945					queue_work(priv->workqueue,
6946						   &priv->merge_networks);
6947				}
6948	}
6949
6950	return 0;
6951}
6952
6953/*
6954* This function sets up the firmware to support QoS. It sends
6955* IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6956*/
6957static int ipw_qos_activate(struct ipw_priv *priv,
6958			    struct libipw_qos_data *qos_network_data)
6959{
6960	int err;
6961	struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
6962	struct libipw_qos_parameters *active_one = NULL;
6963	u32 size = sizeof(struct libipw_qos_parameters);
6964	u32 burst_duration;
6965	int i;
6966	u8 type;
6967
6968	type = ipw_qos_current_mode(priv);
6969
6970	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6971	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6972	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6973	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6974
6975	if (qos_network_data == NULL) {
6976		if (type == IEEE_B) {
6977			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6978			active_one = &def_parameters_CCK;
6979		} else
6980			active_one = &def_parameters_OFDM;
6981
6982		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6983		burst_duration = ipw_qos_get_burst_duration(priv);
6984		for (i = 0; i < QOS_QUEUE_NUM; i++)
6985			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6986			    cpu_to_le16(burst_duration);
6987	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6988		if (type == IEEE_B) {
6989			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6990				      type);
6991			if (priv->qos_data.qos_enable == 0)
6992				active_one = &def_parameters_CCK;
6993			else
6994				active_one = priv->qos_data.def_qos_parm_CCK;
6995		} else {
6996			if (priv->qos_data.qos_enable == 0)
6997				active_one = &def_parameters_OFDM;
6998			else
6999				active_one = priv->qos_data.def_qos_parm_OFDM;
7000		}
7001		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7002	} else {
7003		unsigned long flags;
7004		int active;
7005
7006		spin_lock_irqsave(&priv->ieee->lock, flags);
7007		active_one = &(qos_network_data->parameters);
7008		qos_network_data->old_param_count =
7009		    qos_network_data->param_count;
7010		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7011		active = qos_network_data->supported;
7012		spin_unlock_irqrestore(&priv->ieee->lock, flags);
7013
7014		if (active == 0) {
7015			burst_duration = ipw_qos_get_burst_duration(priv);
7016			for (i = 0; i < QOS_QUEUE_NUM; i++)
7017				qos_parameters[QOS_PARAM_SET_ACTIVE].
7018				    tx_op_limit[i] = cpu_to_le16(burst_duration);
7019		}
7020	}
7021
7022	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7023	err = ipw_send_qos_params_command(priv,
7024					  (struct libipw_qos_parameters *)
7025					  &(qos_parameters[0]));
7026	if (err)
7027		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7028
7029	return err;
7030}
7031
7032/*
7033* Send IPW_CMD_WME_INFO to the firmware
7034*/
7035static int ipw_qos_set_info_element(struct ipw_priv *priv)
7036{
7037	int ret = 0;
7038	struct libipw_qos_information_element qos_info;
7039
7040	if (priv == NULL)
7041		return -1;
7042
7043	qos_info.elementID = QOS_ELEMENT_ID;
7044	qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7045
7046	qos_info.version = QOS_VERSION_1;
7047	qos_info.ac_info = 0;
7048
7049	memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7050	qos_info.qui_type = QOS_OUI_TYPE;
7051	qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7052
7053	ret = ipw_send_qos_info_command(priv, &qos_info);
7054	if (ret != 0) {
7055		IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7056	}
7057	return ret;
7058}
7059
7060/*
7061* Set the QoS parameters for the association request structure
7062*/
7063static int ipw_qos_association(struct ipw_priv *priv,
7064			       struct libipw_network *network)
7065{
7066	int err = 0;
7067	struct libipw_qos_data *qos_data = NULL;
7068	struct libipw_qos_data ibss_data = {
7069		.supported = 1,
7070		.active = 1,
7071	};
7072
7073	switch (priv->ieee->iw_mode) {
7074	case IW_MODE_ADHOC:
7075		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7076
7077		qos_data = &ibss_data;
7078		break;
7079
7080	case IW_MODE_INFRA:
7081		qos_data = &network->qos_data;
7082		break;
7083
7084	default:
7085		BUG();
7086		break;
7087	}
7088
7089	err = ipw_qos_activate(priv, qos_data);
7090	if (err) {
7091		priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7092		return err;
7093	}
7094
7095	if (priv->qos_data.qos_enable && qos_data->supported) {
7096		IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7097		priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7098		return ipw_qos_set_info_element(priv);
7099	}
7100
7101	return 0;
7102}
7103
7104/*
7105* Handle beacon responses. If the QoS settings advertised by the
7106* network differ from the settings we associated with, adjust the
7107* QoS settings.
7108*/
7109static int ipw_qos_association_resp(struct ipw_priv *priv,
7110				    struct libipw_network *network)
7111{
7112	int ret = 0;
7113	unsigned long flags;
7114	u32 size = sizeof(struct libipw_qos_parameters);
7115	int set_qos_param = 0;
7116
7117	if ((priv == NULL) || (network == NULL) ||
7118	    (priv->assoc_network == NULL))
7119		return ret;
7120
7121	if (!(priv->status & STATUS_ASSOCIATED))
7122		return ret;
7123
7124	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7125		return ret;
7126
7127	spin_lock_irqsave(&priv->ieee->lock, flags);
7128	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7129		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7130		       sizeof(struct libipw_qos_data));
7131		priv->assoc_network->qos_data.active = 1;
7132		if ((network->qos_data.old_param_count !=
7133		     network->qos_data.param_count)) {
7134			set_qos_param = 1;
7135			network->qos_data.old_param_count =
7136			    network->qos_data.param_count;
7137		}
7138
7139	} else {
7140		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7141			memcpy(&priv->assoc_network->qos_data.parameters,
7142			       &def_parameters_CCK, size);
7143		else
7144			memcpy(&priv->assoc_network->qos_data.parameters,
7145			       &def_parameters_OFDM, size);
7146		priv->assoc_network->qos_data.active = 0;
7147		priv->assoc_network->qos_data.supported = 0;
7148		set_qos_param = 1;
7149	}
7150
7151	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7152
7153	if (set_qos_param == 1)
7154		schedule_work(&priv->qos_activate);
7155
7156	return ret;
7157}
7158
7159static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7160{
7161	u32 ret = 0;
7162
7163	if ((priv == NULL))
7164		return 0;
7165
7166	if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7167		ret = priv->qos_data.burst_duration_CCK;
7168	else
7169		ret = priv->qos_data.burst_duration_OFDM;
7170
7171	return ret;
7172}
7173
7174/*
7175* Initialize the global QoS settings
7176*/
7177static void ipw_qos_init(struct ipw_priv *priv, int enable,
7178			 int burst_enable, u32 burst_duration_CCK,
7179			 u32 burst_duration_OFDM)
7180{
7181	priv->qos_data.qos_enable = enable;
7182
7183	if (priv->qos_data.qos_enable) {
7184		priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7185		priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7186		IPW_DEBUG_QOS("QoS is enabled\n");
7187	} else {
7188		priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7189		priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7190		IPW_DEBUG_QOS("QoS is not enabled\n");
7191	}
7192
7193	priv->qos_data.burst_enable = burst_enable;
7194
7195	if (burst_enable) {
7196		priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7197		priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7198	} else {
7199		priv->qos_data.burst_duration_CCK = 0;
7200		priv->qos_data.burst_duration_OFDM = 0;
7201	}
7202}
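
/*
 * Usage sketch (illustrative only): ipw_qos_init() is expected to be
 * called once during device setup, typically with the QoS module
 * parameters.  Assuming parameters named qos_enable, qos_burst_enable,
 * burst_duration_CCK and burst_duration_OFDM are in scope, the call
 * would look roughly like this; it is compiled out here.
 */
#if 0
	ipw_qos_init(priv, qos_enable, qos_burst_enable,
		     burst_duration_CCK, burst_duration_OFDM);
#endif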
7203
7204/*
7205* Map the packet priority to the right TX queue
7206*/
7207static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7208{
7209	if (priority > 7 || !priv->qos_data.qos_enable)
7210		priority = 0;
7211
7212	return from_priority_to_tx_queue[priority] - 1;
7213}
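
/*
 * Note: from_priority_to_tx_queue[] (defined earlier in this file)
 * appears to hold 1-based queue numbers, hence the "- 1" above to get
 * a 0-based TX queue index; ipw_qos_set_tx_queue_command() below uses
 * the same table in the same way.
 */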
7214
7215static int ipw_is_qos_active(struct net_device *dev,
7216			     struct sk_buff *skb)
7217{
7218	struct ipw_priv *priv = libipw_priv(dev);
7219	struct libipw_qos_data *qos_data = NULL;
7220	int active, supported;
7221	u8 *daddr = skb->data + ETH_ALEN;
7222	int unicast = !is_multicast_ether_addr(daddr);
7223
7224	if (!(priv->status & STATUS_ASSOCIATED))
7225		return 0;
7226
7227	qos_data = &priv->assoc_network->qos_data;
7228
7229	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7230		if (unicast == 0)
7231			qos_data->active = 0;
7232		else
7233			qos_data->active = qos_data->supported;
7234	}
7235	active = qos_data->active;
7236	supported = qos_data->supported;
7237	IPW_DEBUG_QOS("QoS enabled %d, network QoS active %d, supported %d, "
7238		      "unicast %d\n",
7239		      priv->qos_data.qos_enable, active, supported, unicast);
7240	if (active && priv->qos_data.qos_enable)
7241		return 1;
7242
7243	return 0;
7244
7245}
7246/*
7247* Add QoS parameters to the TX command
7248*/
7249static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7250					u16 priority,
7251					struct tfd_data *tfd)
7252{
7253	int tx_queue_id = 0;
7254
7255
7256	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7257	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7258
7259	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7260		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7261		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7262	}
7263	return 0;
7264}
7265
7266/*
7267* Background work to run the QoS activate functionality
7268*/
7269static void ipw_bg_qos_activate(struct work_struct *work)
7270{
7271	struct ipw_priv *priv =
7272		container_of(work, struct ipw_priv, qos_activate);
7273
7274	mutex_lock(&priv->mutex);
7275
7276	if (priv->status & STATUS_ASSOCIATED)
7277		ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7278
7279	mutex_unlock(&priv->mutex);
7280}
7281
7282static int ipw_handle_probe_response(struct net_device *dev,
7283				     struct libipw_probe_response *resp,
7284				     struct libipw_network *network)
7285{
7286	struct ipw_priv *priv = libipw_priv(dev);
7287	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7288			      (network == priv->assoc_network));
7289
7290	ipw_qos_handle_probe_response(priv, active_network, network);
7291
7292	return 0;
7293}
7294
7295static int ipw_handle_beacon(struct net_device *dev,
7296			     struct libipw_beacon *resp,
7297			     struct libipw_network *network)
7298{
7299	struct ipw_priv *priv = libipw_priv(dev);
7300	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7301			      (network == priv->assoc_network));
7302
7303	ipw_qos_handle_probe_response(priv, active_network, network);
7304
7305	return 0;
7306}
7307
7308static int ipw_handle_assoc_response(struct net_device *dev,
7309				     struct libipw_assoc_response *resp,
7310				     struct libipw_network *network)
7311{
7312	struct ipw_priv *priv = libipw_priv(dev);
7313	ipw_qos_association_resp(priv, network);
7314	return 0;
7315}
7316
7317static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7318				       *qos_param)
7319{
7320	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7321				sizeof(*qos_param) * 3, qos_param);
7322}
7323
7324static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7325				     *qos_param)
7326{
7327	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7328				qos_param);
7329}
7330
7331#endif				/* CONFIG_IPW2200_QOS */
7332
7333static int ipw_associate_network(struct ipw_priv *priv,
7334				 struct libipw_network *network,
7335				 struct ipw_supported_rates *rates, int roaming)
7336{
7337	int err;
7338	DECLARE_SSID_BUF(ssid);
7339
7340	if (priv->config & CFG_FIXED_RATE)
7341		ipw_set_fixed_rate(priv, network->mode);
7342
7343	if (!(priv->config & CFG_STATIC_ESSID)) {
7344		priv->essid_len = min(network->ssid_len,
7345				      (u8) IW_ESSID_MAX_SIZE);
7346		memcpy(priv->essid, network->ssid, priv->essid_len);
7347	}
7348
7349	network->last_associate = jiffies;
7350
7351	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7352	priv->assoc_request.channel = network->channel;
7353	priv->assoc_request.auth_key = 0;
7354
7355	if ((priv->capability & CAP_PRIVACY_ON) &&
7356	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7357		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7358		priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7359
7360		if (priv->ieee->sec.level == SEC_LEVEL_1)
7361			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7362
7363	} else if ((priv->capability & CAP_PRIVACY_ON) &&
7364		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7365		priv->assoc_request.auth_type = AUTH_LEAP;
7366	else
7367		priv->assoc_request.auth_type = AUTH_OPEN;
7368
7369	if (priv->ieee->wpa_ie_len) {
7370		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
7371		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7372				 priv->ieee->wpa_ie_len);
7373	}
7374
7375	/*
7376	 * It is valid for our ieee device to support multiple modes, but
7377	 * when it comes to associating to a given network we have to choose
7378	 * just one mode.
7379	 */
7380	if (network->mode & priv->ieee->mode & IEEE_A)
7381		priv->assoc_request.ieee_mode = IPW_A_MODE;
7382	else if (network->mode & priv->ieee->mode & IEEE_G)
7383		priv->assoc_request.ieee_mode = IPW_G_MODE;
7384	else if (network->mode & priv->ieee->mode & IEEE_B)
7385		priv->assoc_request.ieee_mode = IPW_B_MODE;
7386
7387	priv->assoc_request.capability = cpu_to_le16(network->capability);
7388	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7389	    && !(priv->config & CFG_PREAMBLE_LONG)) {
7390		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7391	} else {
7392		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7393
7394		/* Clear the short preamble if we won't be supporting it */
7395		priv->assoc_request.capability &=
7396		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7397	}
7398
7399	/* Clear capability bits that aren't used in Ad Hoc */
7400	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7401		priv->assoc_request.capability &=
7402		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7403
7404	IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7405			"802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7406			roaming ? "Rea" : "A",
7407			print_ssid(ssid, priv->essid, priv->essid_len),
7408			network->channel,
7409			ipw_modes[priv->assoc_request.ieee_mode],
7410			rates->num_rates,
7411			(priv->assoc_request.preamble_length ==
7412			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7413			network->capability &
7414			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7415			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7416			priv->capability & CAP_PRIVACY_ON ?
7417			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
7418			 "(open)") : "",
7419			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7420			priv->capability & CAP_PRIVACY_ON ?
7421			'1' + priv->ieee->sec.active_key : '.',
7422			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7423
7424	priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7425	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7426	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7427		priv->assoc_request.assoc_type = HC_IBSS_START;
7428		priv->assoc_request.assoc_tsf_msw = 0;
7429		priv->assoc_request.assoc_tsf_lsw = 0;
7430	} else {
7431		if (unlikely(roaming))
7432			priv->assoc_request.assoc_type = HC_REASSOCIATE;
7433		else
7434			priv->assoc_request.assoc_type = HC_ASSOCIATE;
7435		priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7436		priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7437	}
7438
7439	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7440
7441	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7442		memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7443		priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7444	} else {
7445		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7446		priv->assoc_request.atim_window = 0;
7447	}
7448
7449	priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7450
7451	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7452	if (err) {
7453		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7454		return err;
7455	}
7456
7457	rates->ieee_mode = priv->assoc_request.ieee_mode;
7458	rates->purpose = IPW_RATE_CONNECT;
7459	ipw_send_supported_rates(priv, rates);
7460
7461	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7462		priv->sys_config.dot11g_auto_detection = 1;
7463	else
7464		priv->sys_config.dot11g_auto_detection = 0;
7465
7466	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7467		priv->sys_config.answer_broadcast_ssid_probe = 1;
7468	else
7469		priv->sys_config.answer_broadcast_ssid_probe = 0;
7470
7471	err = ipw_send_system_config(priv);
7472	if (err) {
7473		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7474		return err;
7475	}
7476
7477	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7478	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7479	if (err) {
7480		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7481		return err;
7482	}
7483
7484	/*
7485	 * If preemption is enabled, it is possible for the association
7486	 * to complete before we return from ipw_send_associate.  Therefore
7487	 * we have to be sure to update our private data first.
7488	 */
7489	priv->channel = network->channel;
7490	memcpy(priv->bssid, network->bssid, ETH_ALEN);
7491	priv->status |= STATUS_ASSOCIATING;
7492	priv->status &= ~STATUS_SECURITY_UPDATED;
7493
7494	priv->assoc_network = network;
7495
7496#ifdef CONFIG_IPW2200_QOS
7497	ipw_qos_association(priv, network);
7498#endif
7499
7500	err = ipw_send_associate(priv, &priv->assoc_request);
7501	if (err) {
7502		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7503		return err;
7504	}
7505
7506	IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
7507		  print_ssid(ssid, priv->essid, priv->essid_len),
7508		  priv->bssid);
7509
7510	return 0;
7511}
7512
7513static void ipw_roam(void *data)
7514{
7515	struct ipw_priv *priv = data;
7516	struct libipw_network *network = NULL;
7517	struct ipw_network_match match = {
7518		.network = priv->assoc_network
7519	};
7520
7521	/* The roaming process is as follows:
7522	 *
7523	 * 1.  Missed beacon threshold triggers the roaming process by
7524	 *     setting the status ROAM bit and requesting a scan.
7525	 * 2.  When the scan completes, it schedules the ROAM work
7526	 * 3.  The ROAM work looks at all of the known networks for one that
7527	 *     is a better network than the currently associated.  If none
7528	 *     found, the ROAM process is over (ROAM bit cleared)
7529	 * 4.  If a better network is found, a disassociation request is
7530	 *     sent.
7531	 * 5.  When the disassociation completes, the roam work is again
7532	 *     scheduled.  The second time through, the driver is no longer
7533	 *     associated, and the newly selected network is sent an
7534	 *     association request.
7535	 * 6.  At this point, the roaming process is complete and the ROAM
7536	 *     status bit is cleared.
7537	 */
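	/*
	 * A rough sketch of that flow in terms of the code in this section
	 * (illustrative only; the scan and disassociation steps live in
	 * their own handlers):
	 *
	 *   missed beacons  -> STATUS_ROAMING set, scan requested
	 *   scan complete   -> ipw_roam() pass 1: pick a better AP,
	 *                      ipw_send_disassociate()
	 *   disassociated   -> ipw_roam() pass 2: ipw_associate_network(),
	 *                      STATUS_ROAMING cleared
	 */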
7538
7539	/* If we are no longer associated, and the roaming bit is no longer
7540	 * set, then we are not actively roaming, so just return */
7541	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7542		return;
7543
7544	if (priv->status & STATUS_ASSOCIATED) {
7545		/* First pass through ROAM process -- look for a better
7546		 * network */
7547		unsigned long flags;
7548		u8 rssi = priv->assoc_network->stats.rssi;
7549		priv->assoc_network->stats.rssi = -128;
7550		spin_lock_irqsave(&priv->ieee->lock, flags);
7551		list_for_each_entry(network, &priv->ieee->network_list, list) {
7552			if (network != priv->assoc_network)
7553				ipw_best_network(priv, &match, network, 1);
7554		}
7555		spin_unlock_irqrestore(&priv->ieee->lock, flags);
7556		priv->assoc_network->stats.rssi = rssi;
7557
7558		if (match.network == priv->assoc_network) {
7559			IPW_DEBUG_ASSOC("No better APs in this network to "
7560					"roam to.\n");
7561			priv->status &= ~STATUS_ROAMING;
7562			ipw_debug_config(priv);
7563			return;
7564		}
7565
7566		ipw_send_disassociate(priv, 1);
7567		priv->assoc_network = match.network;
7568
7569		return;
7570	}
7571
7572	/* Second pass through ROAM process -- request association */
7573	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7574	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7575	priv->status &= ~STATUS_ROAMING;
7576}
7577
7578static void ipw_bg_roam(struct work_struct *work)
7579{
7580	struct ipw_priv *priv =
7581		container_of(work, struct ipw_priv, roam);
7582	mutex_lock(&priv->mutex);
7583	ipw_roam(priv);
7584	mutex_unlock(&priv->mutex);
7585}
7586
7587static int ipw_associate(void *data)
7588{
7589	struct ipw_priv *priv = data;
7590
7591	struct libipw_network *network = NULL;
7592	struct ipw_network_match match = {
7593		.network = NULL
7594	};
7595	struct ipw_supported_rates *rates;
7596	struct list_head *element;
7597	unsigned long flags;
7598	DECLARE_SSID_BUF(ssid);
7599
7600	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7601		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7602		return 0;
7603	}
7604
7605	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7606		IPW_DEBUG_ASSOC("Not attempting association (already in "
7607				"progress)\n");
7608		return 0;
7609	}
7610
7611	if (priv->status & STATUS_DISASSOCIATING) {
7612		IPW_DEBUG_ASSOC("Not attempting association (in "
7613				"disassociating)\n ");
7614		queue_work(priv->workqueue, &priv->associate);
7615		return 0;
7616	}
7617
7618	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7619		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7620				"initialized)\n");
7621		return 0;
7622	}
7623
7624	if (!(priv->config & CFG_ASSOCIATE) &&
7625	    !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7626		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7627		return 0;
7628	}
7629
7630	/* Protect our use of the network_list */
7631	spin_lock_irqsave(&priv->ieee->lock, flags);
7632	list_for_each_entry(network, &priv->ieee->network_list, list)
7633	    ipw_best_network(priv, &match, network, 0);
7634
7635	network = match.network;
7636	rates = &match.rates;
7637
7638	if (network == NULL &&
7639	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
7640	    priv->config & CFG_ADHOC_CREATE &&
7641	    priv->config & CFG_STATIC_ESSID &&
7642	    priv->config & CFG_STATIC_CHANNEL) {
7643		/* Use oldest network if the free list is empty */
7644		if (list_empty(&priv->ieee->network_free_list)) {
7645			struct libipw_network *oldest = NULL;
7646			struct libipw_network *target;
7647
7648			list_for_each_entry(target, &priv->ieee->network_list, list) {
7649				if ((oldest == NULL) ||
7650				    (target->last_scanned < oldest->last_scanned))
7651					oldest = target;
7652			}
7653
7654			/* If there are no more slots, expire the oldest */
7655			list_del(&oldest->list);
7656			target = oldest;
7657			IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7658					"network list.\n",
7659					print_ssid(ssid, target->ssid,
7660						   target->ssid_len),
7661					target->bssid);
7662			list_add_tail(&target->list,
7663				      &priv->ieee->network_free_list);
7664		}
7665
7666		element = priv->ieee->network_free_list.next;
7667		network = list_entry(element, struct libipw_network, list);
7668		ipw_adhoc_create(priv, network);
7669		rates = &priv->rates;
7670		list_del(element);
7671		list_add_tail(&network->list, &priv->ieee->network_list);
7672	}
7673	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7674
7675	/* If we reached the end of the list, then we don't have any valid
7676	 * matching APs */
7677	if (!network) {
7678		ipw_debug_config(priv);
7679
7680		if (!(priv->status & STATUS_SCANNING)) {
7681			if (!(priv->config & CFG_SPEED_SCAN))
7682				queue_delayed_work(priv->workqueue,
7683						   &priv->request_scan,
7684						   SCAN_INTERVAL);
7685			else
7686				queue_delayed_work(priv->workqueue,
7687						   &priv->request_scan, 0);
7688		}
7689
7690		return 0;
7691	}
7692
7693	ipw_associate_network(priv, network, rates, 0);
7694
7695	return 1;
7696}
7697
7698static void ipw_bg_associate(struct work_struct *work)
7699{
7700	struct ipw_priv *priv =
7701		container_of(work, struct ipw_priv, associate);
7702	mutex_lock(&priv->mutex);
7703	ipw_associate(priv);
7704	mutex_unlock(&priv->mutex);
7705}
7706
7707static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7708				      struct sk_buff *skb)
7709{
7710	struct ieee80211_hdr *hdr;
7711	u16 fc;
7712
7713	hdr = (struct ieee80211_hdr *)skb->data;
7714	fc = le16_to_cpu(hdr->frame_control);
7715	if (!(fc & IEEE80211_FCTL_PROTECTED))
7716		return;
7717
7718	fc &= ~IEEE80211_FCTL_PROTECTED;
7719	hdr->frame_control = cpu_to_le16(fc);
7720	switch (priv->ieee->sec.level) {
7721	case SEC_LEVEL_3:
7722		/* Remove CCMP HDR */
7723		memmove(skb->data + LIBIPW_3ADDR_LEN,
7724			skb->data + LIBIPW_3ADDR_LEN + 8,
7725			skb->len - LIBIPW_3ADDR_LEN - 8);
7726		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
7727		break;
7728	case SEC_LEVEL_2:
7729		break;
7730	case SEC_LEVEL_1:
7731		/* Remove IV */
7732		memmove(skb->data + LIBIPW_3ADDR_LEN,
7733			skb->data + LIBIPW_3ADDR_LEN + 4,
7734			skb->len - LIBIPW_3ADDR_LEN - 4);
7735		skb_trim(skb, skb->len - 8);	/* IV + ICV */
7736		break;
7737	case SEC_LEVEL_0:
7738		break;
7739	default:
7740		printk(KERN_ERR "Unknown security level %d\n",
7741		       priv->ieee->sec.level);
7742		break;
7743	}
7744}
7745
7746static void ipw_handle_data_packet(struct ipw_priv *priv,
7747				   struct ipw_rx_mem_buffer *rxb,
7748				   struct libipw_rx_stats *stats)
7749{
7750	struct net_device *dev = priv->net_dev;
7751	struct libipw_hdr_4addr *hdr;
7752	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7753
7754	/* We received data from the HW, so stop the watchdog */
7755	dev->trans_start = jiffies;
7756
7757	/* We only process data packets if the
7758	 * interface is open */
7759	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7760		     skb_tailroom(rxb->skb))) {
7761		dev->stats.rx_errors++;
7762		priv->wstats.discard.misc++;
7763		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7764		return;
7765	} else if (unlikely(!netif_running(priv->net_dev))) {
7766		dev->stats.rx_dropped++;
7767		priv->wstats.discard.misc++;
7768		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7769		return;
7770	}
7771
7772	/* Advance skb->data to the start of the actual payload */
7773	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7774
7775	/* Set the size of the skb to the size of the frame */
7776	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7777
7778	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7779
7780	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7781	hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7782	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7783	    (is_multicast_ether_addr(hdr->addr1) ?
7784	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7785		ipw_rebuild_decrypted_skb(priv, rxb->skb);
7786
7787	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7788		dev->stats.rx_errors++;
7789	else {			/* libipw_rx succeeded, so it now owns the SKB */
7790		rxb->skb = NULL;
7791		__ipw_led_activity_on(priv);
7792	}
7793}
7794
7795#ifdef CONFIG_IPW2200_RADIOTAP
7796static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7797					   struct ipw_rx_mem_buffer *rxb,
7798					   struct libipw_rx_stats *stats)
7799{
7800	struct net_device *dev = priv->net_dev;
7801	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7802	struct ipw_rx_frame *frame = &pkt->u.frame;
7803
7804	/* initial pull of some data */
7805	u16 received_channel = frame->received_channel;
7806	u8 antennaAndPhy = frame->antennaAndPhy;
7807	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
7808	u16 pktrate = frame->rate;
7809
7810	/* Magic struct that slots into the radiotap header -- no reason
7811	 * to build this manually element by element, we can write it much
7812	 * more efficiently than we can parse it. ORDER MATTERS HERE */
7813	struct ipw_rt_hdr *ipw_rt;
7814
7815	short len = le16_to_cpu(pkt->u.frame.length);
7816
7817	/* We received data from the HW, so stop the watchdog */
7818	dev->trans_start = jiffies;
7819
7820	/* We only process data packets if the
7821	 * interface is open */
7822	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7823		     skb_tailroom(rxb->skb))) {
7824		dev->stats.rx_errors++;
7825		priv->wstats.discard.misc++;
7826		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7827		return;
7828	} else if (unlikely(!netif_running(priv->net_dev))) {
7829		dev->stats.rx_dropped++;
7830		priv->wstats.discard.misc++;
7831		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7832		return;
7833	}
7834
7835	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7836	 * that now */
7837	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7838		/* FIXME: Should alloc bigger skb instead */
7839		dev->stats.rx_dropped++;
7840		priv->wstats.discard.misc++;
7841		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7842		return;
7843	}
7844
7845	/* copy the frame itself */
7846	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7847		rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7848
7849	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7850
7851	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7852	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7853	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* total header+data */
7854
7855	/* Big bitfield of all the fields we provide in radiotap */
7856	ipw_rt->rt_hdr.it_present = cpu_to_le32(
7857	     (1 << IEEE80211_RADIOTAP_TSFT) |
7858	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7859	     (1 << IEEE80211_RADIOTAP_RATE) |
7860	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7861	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7862	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7863	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7864
7865	/* Zero the flags, we'll add to them as we go */
7866	ipw_rt->rt_flags = 0;
7867	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7868			       frame->parent_tsf[2] << 16 |
7869			       frame->parent_tsf[1] << 8  |
7870			       frame->parent_tsf[0]);
7871
7872	/* Convert signal to DBM */
7873	ipw_rt->rt_dbmsignal = antsignal;
7874	ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7875
7876	/* Convert the channel data and set the flags */
7877	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7878	if (received_channel > 14) {	/* 802.11a */
7879		ipw_rt->rt_chbitmask =
7880		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7881	} else if (antennaAndPhy & 32) {	/* 802.11b */
7882		ipw_rt->rt_chbitmask =
7883		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7884	} else {		/* 802.11g */
7885		ipw_rt->rt_chbitmask =
7886		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7887	}
7888
7889	/* set the rate in multiples of 500k/s */
7890	switch (pktrate) {
7891	case IPW_TX_RATE_1MB:
7892		ipw_rt->rt_rate = 2;
7893		break;
7894	case IPW_TX_RATE_2MB:
7895		ipw_rt->rt_rate = 4;
7896		break;
7897	case IPW_TX_RATE_5MB:
7898		ipw_rt->rt_rate = 10;
7899		break;
7900	case IPW_TX_RATE_6MB:
7901		ipw_rt->rt_rate = 12;
7902		break;
7903	case IPW_TX_RATE_9MB:
7904		ipw_rt->rt_rate = 18;
7905		break;
7906	case IPW_TX_RATE_11MB:
7907		ipw_rt->rt_rate = 22;
7908		break;
7909	case IPW_TX_RATE_12MB:
7910		ipw_rt->rt_rate = 24;
7911		break;
7912	case IPW_TX_RATE_18MB:
7913		ipw_rt->rt_rate = 36;
7914		break;
7915	case IPW_TX_RATE_24MB:
7916		ipw_rt->rt_rate = 48;
7917		break;
7918	case IPW_TX_RATE_36MB:
7919		ipw_rt->rt_rate = 72;
7920		break;
7921	case IPW_TX_RATE_48MB:
7922		ipw_rt->rt_rate = 96;
7923		break;
7924	case IPW_TX_RATE_54MB:
7925		ipw_rt->rt_rate = 108;
7926		break;
7927	default:
7928		ipw_rt->rt_rate = 0;
7929		break;
7930	}
7931
7932	/* antenna number */
7933	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */
7934
7935	/* set the preamble flag if we have it */
7936	if ((antennaAndPhy & 64))
7937		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7938
7939	/* Set the size of the skb to the size of the frame */
7940	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7941
7942	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7943
7944	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7945		dev->stats.rx_errors++;
7946	else {			/* libipw_rx succeeded, so it now owns the SKB */
7947		rxb->skb = NULL;
7948		/* no LED during capture */
7949	}
7950}
7951#endif
7952
7953#ifdef CONFIG_IPW2200_PROMISCUOUS
7954#define libipw_is_probe_response(fc) \
7955   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7956    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7957
7958#define libipw_is_management(fc) \
7959   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7960
7961#define libipw_is_control(fc) \
7962   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7963
7964#define libipw_is_data(fc) \
7965   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7966
7967#define libipw_is_assoc_request(fc) \
7968   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7969
7970#define libipw_is_reassoc_request(fc) \
7971   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7972
7973static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7974				      struct ipw_rx_mem_buffer *rxb,
7975				      struct libipw_rx_stats *stats)
7976{
7977	struct net_device *dev = priv->prom_net_dev;
7978	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7979	struct ipw_rx_frame *frame = &pkt->u.frame;
7980	struct ipw_rt_hdr *ipw_rt;
7981
7982	/* First cache any information we need before we overwrite
7983	 * the information provided in the skb from the hardware */
7984	struct ieee80211_hdr *hdr;
7985	u16 channel = frame->received_channel;
7986	u8 phy_flags = frame->antennaAndPhy;
7987	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7988	s8 noise = (s8) le16_to_cpu(frame->noise);
7989	u8 rate = frame->rate;
7990	short len = le16_to_cpu(pkt->u.frame.length);
7991	struct sk_buff *skb;
7992	int hdr_only = 0;
7993	u16 filter = priv->prom_priv->filter;
7994
7995	/* If the filter is set to not include Rx frames then return */
7996	if (filter & IPW_PROM_NO_RX)
7997		return;
7998
7999	/* We received data from the HW, so stop the watchdog */
8000	dev->trans_start = jiffies;
8001
8002	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
8003		dev->stats.rx_errors++;
8004		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8005		return;
8006	}
8007
8008	/* We only process data packets if the interface is open */
8009	if (unlikely(!netif_running(dev))) {
8010		dev->stats.rx_dropped++;
8011		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8012		return;
8013	}
8014
8015	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8016	 * that now */
8017	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8018		/* FIXME: Should alloc bigger skb instead */
8019		dev->stats.rx_dropped++;
8020		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8021		return;
8022	}
8023
8024	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8025	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
8026		if (filter & IPW_PROM_NO_MGMT)
8027			return;
8028		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8029			hdr_only = 1;
8030	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
8031		if (filter & IPW_PROM_NO_CTL)
8032			return;
8033		if (filter & IPW_PROM_CTL_HEADER_ONLY)
8034			hdr_only = 1;
8035	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
8036		if (filter & IPW_PROM_NO_DATA)
8037			return;
8038		if (filter & IPW_PROM_DATA_HEADER_ONLY)
8039			hdr_only = 1;
8040	}
8041
8042	/* Copy the SKB since this is for the promiscuous side */
8043	skb = skb_copy(rxb->skb, GFP_ATOMIC);
8044	if (skb == NULL) {
8045		IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8046		return;
8047	}
8048
8049	/* copy the frame data to write after where the radiotap header goes */
8050	ipw_rt = (void *)skb->data;
8051
8052	if (hdr_only)
8053		len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8054
8055	memcpy(ipw_rt->payload, hdr, len);
8056
8057	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8058	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
8059	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt));	/* total header+data */
8060
8061	/* Set the size of the skb to the size of the frame */
8062	skb_put(skb, sizeof(*ipw_rt) + len);
8063
8064	/* Big bitfield of all the fields we provide in radiotap */
8065	ipw_rt->rt_hdr.it_present = cpu_to_le32(
8066	     (1 << IEEE80211_RADIOTAP_TSFT) |
8067	     (1 << IEEE80211_RADIOTAP_FLAGS) |
8068	     (1 << IEEE80211_RADIOTAP_RATE) |
8069	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
8070	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8071	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8072	     (1 << IEEE80211_RADIOTAP_ANTENNA));
8073
8074	/* Zero the flags, we'll add to them as we go */
8075	ipw_rt->rt_flags = 0;
8076	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8077			       frame->parent_tsf[2] << 16 |
8078			       frame->parent_tsf[1] << 8  |
8079			       frame->parent_tsf[0]);
8080
8081	/* Convert to DBM */
8082	ipw_rt->rt_dbmsignal = signal;
8083	ipw_rt->rt_dbmnoise = noise;
8084
8085	/* Convert the channel data and set the flags */
8086	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8087	if (channel > 14) {	/* 802.11a */
8088		ipw_rt->rt_chbitmask =
8089		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8090	} else if (phy_flags & (1 << 5)) {	/* 802.11b */
8091		ipw_rt->rt_chbitmask =
8092		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8093	} else {		/* 802.11g */
8094		ipw_rt->rt_chbitmask =
8095		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8096	}
8097
8098	/* set the rate in multiples of 500k/s */
8099	switch (rate) {
8100	case IPW_TX_RATE_1MB:
8101		ipw_rt->rt_rate = 2;
8102		break;
8103	case IPW_TX_RATE_2MB:
8104		ipw_rt->rt_rate = 4;
8105		break;
8106	case IPW_TX_RATE_5MB:
8107		ipw_rt->rt_rate = 10;
8108		break;
8109	case IPW_TX_RATE_6MB:
8110		ipw_rt->rt_rate = 12;
8111		break;
8112	case IPW_TX_RATE_9MB:
8113		ipw_rt->rt_rate = 18;
8114		break;
8115	case IPW_TX_RATE_11MB:
8116		ipw_rt->rt_rate = 22;
8117		break;
8118	case IPW_TX_RATE_12MB:
8119		ipw_rt->rt_rate = 24;
8120		break;
8121	case IPW_TX_RATE_18MB:
8122		ipw_rt->rt_rate = 36;
8123		break;
8124	case IPW_TX_RATE_24MB:
8125		ipw_rt->rt_rate = 48;
8126		break;
8127	case IPW_TX_RATE_36MB:
8128		ipw_rt->rt_rate = 72;
8129		break;
8130	case IPW_TX_RATE_48MB:
8131		ipw_rt->rt_rate = 96;
8132		break;
8133	case IPW_TX_RATE_54MB:
8134		ipw_rt->rt_rate = 108;
8135		break;
8136	default:
8137		ipw_rt->rt_rate = 0;
8138		break;
8139	}
8140
8141	/* antenna number */
8142	ipw_rt->rt_antenna = (phy_flags & 3);
8143
8144	/* set the preamble flag if we have it */
8145	if (phy_flags & (1 << 6))
8146		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8147
8148	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8149
8150	if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8151		dev->stats.rx_errors++;
8152		dev_kfree_skb_any(skb);
8153	}
8154}
8155#endif
8156
8157static int is_network_packet(struct ipw_priv *priv,
8158				    struct libipw_hdr_4addr *header)
8159{
8160	/* Filter incoming packets to determine if they are targeted toward
8161	 * this network, discarding packets coming from ourselves */
8162	switch (priv->ieee->iw_mode) {
8163	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
8164		/* packets from our adapter are dropped (echo) */
8165		if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8166			return 0;
8167
8168		/* {broad,multi}cast packets to our BSSID go through */
8169		if (is_multicast_ether_addr(header->addr1))
8170			return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8171
8172		/* packets to our adapter go through */
8173		return !memcmp(header->addr1, priv->net_dev->dev_addr,
8174			       ETH_ALEN);
8175
8176	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
8177		/* packets from our adapter are dropped (echo) */
8178		if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8179			return 0;
8180
8181		/* {broad,multi}cast packets to our BSS go through */
8182		if (is_multicast_ether_addr(header->addr1))
8183			return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8184
8185		/* packets to our adapter go through */
8186		return !memcmp(header->addr1, priv->net_dev->dev_addr,
8187			       ETH_ALEN);
8188	}
8189
8190	return 1;
8191}
8192
8193#define IPW_PACKET_RETRY_TIME HZ
8194
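/* A frame is treated as a duplicate if it repeats the last (seq, frag) seen
 * from the same sender within IPW_PACKET_RETRY_TIME (one second's worth of
 * jiffies); non-consecutive fragments within that window are dropped too. */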
8195static  int is_duplicate_packet(struct ipw_priv *priv,
8196				      struct libipw_hdr_4addr *header)
8197{
8198	u16 sc = le16_to_cpu(header->seq_ctl);
8199	u16 seq = WLAN_GET_SEQ_SEQ(sc);
8200	u16 frag = WLAN_GET_SEQ_FRAG(sc);
8201	u16 *last_seq, *last_frag;
8202	unsigned long *last_time;
8203
8204	switch (priv->ieee->iw_mode) {
8205	case IW_MODE_ADHOC:
8206		{
8207			struct list_head *p;
8208			struct ipw_ibss_seq *entry = NULL;
8209			u8 *mac = header->addr2;
8210			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8211
8212			__list_for_each(p, &priv->ibss_mac_hash[index]) {
8213				entry =
8214				    list_entry(p, struct ipw_ibss_seq, list);
8215				if (!memcmp(entry->mac, mac, ETH_ALEN))
8216					break;
8217			}
8218			if (p == &priv->ibss_mac_hash[index]) {
8219				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8220				if (!entry) {
8221					IPW_ERROR
8222					    ("Cannot malloc new mac entry\n");
8223					return 0;
8224				}
8225				memcpy(entry->mac, mac, ETH_ALEN);
8226				entry->seq_num = seq;
8227				entry->frag_num = frag;
8228				entry->packet_time = jiffies;
8229				list_add(&entry->list,
8230					 &priv->ibss_mac_hash[index]);
8231				return 0;
8232			}
8233			last_seq = &entry->seq_num;
8234			last_frag = &entry->frag_num;
8235			last_time = &entry->packet_time;
8236			break;
8237		}
8238	case IW_MODE_INFRA:
8239		last_seq = &priv->last_seq_num;
8240		last_frag = &priv->last_frag_num;
8241		last_time = &priv->last_packet_time;
8242		break;
8243	default:
8244		return 0;
8245	}
8246	if ((*last_seq == seq) &&
8247	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8248		if (*last_frag == frag)
8249			goto drop;
8250		if (*last_frag + 1 != frag)
8251			/* out-of-order fragment */
8252			goto drop;
8253	} else
8254		*last_seq = seq;
8255
8256	*last_frag = frag;
8257	*last_time = jiffies;
8258	return 0;
8259
8260      drop:
8261	/* The following check is commented out because we have observed the
8262	 * card receiving duplicate packets with the FCTL_RETRY bit not set
8263	 * in IBSS mode with fragmentation enabled.
8264	 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8265	return 1;
8266}
8267
8268static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8269				   struct ipw_rx_mem_buffer *rxb,
8270				   struct libipw_rx_stats *stats)
8271{
8272	struct sk_buff *skb = rxb->skb;
8273	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8274	struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8275	    (skb->data + IPW_RX_FRAME_SIZE);
8276
8277	libipw_rx_mgt(priv->ieee, header, stats);
8278
8279	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8280	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8281	      IEEE80211_STYPE_PROBE_RESP) ||
8282	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8283	      IEEE80211_STYPE_BEACON))) {
8284		if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8285			ipw_add_station(priv, header->addr2);
8286	}
8287
8288	if (priv->config & CFG_NET_STATS) {
8289		IPW_DEBUG_HC("sending stat packet\n");
8290
8291		/* Set the size of the skb to the size of the full
8292		 * ipw header and 802.11 frame */
8293		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8294			IPW_RX_FRAME_SIZE);
8295
8296		/* Advance past the ipw packet header to the 802.11 frame */
8297		skb_pull(skb, IPW_RX_FRAME_SIZE);
8298
8299		/* Push the libipw_rx_stats before the 802.11 frame */
8300		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8301
8302		skb->dev = priv->ieee->dev;
8303
8304		/* Point raw at the libipw_stats */
8305		skb_reset_mac_header(skb);
8306
8307		skb->pkt_type = PACKET_OTHERHOST;
8308		skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8309		memset(skb->cb, 0, sizeof(rxb->skb->cb));
8310		netif_rx(skb);
8311		rxb->skb = NULL;
8312	}
8313}
8314
8315/*
8316 * Main entry function for receiving a packet with 802.11 headers.  This
8317 * should be called whenever the FW has notified us that there is a new
8318 * skb in the receive queue.
8319 */
8320static void ipw_rx(struct ipw_priv *priv)
8321{
8322	struct ipw_rx_mem_buffer *rxb;
8323	struct ipw_rx_packet *pkt;
8324	struct libipw_hdr_4addr *header;
8325	u32 r, w, i;
8326	u8 network_packet;
8327	u8 fill_rx = 0;
8328
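	/* Walk our software read pointer up to the hardware's read index,
	 * handing each filled buffer to the appropriate handler, then return
	 * the buffer to the rx_used list and restock the ring. */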
8329	r = ipw_read32(priv, IPW_RX_READ_INDEX);
8330	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8331	i = priv->rxq->read;
8332
8333	if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8334		fill_rx = 1;
8335
8336	while (i != r) {
8337		rxb = priv->rxq->queue[i];
8338		if (unlikely(rxb == NULL)) {
8339			printk(KERN_CRIT "Queue not allocated!\n");
8340			break;
8341		}
8342		priv->rxq->queue[i] = NULL;
8343
8344		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8345					    IPW_RX_BUF_SIZE,
8346					    PCI_DMA_FROMDEVICE);
8347
8348		pkt = (struct ipw_rx_packet *)rxb->skb->data;
8349		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8350			     pkt->header.message_type,
8351			     pkt->header.rx_seq_num, pkt->header.control_bits);
8352
8353		switch (pkt->header.message_type) {
8354		case RX_FRAME_TYPE:	/* 802.11 frame */  {
8355				struct libipw_rx_stats stats = {
8356					.rssi = pkt->u.frame.rssi_dbm -
8357					    IPW_RSSI_TO_DBM,
8358					.signal =
8359					    pkt->u.frame.rssi_dbm -
8360					    IPW_RSSI_TO_DBM + 0x100,
8361					.noise =
8362					    le16_to_cpu(pkt->u.frame.noise),
8363					.rate = pkt->u.frame.rate,
8364					.mac_time = jiffies,
8365					.received_channel =
8366					    pkt->u.frame.received_channel,
8367					.freq =
8368					    (pkt->u.frame.
8369					     control & (1 << 0)) ?
8370					    LIBIPW_24GHZ_BAND :
8371					    LIBIPW_52GHZ_BAND,
8372					.len = le16_to_cpu(pkt->u.frame.length),
8373				};
8374
8375				if (stats.rssi != 0)
8376					stats.mask |= LIBIPW_STATMASK_RSSI;
8377				if (stats.signal != 0)
8378					stats.mask |= LIBIPW_STATMASK_SIGNAL;
8379				if (stats.noise != 0)
8380					stats.mask |= LIBIPW_STATMASK_NOISE;
8381				if (stats.rate != 0)
8382					stats.mask |= LIBIPW_STATMASK_RATE;
8383
8384				priv->rx_packets++;
8385
8386#ifdef CONFIG_IPW2200_PROMISCUOUS
8387	if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8388		ipw_handle_promiscuous_rx(priv, rxb, &stats);
8389#endif
8390
8391#ifdef CONFIG_IPW2200_MONITOR
8392				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8393#ifdef CONFIG_IPW2200_RADIOTAP
8394
8395					ipw_handle_data_packet_monitor(priv,
8396								       rxb,
8397								       &stats);
8398#else
8399					ipw_handle_data_packet(priv, rxb,
8400							       &stats);
8401#endif
8402					break;
8403				}
8404#endif
8405
8406				header =
8407				    (struct libipw_hdr_4addr *)(rxb->skb->
8408								   data +
8409								   IPW_RX_FRAME_SIZE);
8410				/* TODO: Check Ad-Hoc dest/source and make sure
8411				 * that we are actually parsing these packets
8412				 * correctly -- we should probably use the
8413				 * frame control of the packet and disregard
8414				 * the current iw_mode */
8415
8416				network_packet =
8417				    is_network_packet(priv, header);
8418				if (network_packet && priv->assoc_network) {
8419					priv->assoc_network->stats.rssi =
8420					    stats.rssi;
8421					priv->exp_avg_rssi =
8422					    exponential_average(priv->exp_avg_rssi,
8423					    stats.rssi, DEPTH_RSSI);
8424				}
8425
8426				IPW_DEBUG_RX("Frame: len=%u\n",
8427					     le16_to_cpu(pkt->u.frame.length));
8428
8429				if (le16_to_cpu(pkt->u.frame.length) <
8430				    libipw_get_hdrlen(le16_to_cpu(
8431						    header->frame_ctl))) {
8432					IPW_DEBUG_DROP
8433					    ("Received packet is too small. "
8434					     "Dropping.\n");
8435					priv->net_dev->stats.rx_errors++;
8436					priv->wstats.discard.misc++;
8437					break;
8438				}
8439
8440				switch (WLAN_FC_GET_TYPE
8441					(le16_to_cpu(header->frame_ctl))) {
8442
8443				case IEEE80211_FTYPE_MGMT:
8444					ipw_handle_mgmt_packet(priv, rxb,
8445							       &stats);
8446					break;
8447
8448				case IEEE80211_FTYPE_CTL:
8449					break;
8450
8451				case IEEE80211_FTYPE_DATA:
8452					if (unlikely(!network_packet ||
8453						     is_duplicate_packet(priv,
8454									 header)))
8455					{
8456						IPW_DEBUG_DROP("Dropping: "
8457							       "%pM, "
8458							       "%pM, "
8459							       "%pM\n",
8460							       header->addr1,
8461							       header->addr2,
8462							       header->addr3);
8463						break;
8464					}
8465
8466					ipw_handle_data_packet(priv, rxb,
8467							       &stats);
8468
8469					break;
8470				}
8471				break;
8472			}
8473
8474		case RX_HOST_NOTIFICATION_TYPE:{
8475				IPW_DEBUG_RX
8476				    ("Notification: subtype=%02X flags=%02X size=%d\n",
8477				     pkt->u.notification.subtype,
8478				     pkt->u.notification.flags,
8479				     le16_to_cpu(pkt->u.notification.size));
8480				ipw_rx_notification(priv, &pkt->u.notification);
8481				break;
8482			}
8483
8484		default:
8485			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8486				     pkt->header.message_type);
8487			break;
8488		}
8489
8490		/* For now we just don't re-use anything.  We can tweak this
8491		 * later to try and re-use notification packets and SKBs that
8492		 * fail to Rx correctly */
8493		if (rxb->skb != NULL) {
8494			dev_kfree_skb_any(rxb->skb);
8495			rxb->skb = NULL;
8496		}
8497
8498		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8499				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8500		list_add_tail(&rxb->list, &priv->rxq->rx_used);
8501
8502		i = (i + 1) % RX_QUEUE_SIZE;
8503
8504		/* If there are a lot of unused frames, restock the Rx queue
8505		 * so the ucode won't assert */
8506		if (fill_rx) {
8507			priv->rxq->read = i;
8508			ipw_rx_queue_replenish(priv);
8509		}
8510	}
8511
8512	/* Update the software read pointer to where we stopped */
8513	priv->rxq->read = i;
8514	ipw_rx_queue_restock(priv);
8515}
8516
8517#define DEFAULT_RTS_THRESHOLD     2304U
8518#define MIN_RTS_THRESHOLD         1U
8519#define MAX_RTS_THRESHOLD         2304U
8520#define DEFAULT_BEACON_INTERVAL   100U
8521#define	DEFAULT_SHORT_RETRY_LIMIT 7U
8522#define	DEFAULT_LONG_RETRY_LIMIT  4U
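/* The short/long retry limits of 7 and 4 match the usual 802.11
 * dot11ShortRetryLimit / dot11LongRetryLimit defaults; an RTS threshold of
 * 2304 effectively disables RTS/CTS for typical frame sizes. */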
8523
8524/**
8525 * ipw_sw_reset
8526 * @option: options to control different reset behaviour
8527 * 	    0 = reset everything except the 'disable' module_param
8528 * 	    1 = reset everything and print out driver info (for probe only)
8529 * 	    2 = reset everything
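 *
 * Returns nonzero if the interface mode (iw_mode) is unchanged by the
 * reset, zero if it changed.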
8530 */
8531static int ipw_sw_reset(struct ipw_priv *priv, int option)
8532{
8533	int band, modulation;
8534	int old_mode = priv->ieee->iw_mode;
8535
8536	/* Initialize module parameter values here */
8537	priv->config = 0;
8538
8539	/* We default to disabling the LED code as right now it causes
8540	 * too many systems to lock up... */
8541	if (!led_support)
8542		priv->config |= CFG_NO_LED;
8543
8544	if (associate)
8545		priv->config |= CFG_ASSOCIATE;
8546	else
8547		IPW_DEBUG_INFO("Auto associate disabled.\n");
8548
8549	if (auto_create)
8550		priv->config |= CFG_ADHOC_CREATE;
8551	else
8552		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8553
8554	priv->config &= ~CFG_STATIC_ESSID;
8555	priv->essid_len = 0;
8556	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8557
8558	if (disable && option) {
8559		priv->status |= STATUS_RF_KILL_SW;
8560		IPW_DEBUG_INFO("Radio disabled.\n");
8561	}
8562
8563	if (default_channel != 0) {
8564		priv->config |= CFG_STATIC_CHANNEL;
8565		priv->channel = default_channel;
8566		IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8567		/* TODO: Validate that provided channel is in range */
8568	}
8569#ifdef CONFIG_IPW2200_QOS
8570	ipw_qos_init(priv, qos_enable, qos_burst_enable,
8571		     burst_duration_CCK, burst_duration_OFDM);
8572#endif				/* CONFIG_IPW2200_QOS */
8573
8574	switch (network_mode) {
8575	case 1:
8576		priv->ieee->iw_mode = IW_MODE_ADHOC;
8577		priv->net_dev->type = ARPHRD_ETHER;
8578
8579		break;
8580#ifdef CONFIG_IPW2200_MONITOR
8581	case 2:
8582		priv->ieee->iw_mode = IW_MODE_MONITOR;
8583#ifdef CONFIG_IPW2200_RADIOTAP
8584		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8585#else
8586		priv->net_dev->type = ARPHRD_IEEE80211;
8587#endif
8588		break;
8589#endif
8590	default:
8591	case 0:
8592		priv->net_dev->type = ARPHRD_ETHER;
8593		priv->ieee->iw_mode = IW_MODE_INFRA;
8594		break;
8595	}
8596
8597	if (hwcrypto) {
8598		priv->ieee->host_encrypt = 0;
8599		priv->ieee->host_encrypt_msdu = 0;
8600		priv->ieee->host_decrypt = 0;
8601		priv->ieee->host_mc_decrypt = 0;
8602	}
8603	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8604
8605	/* IPW2200/2915 is able to do hardware fragmentation. */
8606	priv->ieee->host_open_frag = 0;
8607
8608	if ((priv->pci_dev->device == 0x4223) ||
8609	    (priv->pci_dev->device == 0x4224)) {
8610		if (option == 1)
8611			printk(KERN_INFO DRV_NAME
8612			       ": Detected Intel PRO/Wireless 2915ABG Network "
8613			       "Connection\n");
8614		priv->ieee->abg_true = 1;
8615		band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8616		modulation = LIBIPW_OFDM_MODULATION |
8617		    LIBIPW_CCK_MODULATION;
8618		priv->adapter = IPW_2915ABG;
8619		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8620	} else {
8621		if (option == 1)
8622			printk(KERN_INFO DRV_NAME
8623			       ": Detected Intel PRO/Wireless 2200BG Network "
8624			       "Connection\n");
8625
8626		priv->ieee->abg_true = 0;
8627		band = LIBIPW_24GHZ_BAND;
8628		modulation = LIBIPW_OFDM_MODULATION |
8629		    LIBIPW_CCK_MODULATION;
8630		priv->adapter = IPW_2200BG;
8631		priv->ieee->mode = IEEE_G | IEEE_B;
8632	}
8633
8634	priv->ieee->freq_band = band;
8635	priv->ieee->modulation = modulation;
8636
8637	priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8638
8639	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8640	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8641
8642	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8643	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8644	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8645
8646	/* If power management is turned on, default to AC mode */
8647	priv->power_mode = IPW_POWER_AC;
8648	priv->tx_power = IPW_TX_POWER_DEFAULT;
8649
8650	return old_mode == priv->ieee->iw_mode;
8651}
8652
8653/*
8654 * This file defines the Wireless Extension handlers.  It does not
8655 * define any methods of hardware manipulation and relies on the
8656 * functions defined in ipw_main to provide the HW interaction.
8657 *
8658 * The exception to this is the use of the ipw_get_ordinal()
8659 * function used to poll the hardware vs. making unnecessary calls.
8660 *
8661 */
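
/*
 * Illustrative mapping from user space to the handlers below (assuming the
 * standard Wireless Extensions ioctls; the actual handler table appears
 * later in this file):
 *
 *   iwconfig <iface> essid "foo"  -> SIOCSIWESSID -> ipw_wx_set_essid()
 *   iwconfig <iface> channel 6    -> SIOCSIWFREQ  -> ipw_wx_set_freq()
 *   iwconfig <iface> mode Ad-Hoc  -> SIOCSIWMODE  -> ipw_wx_set_mode()
 */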
8662
8663static int ipw_wx_get_name(struct net_device *dev,
8664			   struct iw_request_info *info,
8665			   union iwreq_data *wrqu, char *extra)
8666{
8667	struct ipw_priv *priv = libipw_priv(dev);
8668	mutex_lock(&priv->mutex);
8669	if (priv->status & STATUS_RF_KILL_MASK)
8670		strcpy(wrqu->name, "radio off");
8671	else if (!(priv->status & STATUS_ASSOCIATED))
8672		strcpy(wrqu->name, "unassociated");
8673	else
8674		snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8675			 ipw_modes[priv->assoc_request.ieee_mode]);
8676	IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8677	mutex_unlock(&priv->mutex);
8678	return 0;
8679}
8680
8681static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8682{
8683	if (channel == 0) {
8684		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8685		priv->config &= ~CFG_STATIC_CHANNEL;
8686		IPW_DEBUG_ASSOC("Attempting to associate with new "
8687				"parameters.\n");
8688		ipw_associate(priv);
8689		return 0;
8690	}
8691
8692	priv->config |= CFG_STATIC_CHANNEL;
8693
8694	if (priv->channel == channel) {
8695		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8696			       channel);
8697		return 0;
8698	}
8699
8700	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8701	priv->channel = channel;
8702
8703#ifdef CONFIG_IPW2200_MONITOR
8704	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8705		int i;
8706		if (priv->status & STATUS_SCANNING) {
8707			IPW_DEBUG_SCAN("Scan abort triggered due to "
8708				       "channel change.\n");
8709			ipw_abort_scan(priv);
8710		}
8711
8712		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8713			udelay(10);
8714
8715		if (priv->status & STATUS_SCANNING)
8716			IPW_DEBUG_SCAN("Still scanning...\n");
8717		else
8718			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8719				       1000 - i);
8720
8721		return 0;
8722	}
8723#endif				/* CONFIG_IPW2200_MONITOR */
8724
8725	/* Network configuration changed -- force [re]association */
8726	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8727	if (!ipw_disassociate(priv))
8728		ipw_associate(priv);
8729
8730	return 0;
8731}
8732
8733static int ipw_wx_set_freq(struct net_device *dev,
8734			   struct iw_request_info *info,
8735			   union iwreq_data *wrqu, char *extra)
8736{
8737	struct ipw_priv *priv = libipw_priv(dev);
8738	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8739	struct iw_freq *fwrq = &wrqu->freq;
8740	int ret = 0, i;
8741	u8 channel, flags;
8742	int band;
8743
8744	if (fwrq->m == 0) {
8745		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8746		mutex_lock(&priv->mutex);
8747		ret = ipw_set_channel(priv, 0);
8748		mutex_unlock(&priv->mutex);
8749		return ret;
8750	}
8751	/* if setting by freq convert to channel */
8752	if (fwrq->e == 1) {
8753		channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8754		if (channel == 0)
8755			return -EINVAL;
8756	} else
8757		channel = fwrq->m;
8758
8759	if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8760		return -EINVAL;
8761
8762	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8763		i = libipw_channel_to_index(priv->ieee, channel);
8764		if (i == -1)
8765			return -EINVAL;
8766
8767		flags = (band == LIBIPW_24GHZ_BAND) ?
8768		    geo->bg[i].flags : geo->a[i].flags;
8769		if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8770			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8771			return -EINVAL;
8772		}
8773	}
8774
8775	IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8776	mutex_lock(&priv->mutex);
8777	ret = ipw_set_channel(priv, channel);
8778	mutex_unlock(&priv->mutex);
8779	return ret;
8780}
8781
8782static int ipw_wx_get_freq(struct net_device *dev,
8783			   struct iw_request_info *info,
8784			   union iwreq_data *wrqu, char *extra)
8785{
8786	struct ipw_priv *priv = libipw_priv(dev);
8787
8788	wrqu->freq.e = 0;
8789
8790	/* If we are associated, trying to associate, or have a statically
8791	 * configured CHANNEL then return that; otherwise return ANY */
8792	mutex_lock(&priv->mutex);
8793	if (priv->config & CFG_STATIC_CHANNEL ||
8794	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8795		int i;
8796
8797		i = libipw_channel_to_index(priv->ieee, priv->channel);
8798		BUG_ON(i == -1);
8799		wrqu->freq.e = 1;
8800
8801		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
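		/* Wireless Extensions express a frequency as m * 10^e Hz; the
		 * geo tables store MHz, so e.g. channel 1 (2412 MHz) is
		 * reported as m = 241200000 with e = 1, i.e. 2.412 GHz. */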
8802		case LIBIPW_52GHZ_BAND:
8803			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8804			break;
8805
8806		case LIBIPW_24GHZ_BAND:
8807			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8808			break;
8809
8810		default:
8811			BUG();
8812		}
8813	} else
8814		wrqu->freq.m = 0;
8815
8816	mutex_unlock(&priv->mutex);
8817	IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8818	return 0;
8819}
8820
8821static int ipw_wx_set_mode(struct net_device *dev,
8822			   struct iw_request_info *info,
8823			   union iwreq_data *wrqu, char *extra)
8824{
8825	struct ipw_priv *priv = libipw_priv(dev);
8826	int err = 0;
8827
8828	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8829
8830	switch (wrqu->mode) {
8831#ifdef CONFIG_IPW2200_MONITOR
8832	case IW_MODE_MONITOR:
8833#endif
8834	case IW_MODE_ADHOC:
8835	case IW_MODE_INFRA:
8836		break;
8837	case IW_MODE_AUTO:
8838		wrqu->mode = IW_MODE_INFRA;
8839		break;
8840	default:
8841		return -EINVAL;
8842	}
8843	if (wrqu->mode == priv->ieee->iw_mode)
8844		return 0;
8845
8846	mutex_lock(&priv->mutex);
8847
8848	ipw_sw_reset(priv, 0);
8849
8850#ifdef CONFIG_IPW2200_MONITOR
8851	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8852		priv->net_dev->type = ARPHRD_ETHER;
8853
8854	if (wrqu->mode == IW_MODE_MONITOR)
8855#ifdef CONFIG_IPW2200_RADIOTAP
8856		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8857#else
8858		priv->net_dev->type = ARPHRD_IEEE80211;
8859#endif
8860#endif				/* CONFIG_IPW2200_MONITOR */
8861
8862	/* Free the existing firmware and reset the fw_loaded
8863	 * flag so ipw_load() will bring in the new firmware */
8864	free_firmware();
8865
8866	priv->ieee->iw_mode = wrqu->mode;
8867
8868	queue_work(priv->workqueue, &priv->adapter_restart);
8869	mutex_unlock(&priv->mutex);
8870	return err;
8871}
8872
8873static int ipw_wx_get_mode(struct net_device *dev,
8874			   struct iw_request_info *info,
8875			   union iwreq_data *wrqu, char *extra)
8876{
8877	struct ipw_priv *priv = libipw_priv(dev);
8878	mutex_lock(&priv->mutex);
8879	wrqu->mode = priv->ieee->iw_mode;
8880	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8881	mutex_unlock(&priv->mutex);
8882	return 0;
8883}
8884
8885/* Values are in microseconds */
8886static const s32 timeout_duration[] = {
8887	350000,
8888	250000,
8889	75000,
8890	37000,
8891	25000,
8892};
8893
8894static const s32 period_duration[] = {
8895	400000,
8896	700000,
8897	1000000,
8898	1000000,
8899	1000000
8900};
8901
8902static int ipw_wx_get_range(struct net_device *dev,
8903			    struct iw_request_info *info,
8904			    union iwreq_data *wrqu, char *extra)
8905{
8906	struct ipw_priv *priv = libipw_priv(dev);
8907	struct iw_range *range = (struct iw_range *)extra;
8908	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8909	int i = 0, j;
8910
8911	wrqu->data.length = sizeof(*range);
8912	memset(range, 0, sizeof(*range));
8913
8914	/* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8915	range->throughput = 27 * 1000 * 1000;
8916
8917	range->max_qual.qual = 100;
8918	/* TODO: Find real max RSSI and stick here */
8919	range->max_qual.level = 0;
8920	range->max_qual.noise = 0;
8921	range->max_qual.updated = 7;	/* Updated all three */
8922
8923	range->avg_qual.qual = 70;
8924	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8925	range->avg_qual.level = 0;	/* FIXME to real average level */
8926	range->avg_qual.noise = 0;
8927	range->avg_qual.updated = 7;	/* Updated all three */
8928	mutex_lock(&priv->mutex);
8929	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8930
8931	for (i = 0; i < range->num_bitrates; i++)
8932		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8933		    500000;
8934
8935	range->max_rts = DEFAULT_RTS_THRESHOLD;
8936	range->min_frag = MIN_FRAG_THRESHOLD;
8937	range->max_frag = MAX_FRAG_THRESHOLD;
8938
8939	range->encoding_size[0] = 5;
8940	range->encoding_size[1] = 13;
8941	range->num_encoding_sizes = 2;
8942	range->max_encoding_tokens = WEP_KEYS;
8943
8944	/* Set the Wireless Extension versions */
8945	range->we_version_compiled = WIRELESS_EXT;
8946	range->we_version_source = 18;
8947
8948	i = 0;
8949	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8950		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8951			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8952			    (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8953				continue;
8954
8955			range->freq[i].i = geo->bg[j].channel;
8956			range->freq[i].m = geo->bg[j].freq * 100000;
8957			range->freq[i].e = 1;
8958			i++;
8959		}
8960	}
8961
8962	if (priv->ieee->mode & IEEE_A) {
8963		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8964			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8965			    (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8966				continue;
8967
8968			range->freq[i].i = geo->a[j].channel;
8969			range->freq[i].m = geo->a[j].freq * 100000;
8970			range->freq[i].e = 1;
8971			i++;
8972		}
8973	}
8974
8975	range->num_channels = i;
8976	range->num_frequency = i;
8977
8978	mutex_unlock(&priv->mutex);
8979
8980	/* Event capability (kernel + driver) */
8981	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8982				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8983				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8984				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8985	range->event_capa[1] = IW_EVENT_CAPA_K_1;
8986
8987	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8988		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8989
8990	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8991
8992	IPW_DEBUG_WX("GET Range\n");
8993	return 0;
8994}
8995
8996static int ipw_wx_set_wap(struct net_device *dev,
8997			  struct iw_request_info *info,
8998			  union iwreq_data *wrqu, char *extra)
8999{
9000	struct ipw_priv *priv = libipw_priv(dev);
9001
9002	static const unsigned char any[] = {
9003		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
9004	};
9005	static const unsigned char off[] = {
9006		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
9007	};
9008
9009	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9010		return -EINVAL;
9011	mutex_lock(&priv->mutex);
9012	if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
9013	    !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9014		/* we disable mandatory BSSID association */
9015		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9016		priv->config &= ~CFG_STATIC_BSSID;
9017		IPW_DEBUG_ASSOC("Attempting to associate with new "
9018				"parameters.\n");
9019		ipw_associate(priv);
9020		mutex_unlock(&priv->mutex);
9021		return 0;
9022	}
9023
9024	priv->config |= CFG_STATIC_BSSID;
9025	if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9026		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9027		mutex_unlock(&priv->mutex);
9028		return 0;
9029	}
9030
9031	IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9032		     wrqu->ap_addr.sa_data);
9033
9034	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9035
9036	/* Network configuration changed -- force [re]association */
9037	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9038	if (!ipw_disassociate(priv))
9039		ipw_associate(priv);
9040
9041	mutex_unlock(&priv->mutex);
9042	return 0;
9043}
9044
9045static int ipw_wx_get_wap(struct net_device *dev,
9046			  struct iw_request_info *info,
9047			  union iwreq_data *wrqu, char *extra)
9048{
9049	struct ipw_priv *priv = libipw_priv(dev);
9050
9051	/* If we are associated, trying to associate, or have a statically
9052	 * configured BSSID then return that; otherwise return ANY */
9053	mutex_lock(&priv->mutex);
9054	if (priv->config & CFG_STATIC_BSSID ||
9055	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9056		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9057		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9058	} else
9059		memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9060
9061	IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9062		     wrqu->ap_addr.sa_data);
9063	mutex_unlock(&priv->mutex);
9064	return 0;
9065}
9066
9067static int ipw_wx_set_essid(struct net_device *dev,
9068			    struct iw_request_info *info,
9069			    union iwreq_data *wrqu, char *extra)
9070{
9071	struct ipw_priv *priv = libipw_priv(dev);
9072	int length;
9073	DECLARE_SSID_BUF(ssid);
9074
9075	mutex_lock(&priv->mutex);
9076
9077	if (!wrqu->essid.flags)
9078	{
9079		IPW_DEBUG_WX("Setting ESSID to ANY\n");
9080		ipw_disassociate(priv);
9081		priv->config &= ~CFG_STATIC_ESSID;
9082		ipw_associate(priv);
9083		mutex_unlock(&priv->mutex);
9084		return 0;
9085	}
9086
9087	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9088
9089	priv->config |= CFG_STATIC_ESSID;
9090
9091	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9092	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9093		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9094		mutex_unlock(&priv->mutex);
9095		return 0;
9096	}
9097
9098	IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9099		     print_ssid(ssid, extra, length), length);
9100
9101	priv->essid_len = length;
9102	memcpy(priv->essid, extra, priv->essid_len);
9103
9104	/* Network configuration changed -- force [re]association */
9105	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9106	if (!ipw_disassociate(priv))
9107		ipw_associate(priv);
9108
9109	mutex_unlock(&priv->mutex);
9110	return 0;
9111}
9112
9113static int ipw_wx_get_essid(struct net_device *dev,
9114			    struct iw_request_info *info,
9115			    union iwreq_data *wrqu, char *extra)
9116{
9117	struct ipw_priv *priv = libipw_priv(dev);
9118	DECLARE_SSID_BUF(ssid);
9119
9120	/* If we are associated, trying to associate, or have a statically
9121	 * configured ESSID then return that; otherwise return ANY */
9122	mutex_lock(&priv->mutex);
9123	if (priv->config & CFG_STATIC_ESSID ||
9124	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9125		IPW_DEBUG_WX("Getting essid: '%s'\n",
9126			     print_ssid(ssid, priv->essid, priv->essid_len));
9127		memcpy(extra, priv->essid, priv->essid_len);
9128		wrqu->essid.length = priv->essid_len;
9129		wrqu->essid.flags = 1;	/* active */
9130	} else {
9131		IPW_DEBUG_WX("Getting essid: ANY\n");
9132		wrqu->essid.length = 0;
9133		wrqu->essid.flags = 0;	/* any */
9134	}
9135	mutex_unlock(&priv->mutex);
9136	return 0;
9137}
9138
9139static int ipw_wx_set_nick(struct net_device *dev,
9140			   struct iw_request_info *info,
9141			   union iwreq_data *wrqu, char *extra)
9142{
9143	struct ipw_priv *priv = libipw_priv(dev);
9144
9145	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9146	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9147		return -E2BIG;
9148	mutex_lock(&priv->mutex);
9149	wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9150	memset(priv->nick, 0, sizeof(priv->nick));
9151	memcpy(priv->nick, extra, wrqu->data.length);
9152	IPW_DEBUG_TRACE("<<\n");
9153	mutex_unlock(&priv->mutex);
9154	return 0;
9155
9156}
9157
9158static int ipw_wx_get_nick(struct net_device *dev,
9159			   struct iw_request_info *info,
9160			   union iwreq_data *wrqu, char *extra)
9161{
9162	struct ipw_priv *priv = libipw_priv(dev);
9163	IPW_DEBUG_WX("Getting nick\n");
9164	mutex_lock(&priv->mutex);
9165	wrqu->data.length = strlen(priv->nick);
9166	memcpy(extra, priv->nick, wrqu->data.length);
9167	wrqu->data.flags = 1;	/* active */
9168	mutex_unlock(&priv->mutex);
9169	return 0;
9170}
9171
9172static int ipw_wx_set_sens(struct net_device *dev,
9173			    struct iw_request_info *info,
9174			    union iwreq_data *wrqu, char *extra)
9175{
9176	struct ipw_priv *priv = libipw_priv(dev);
9177	int err = 0;
9178
9179	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9180	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3 * wrqu->sens.value);
9181	mutex_lock(&priv->mutex);
9182
9183	if (wrqu->sens.fixed == 0)
9184	{
9185		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9186		priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9187		goto out;
9188	}
9189	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9190	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9191		err = -EINVAL;
9192		goto out;
9193	}
9194
9195	priv->roaming_threshold = wrqu->sens.value;
9196	priv->disassociate_threshold = 3 * wrqu->sens.value;
9197      out:
9198	mutex_unlock(&priv->mutex);
9199	return err;
9200}
9201
9202static int ipw_wx_get_sens(struct net_device *dev,
9203			    struct iw_request_info *info,
9204			    union iwreq_data *wrqu, char *extra)
9205{
9206	struct ipw_priv *priv = libipw_priv(dev);
9207	mutex_lock(&priv->mutex);
9208	wrqu->sens.fixed = 1;
9209	wrqu->sens.value = priv->roaming_threshold;
9210	mutex_unlock(&priv->mutex);
9211
9212	IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9213		     wrqu->sens.fixed ? "fixed" : "auto", wrqu->sens.value);
9214
9215	return 0;
9216}
9217
9218static int ipw_wx_set_rate(struct net_device *dev,
9219			   struct iw_request_info *info,
9220			   union iwreq_data *wrqu, char *extra)
9221{
9222	/* TODO: We should use semaphores or locks for access to priv */
9223	struct ipw_priv *priv = libipw_priv(dev);
9224	u32 target_rate = wrqu->bitrate.value;
9225	u32 fixed, mask;
9226
9227	/* value = -1, fixed = 0 means auto only, so we should use all rates offered by the AP */
9228	/* value = X, fixed = 1 means only rate X */
9229	/* value = X, fixed = 0 means all rates less than or equal to X */
9230
9231	if (target_rate == -1) {
9232		fixed = 0;
9233		mask = LIBIPW_DEFAULT_RATES_MASK;
9234		/* Now we should reassociate */
9235		goto apply;
9236	}
9237
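	/* Build the rate mask cumulatively: with fixed == 0 every rate up to
	 * and including the requested one is enabled; with fixed == 1 only
	 * the bit for that exact rate is set.  Rates are given in bits per
	 * second (e.g. 5500000 == 5.5 Mb/s). */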
9238	mask = 0;
9239	fixed = wrqu->bitrate.fixed;
9240
9241	if (target_rate == 1000000 || !fixed)
9242		mask |= LIBIPW_CCK_RATE_1MB_MASK;
9243	if (target_rate == 1000000)
9244		goto apply;
9245
9246	if (target_rate == 2000000 || !fixed)
9247		mask |= LIBIPW_CCK_RATE_2MB_MASK;
9248	if (target_rate == 2000000)
9249		goto apply;
9250
9251	if (target_rate == 5500000 || !fixed)
9252		mask |= LIBIPW_CCK_RATE_5MB_MASK;
9253	if (target_rate == 5500000)
9254		goto apply;
9255
9256	if (target_rate == 6000000 || !fixed)
9257		mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9258	if (target_rate == 6000000)
9259		goto apply;
9260
9261	if (target_rate == 9000000 || !fixed)
9262		mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9263	if (target_rate == 9000000)
9264		goto apply;
9265
9266	if (target_rate == 11000000 || !fixed)
9267		mask |= LIBIPW_CCK_RATE_11MB_MASK;
9268	if (target_rate == 11000000)
9269		goto apply;
9270
9271	if (target_rate == 12000000 || !fixed)
9272		mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9273	if (target_rate == 12000000)
9274		goto apply;
9275
9276	if (target_rate == 18000000 || !fixed)
9277		mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9278	if (target_rate == 18000000)
9279		goto apply;
9280
9281	if (target_rate == 24000000 || !fixed)
9282		mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9283	if (target_rate == 24000000)
9284		goto apply;
9285
9286	if (target_rate == 36000000 || !fixed)
9287		mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9288	if (target_rate == 36000000)
9289		goto apply;
9290
9291	if (target_rate == 48000000 || !fixed)
9292		mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9293	if (target_rate == 48000000)
9294		goto apply;
9295
9296	if (target_rate == 54000000 || !fixed)
9297		mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9298	if (target_rate == 54000000)
9299		goto apply;
9300
9301	IPW_DEBUG_WX("invalid rate specified, returning error\n");
9302	return -EINVAL;
9303
9304      apply:
9305	IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9306		     mask, fixed ? "fixed" : "sub-rates");
9307	mutex_lock(&priv->mutex);
9308	if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9309		priv->config &= ~CFG_FIXED_RATE;
9310		ipw_set_fixed_rate(priv, priv->ieee->mode);
9311	} else
9312		priv->config |= CFG_FIXED_RATE;
9313
9314	if (priv->rates_mask == mask) {
9315		IPW_DEBUG_WX("Mask set to current mask.\n");
9316		mutex_unlock(&priv->mutex);
9317		return 0;
9318	}
9319
9320	priv->rates_mask = mask;
9321
9322	/* Network configuration changed -- force [re]association */
9323	IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9324	if (!ipw_disassociate(priv))
9325		ipw_associate(priv);
9326
9327	mutex_unlock(&priv->mutex);
9328	return 0;
9329}
9330
9331static int ipw_wx_get_rate(struct net_device *dev,
9332			   struct iw_request_info *info,
9333			   union iwreq_data *wrqu, char *extra)
9334{
9335	struct ipw_priv *priv = libipw_priv(dev);
9336	mutex_lock(&priv->mutex);
9337	wrqu->bitrate.value = priv->last_rate;
9338	wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9339	mutex_unlock(&priv->mutex);
9340	IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9341	return 0;
9342}
9343
9344static int ipw_wx_set_rts(struct net_device *dev,
9345			  struct iw_request_info *info,
9346			  union iwreq_data *wrqu, char *extra)
9347{
9348	struct ipw_priv *priv = libipw_priv(dev);
9349	mutex_lock(&priv->mutex);
9350	if (wrqu->rts.disabled || !wrqu->rts.fixed)
9351		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9352	else {
9353		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9354		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
9355			mutex_unlock(&priv->mutex);
9356			return -EINVAL;
9357		}
9358		priv->rts_threshold = wrqu->rts.value;
9359	}
9360
9361	ipw_send_rts_threshold(priv, priv->rts_threshold);
9362	mutex_unlock(&priv->mutex);
9363	IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9364	return 0;
9365}
9366
9367static int ipw_wx_get_rts(struct net_device *dev,
9368			  struct iw_request_info *info,
9369			  union iwreq_data *wrqu, char *extra)
9370{
9371	struct ipw_priv *priv = libipw_priv(dev);
9372	mutex_lock(&priv->mutex);
9373	wrqu->rts.value = priv->rts_threshold;
9374	wrqu->rts.fixed = 0;	/* no auto select */
9375	wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9376	mutex_unlock(&priv->mutex);
9377	IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9378	return 0;
9379}
9380
9381static int ipw_wx_set_txpow(struct net_device *dev,
9382			    struct iw_request_info *info,
9383			    union iwreq_data *wrqu, char *extra)
9384{
9385	struct ipw_priv *priv = libipw_priv(dev);
9386	int err = 0;
9387
9388	mutex_lock(&priv->mutex);
9389	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9390		err = -EINPROGRESS;
9391		goto out;
9392	}
9393
9394	if (!wrqu->power.fixed)
9395		wrqu->power.value = IPW_TX_POWER_DEFAULT;
9396
9397	if (wrqu->power.flags != IW_TXPOW_DBM) {
9398		err = -EINVAL;
9399		goto out;
9400	}
9401
9402	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9403	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
9404		err = -EINVAL;
9405		goto out;
9406	}
9407
9408	priv->tx_power = wrqu->power.value;
9409	err = ipw_set_tx_power(priv);
9410      out:
9411	mutex_unlock(&priv->mutex);
9412	return err;
9413}
9414
9415static int ipw_wx_get_txpow(struct net_device *dev,
9416			    struct iw_request_info *info,
9417			    union iwreq_data *wrqu, char *extra)
9418{
9419	struct ipw_priv *priv = libipw_priv(dev);
9420	mutex_lock(&priv->mutex);
9421	wrqu->power.value = priv->tx_power;
9422	wrqu->power.fixed = 1;
9423	wrqu->power.flags = IW_TXPOW_DBM;
9424	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9425	mutex_unlock(&priv->mutex);
9426
9427	IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9428		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9429
9430	return 0;
9431}
9432
9433static int ipw_wx_set_frag(struct net_device *dev,
9434			   struct iw_request_info *info,
9435			   union iwreq_data *wrqu, char *extra)
9436{
9437	struct ipw_priv *priv = libipw_priv(dev);
9438	mutex_lock(&priv->mutex);
9439	if (wrqu->frag.disabled || !wrqu->frag.fixed)
9440		priv->ieee->fts = DEFAULT_FTS;
9441	else {
9442		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9443		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9444			mutex_unlock(&priv->mutex);
9445			return -EINVAL;
9446		}
9447
9448		priv->ieee->fts = wrqu->frag.value & ~0x1;
9449	}
9450
9451	ipw_send_frag_threshold(priv, wrqu->frag.value);
9452	mutex_unlock(&priv->mutex);
9453	IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9454	return 0;
9455}
9456
9457static int ipw_wx_get_frag(struct net_device *dev,
9458			   struct iw_request_info *info,
9459			   union iwreq_data *wrqu, char *extra)
9460{
9461	struct ipw_priv *priv = libipw_priv(dev);
9462	mutex_lock(&priv->mutex);
9463	wrqu->frag.value = priv->ieee->fts;
9464	wrqu->frag.fixed = 0;	/* no auto select */
9465	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9466	mutex_unlock(&priv->mutex);
9467	IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9468
9469	return 0;
9470}
9471
9472static int ipw_wx_set_retry(struct net_device *dev,
9473			    struct iw_request_info *info,
9474			    union iwreq_data *wrqu, char *extra)
9475{
9476	struct ipw_priv *priv = libipw_priv(dev);
9477
9478	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9479		return -EINVAL;
9480
9481	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9482		return 0;
9483
9484	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9485		return -EINVAL;
9486
9487	mutex_lock(&priv->mutex);
9488	if (wrqu->retry.flags & IW_RETRY_SHORT)
9489		priv->short_retry_limit = (u8) wrqu->retry.value;
9490	else if (wrqu->retry.flags & IW_RETRY_LONG)
9491		priv->long_retry_limit = (u8) wrqu->retry.value;
9492	else {
9493		priv->short_retry_limit = (u8) wrqu->retry.value;
9494		priv->long_retry_limit = (u8) wrqu->retry.value;
9495	}
9496
9497	ipw_send_retry_limit(priv, priv->short_retry_limit,
9498			     priv->long_retry_limit);
9499	mutex_unlock(&priv->mutex);
9500	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9501		     priv->short_retry_limit, priv->long_retry_limit);
9502	return 0;
9503}
9504
9505static int ipw_wx_get_retry(struct net_device *dev,
9506			    struct iw_request_info *info,
9507			    union iwreq_data *wrqu, char *extra)
9508{
9509	struct ipw_priv *priv = libipw_priv(dev);
9510
9511	mutex_lock(&priv->mutex);
9512	wrqu->retry.disabled = 0;
9513
9514	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9515		mutex_unlock(&priv->mutex);
9516		return -EINVAL;
9517	}
9518
9519	if (wrqu->retry.flags & IW_RETRY_LONG) {
9520		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9521		wrqu->retry.value = priv->long_retry_limit;
9522	} else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9523		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9524		wrqu->retry.value = priv->short_retry_limit;
9525	} else {
9526		wrqu->retry.flags = IW_RETRY_LIMIT;
9527		wrqu->retry.value = priv->short_retry_limit;
9528	}
9529	mutex_unlock(&priv->mutex);
9530
9531	IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9532
9533	return 0;
9534}
9535
9536static int ipw_wx_set_scan(struct net_device *dev,
9537			   struct iw_request_info *info,
9538			   union iwreq_data *wrqu, char *extra)
9539{
9540	struct ipw_priv *priv = libipw_priv(dev);
9541	struct iw_scan_req *req = (struct iw_scan_req *)extra;
9542	struct delayed_work *work = NULL;
9543
9544	mutex_lock(&priv->mutex);
9545
9546	priv->user_requested_scan = 1;
9547
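	/* A full-sized iw_scan_req means user space supplied extra scan
	 * parameters: either a directed scan for a single ESSID or an
	 * explicitly passive scan.  Anything else falls through to the
	 * normal active broadcast scan below. */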
9548	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9549		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9550			int len = min((int)req->essid_len,
9551			              (int)sizeof(priv->direct_scan_ssid));
9552			memcpy(priv->direct_scan_ssid, req->essid, len);
9553			priv->direct_scan_ssid_len = len;
9554			work = &priv->request_direct_scan;
9555		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9556			work = &priv->request_passive_scan;
9557		}
9558	} else {
9559		/* Normal active broadcast scan */
9560		work = &priv->request_scan;
9561	}
9562
9563	mutex_unlock(&priv->mutex);
9564
9565	IPW_DEBUG_WX("Start scan\n");
9566
9567	queue_delayed_work(priv->workqueue, work, 0);
9568
9569	return 0;
9570}
9571
9572static int ipw_wx_get_scan(struct net_device *dev,
9573			   struct iw_request_info *info,
9574			   union iwreq_data *wrqu, char *extra)
9575{
9576	struct ipw_priv *priv = libipw_priv(dev);
9577	return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9578}
9579
9580static int ipw_wx_set_encode(struct net_device *dev,
9581			     struct iw_request_info *info,
9582			     union iwreq_data *wrqu, char *key)
9583{
9584	struct ipw_priv *priv = libipw_priv(dev);
9585	int ret;
9586	u32 cap = priv->capability;
9587
9588	mutex_lock(&priv->mutex);
9589	ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9590
9591	/* In IBSS mode, we need to notify the firmware to update
9592	 * the beacon info after we changed the capability. */
9593	if (cap != priv->capability &&
9594	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
9595	    priv->status & STATUS_ASSOCIATED)
9596		ipw_disassociate(priv);
9597
9598	mutex_unlock(&priv->mutex);
9599	return ret;
9600}
9601
9602static int ipw_wx_get_encode(struct net_device *dev,
9603			     struct iw_request_info *info,
9604			     union iwreq_data *wrqu, char *key)
9605{
9606	struct ipw_priv *priv = libipw_priv(dev);
9607	return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9608}
9609
9610static int ipw_wx_set_power(struct net_device *dev,
9611			    struct iw_request_info *info,
9612			    union iwreq_data *wrqu, char *extra)
9613{
9614	struct ipw_priv *priv = libipw_priv(dev);
9615	int err;
9616	mutex_lock(&priv->mutex);
9617	if (wrqu->power.disabled) {
9618		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9619		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9620		if (err) {
9621			IPW_DEBUG_WX("failed setting power mode.\n");
9622			mutex_unlock(&priv->mutex);
9623			return err;
9624		}
9625		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9626		mutex_unlock(&priv->mutex);
9627		return 0;
9628	}
9629
9630	switch (wrqu->power.flags & IW_POWER_MODE) {
9631	case IW_POWER_ON:	/* If not specified */
9632	case IW_POWER_MODE:	/* If set all mask */
9633	case IW_POWER_ALL_R:	/* If explicitly state all */
9634		break;
9635	default:		/* Otherwise we don't support it */
9636		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9637			     wrqu->power.flags);
9638		mutex_unlock(&priv->mutex);
9639		return -EOPNOTSUPP;
9640	}
9641
9642	/* If the user hasn't specified a power management mode yet, default
9643	 * to BATTERY */
9644	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9645		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9646	else
9647		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9648
9649	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9650	if (err) {
9651		IPW_DEBUG_WX("failed setting power mode.\n");
9652		mutex_unlock(&priv->mutex);
9653		return err;
9654	}
9655
9656	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9657	mutex_unlock(&priv->mutex);
9658	return 0;
9659}
9660
9661static int ipw_wx_get_power(struct net_device *dev,
9662			    struct iw_request_info *info,
9663			    union iwreq_data *wrqu, char *extra)
9664{
9665	struct ipw_priv *priv = libipw_priv(dev);
9666	mutex_lock(&priv->mutex);
9667	if (!(priv->power_mode & IPW_POWER_ENABLED))
9668		wrqu->power.disabled = 1;
9669	else
9670		wrqu->power.disabled = 0;
9671
9672	mutex_unlock(&priv->mutex);
9673	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9674
9675	return 0;
9676}
9677
9678static int ipw_wx_set_powermode(struct net_device *dev,
9679				struct iw_request_info *info,
9680				union iwreq_data *wrqu, char *extra)
9681{
9682	struct ipw_priv *priv = libipw_priv(dev);
9683	int mode = *(int *)extra;
9684	int err;
9685
9686	mutex_lock(&priv->mutex);
9687	if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9688		mode = IPW_POWER_AC;
9689
9690	if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9691		err = ipw_send_power_mode(priv, mode);
9692		if (err) {
9693			IPW_DEBUG_WX("failed setting power mode.\n");
9694			mutex_unlock(&priv->mutex);
9695			return err;
9696		}
9697		priv->power_mode = IPW_POWER_ENABLED | mode;
9698	}
9699	mutex_unlock(&priv->mutex);
9700	return 0;
9701}
9702
9703#define MAX_WX_STRING 80
9704static int ipw_wx_get_powermode(struct net_device *dev,
9705				struct iw_request_info *info,
9706				union iwreq_data *wrqu, char *extra)
9707{
9708	struct ipw_priv *priv = libipw_priv(dev);
9709	int level = IPW_POWER_LEVEL(priv->power_mode);
9710	char *p = extra;
9711
9712	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9713
9714	switch (level) {
9715	case IPW_POWER_AC:
9716		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9717		break;
9718	case IPW_POWER_BATTERY:
9719		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9720		break;
9721	default:
9722		p += snprintf(p, MAX_WX_STRING - (p - extra),
9723			      "(Timeout %dms, Period %dms)",
9724			      timeout_duration[level - 1] / 1000,
9725			      period_duration[level - 1] / 1000);
9726	}
9727
9728	if (!(priv->power_mode & IPW_POWER_ENABLED))
9729		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9730
9731	wrqu->data.length = p - extra + 1;
9732
9733	return 0;
9734}
9735
9736static int ipw_wx_set_wireless_mode(struct net_device *dev,
9737				    struct iw_request_info *info,
9738				    union iwreq_data *wrqu, char *extra)
9739{
9740	struct ipw_priv *priv = libipw_priv(dev);
9741	int mode = *(int *)extra;
9742	u8 band = 0, modulation = 0;
9743
9744	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9745		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9746		return -EINVAL;
9747	}
9748	mutex_lock(&priv->mutex);
9749	if (priv->adapter == IPW_2915ABG) {
9750		priv->ieee->abg_true = 1;
9751		if (mode & IEEE_A) {
9752			band |= LIBIPW_52GHZ_BAND;
9753			modulation |= LIBIPW_OFDM_MODULATION;
9754		} else
9755			priv->ieee->abg_true = 0;
9756	} else {
9757		if (mode & IEEE_A) {
9758			IPW_WARNING("Attempt to set 2200BG into "
9759				    "802.11a mode\n");
9760			mutex_unlock(&priv->mutex);
9761			return -EINVAL;
9762		}
9763
9764		priv->ieee->abg_true = 0;
9765	}
9766
9767	if (mode & IEEE_B) {
9768		band |= LIBIPW_24GHZ_BAND;
9769		modulation |= LIBIPW_CCK_MODULATION;
9770	} else
9771		priv->ieee->abg_true = 0;
9772
9773	if (mode & IEEE_G) {
9774		band |= LIBIPW_24GHZ_BAND;
9775		modulation |= LIBIPW_OFDM_MODULATION;
9776	} else
9777		priv->ieee->abg_true = 0;
9778
9779	priv->ieee->mode = mode;
9780	priv->ieee->freq_band = band;
9781	priv->ieee->modulation = modulation;
9782	init_supported_rates(priv, &priv->rates);
9783
9784	/* Network configuration changed -- force [re]association */
9785	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9786	if (!ipw_disassociate(priv)) {
9787		ipw_send_supported_rates(priv, &priv->rates);
9788		ipw_associate(priv);
9789	}
9790
9791	/* Update the band LEDs */
9792	ipw_led_band_on(priv);
9793
9794	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9795		     mode & IEEE_A ? 'a' : '.',
9796		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9797	mutex_unlock(&priv->mutex);
9798	return 0;
9799}
9800
9801static int ipw_wx_get_wireless_mode(struct net_device *dev,
9802				    struct iw_request_info *info,
9803				    union iwreq_data *wrqu, char *extra)
9804{
9805	struct ipw_priv *priv = libipw_priv(dev);
9806	mutex_lock(&priv->mutex);
9807	switch (priv->ieee->mode) {
9808	case IEEE_A:
9809		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9810		break;
9811	case IEEE_B:
9812		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9813		break;
9814	case IEEE_A | IEEE_B:
9815		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9816		break;
9817	case IEEE_G:
9818		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9819		break;
9820	case IEEE_A | IEEE_G:
9821		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9822		break;
9823	case IEEE_B | IEEE_G:
9824		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9825		break;
9826	case IEEE_A | IEEE_B | IEEE_G:
9827		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9828		break;
9829	default:
9830		strncpy(extra, "unknown", MAX_WX_STRING);
9831		break;
9832	}
9833
9834	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9835
9836	wrqu->data.length = strlen(extra) + 1;
9837	mutex_unlock(&priv->mutex);
9838
9839	return 0;
9840}
9841
9842static int ipw_wx_set_preamble(struct net_device *dev,
9843			       struct iw_request_info *info,
9844			       union iwreq_data *wrqu, char *extra)
9845{
9846	struct ipw_priv *priv = libipw_priv(dev);
9847	int mode = *(int *)extra;
9848	mutex_lock(&priv->mutex);
9849	/* Switching from SHORT -> LONG requires a disassociation */
9850	if (mode == 1) {
9851		if (!(priv->config & CFG_PREAMBLE_LONG)) {
9852			priv->config |= CFG_PREAMBLE_LONG;
9853
9854			/* Network configuration changed -- force [re]association */
9855			IPW_DEBUG_ASSOC
9856			    ("[re]association triggered due to preamble change.\n");
9857			if (!ipw_disassociate(priv))
9858				ipw_associate(priv);
9859		}
9860		goto done;
9861	}
9862
9863	if (mode == 0) {
9864		priv->config &= ~CFG_PREAMBLE_LONG;
9865		goto done;
9866	}
9867	mutex_unlock(&priv->mutex);
9868	return -EINVAL;
9869
9870      done:
9871	mutex_unlock(&priv->mutex);
9872	return 0;
9873}
9874
9875static int ipw_wx_get_preamble(struct net_device *dev,
9876			       struct iw_request_info *info,
9877			       union iwreq_data *wrqu, char *extra)
9878{
9879	struct ipw_priv *priv = libipw_priv(dev);
9880	mutex_lock(&priv->mutex);
9881	if (priv->config & CFG_PREAMBLE_LONG)
9882		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9883	else
9884		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9885	mutex_unlock(&priv->mutex);
9886	return 0;
9887}
9888
9889#ifdef CONFIG_IPW2200_MONITOR
9890static int ipw_wx_set_monitor(struct net_device *dev,
9891			      struct iw_request_info *info,
9892			      union iwreq_data *wrqu, char *extra)
9893{
9894	struct ipw_priv *priv = libipw_priv(dev);
9895	int *parms = (int *)extra;
9896	int enable = (parms[0] > 0);
9897	mutex_lock(&priv->mutex);
9898	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9899	if (enable) {
9900		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9901#ifdef CONFIG_IPW2200_RADIOTAP
9902			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9903#else
9904			priv->net_dev->type = ARPHRD_IEEE80211;
9905#endif
9906			queue_work(priv->workqueue, &priv->adapter_restart);
9907		}
9908
9909		ipw_set_channel(priv, parms[1]);
9910	} else {
9911		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9912			mutex_unlock(&priv->mutex);
9913			return 0;
9914		}
9915		priv->net_dev->type = ARPHRD_ETHER;
9916		queue_work(priv->workqueue, &priv->adapter_restart);
9917	}
9918	mutex_unlock(&priv->mutex);
9919	return 0;
9920}
9921
9922#endif				/* CONFIG_IPW2200_MONITOR */
9923
9924static int ipw_wx_reset(struct net_device *dev,
9925			struct iw_request_info *info,
9926			union iwreq_data *wrqu, char *extra)
9927{
9928	struct ipw_priv *priv = libipw_priv(dev);
9929	IPW_DEBUG_WX("RESET\n");
9930	queue_work(priv->workqueue, &priv->adapter_restart);
9931	return 0;
9932}
9933
9934static int ipw_wx_sw_reset(struct net_device *dev,
9935			   struct iw_request_info *info,
9936			   union iwreq_data *wrqu, char *extra)
9937{
9938	struct ipw_priv *priv = libipw_priv(dev);
9939	union iwreq_data wrqu_sec = {
9940		.encoding = {
9941			     .flags = IW_ENCODE_DISABLED,
9942			     },
9943	};
9944	int ret;
9945
9946	IPW_DEBUG_WX("SW_RESET\n");
9947
9948	mutex_lock(&priv->mutex);
9949
9950	ret = ipw_sw_reset(priv, 2);
9951	if (!ret) {
9952		free_firmware();
9953		ipw_adapter_restart(priv);
9954	}
9955
9956	/* The SW reset bit might have been toggled on by the 'disable'
9957	 * module parameter, so take appropriate action */
9958	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9959
9960	mutex_unlock(&priv->mutex);
9961	libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9962	mutex_lock(&priv->mutex);
9963
9964	if (!(priv->status & STATUS_RF_KILL_MASK)) {
9965		/* Configuration likely changed -- force [re]association */
9966		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9967				"reset.\n");
9968		if (!ipw_disassociate(priv))
9969			ipw_associate(priv);
9970	}
9971
9972	mutex_unlock(&priv->mutex);
9973
9974	return 0;
9975}
9976
9977/* Rebase the WE IOCTLs to zero for the handler array */
9978#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
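/* For example, SIOCGIWNAME (0x8B01) lands at index 1 and SIOCSIWFREQ
 * (0x8B04) at index 4, so each entry below is indexed by the ioctl's
 * offset from SIOCSIWCOMMIT (0x8B00). */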
9979static iw_handler ipw_wx_handlers[] = {
9980	IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9981	IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9982	IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9983	IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9984	IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9985	IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9986	IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9987	IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9988	IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9989	IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9990	IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9991	IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9992	IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9993	IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9994	IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9995	IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9996	IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9997	IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9998	IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9999	IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
10000	IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
10001	IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
10002	IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
10003	IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
10004	IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
10005	IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
10006	IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
10007	IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
10008	IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
10009	IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
10010	IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
10011	IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
10012	IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
10013	IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
10014	IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
10015	IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
10016	IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
10017	IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
10018	IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
10019	IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
10020	IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
10021};
10022
10023enum {
10024	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10025	IPW_PRIV_GET_POWER,
10026	IPW_PRIV_SET_MODE,
10027	IPW_PRIV_GET_MODE,
10028	IPW_PRIV_SET_PREAMBLE,
10029	IPW_PRIV_GET_PREAMBLE,
10030	IPW_PRIV_RESET,
10031	IPW_PRIV_SW_RESET,
10032#ifdef CONFIG_IPW2200_MONITOR
10033	IPW_PRIV_SET_MONITOR,
10034#endif
10035};
10036
10037static struct iw_priv_args ipw_priv_args[] = {
10038	{
10039	 .cmd = IPW_PRIV_SET_POWER,
10040	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10041	 .name = "set_power"},
10042	{
10043	 .cmd = IPW_PRIV_GET_POWER,
10044	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10045	 .name = "get_power"},
10046	{
10047	 .cmd = IPW_PRIV_SET_MODE,
10048	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10049	 .name = "set_mode"},
10050	{
10051	 .cmd = IPW_PRIV_GET_MODE,
10052	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10053	 .name = "get_mode"},
10054	{
10055	 .cmd = IPW_PRIV_SET_PREAMBLE,
10056	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10057	 .name = "set_preamble"},
10058	{
10059	 .cmd = IPW_PRIV_GET_PREAMBLE,
10060	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10061	 .name = "get_preamble"},
10062	{
10063	 IPW_PRIV_RESET,
10064	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10065	{
10066	 IPW_PRIV_SW_RESET,
10067	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10068#ifdef CONFIG_IPW2200_MONITOR
10069	{
10070	 IPW_PRIV_SET_MONITOR,
10071	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10072#endif				/* CONFIG_IPW2200_MONITOR */
10073};
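/* These commands appear under "iwpriv" in user space; for example, assuming
 * the wireless-tools iwpriv utility and an interface named eth1:
 *
 *   iwpriv eth1 set_mode 6      restrict the radio to 802.11bg
 *   iwpriv eth1 monitor 1 6     enter monitor mode on channel 6
 *   iwpriv eth1 monitor 0 0     leave monitor mode
 *
 * The argument counts must match the IW_PRIV_SIZE_FIXED sizes above. */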
10074
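/* The order here must match the IPW_PRIV_* enum above: entry n services
 * private ioctl SIOCIWFIRSTPRIV + n. */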
10075static iw_handler ipw_priv_handler[] = {
10076	ipw_wx_set_powermode,
10077	ipw_wx_get_powermode,
10078	ipw_wx_set_wireless_mode,
10079	ipw_wx_get_wireless_mode,
10080	ipw_wx_set_preamble,
10081	ipw_wx_get_preamble,
10082	ipw_wx_reset,
10083	ipw_wx_sw_reset,
10084#ifdef CONFIG_IPW2200_MONITOR
10085	ipw_wx_set_monitor,
10086#endif
10087};
10088
10089static struct iw_handler_def ipw_wx_handler_def = {
10090	.standard = ipw_wx_handlers,
10091	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
10092	.num_private = ARRAY_SIZE(ipw_priv_handler),
10093	.num_private_args = ARRAY_SIZE(ipw_priv_args),
10094	.private = ipw_priv_handler,
10095	.private_args = ipw_priv_args,
10096	.get_wireless_stats = ipw_get_wireless_stats,
10097};
10098
10099/*
10100 * Get wireless statistics.
10101 * Called by /proc/net/wireless
10102 * Also called by SIOCGIWSTATS
10103 */
10104static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10105{
10106	struct ipw_priv *priv = libipw_priv(dev);
10107	struct iw_statistics *wstats;
10108
10109	wstats = &priv->wstats;
10110
10111	/* If the hw is disabled, ipw_get_ordinal() can't be called.
10112	 * netdev->get_wireless_stats seems to be called before the fw is
10113	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
10114	 * and associated; if not associated, the values are meaningless
10115	 * anyway, so zero them and mark them INVALID. */
10116	if (!(priv->status & STATUS_ASSOCIATED)) {
10117		wstats->miss.beacon = 0;
10118		wstats->discard.retries = 0;
10119		wstats->qual.qual = 0;
10120		wstats->qual.level = 0;
10121		wstats->qual.noise = 0;
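		/* 7 == all three *_UPDATED bits; the INVALID flags below
		 * tell user space to ignore the values anyway. */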
10122		wstats->qual.updated = 7;
10123		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10124		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10125		return wstats;
10126	}
10127
10128	wstats->qual.qual = priv->quality;
10129	wstats->qual.level = priv->exp_avg_rssi;
10130	wstats->qual.noise = priv->exp_avg_noise;
10131	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10132	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10133
10134	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10135	wstats->discard.retries = priv->last_tx_failures;
10136	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10137
10138/*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10139	goto fail_get_ordinal;
10140	wstats->discard.retries += tx_retry; */
10141
10142	return wstats;
10143}
10144
10145/* net device stuff */
10146
10147static  void init_sys_config(struct ipw_sys_config *sys_config)
10148{
10149	memset(sys_config, 0, sizeof(struct ipw_sys_config));
10150	sys_config->bt_coexistence = 0;
10151	sys_config->answer_broadcast_ssid_probe = 0;
10152	sys_config->accept_all_data_frames = 0;
10153	sys_config->accept_non_directed_frames = 1;
10154	sys_config->exclude_unicast_unencrypted = 0;
10155	sys_config->disable_unicast_decryption = 1;
10156	sys_config->exclude_multicast_unencrypted = 0;
10157	sys_config->disable_multicast_decryption = 1;
10158	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10159		antenna = CFG_SYS_ANTENNA_BOTH;
10160	sys_config->antenna_diversity = antenna;
10161	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
10162	sys_config->dot11g_auto_detection = 0;
10163	sys_config->enable_cts_to_self = 0;
10164	sys_config->bt_coexist_collision_thr = 0;
10165	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
10166	sys_config->silence_threshold = 0x1e;
10167}
10168
10169static int ipw_net_open(struct net_device *dev)
10170{
10171	IPW_DEBUG_INFO("dev->open\n");
10172	netif_start_queue(dev);
10173	return 0;
10174}
10175
10176static int ipw_net_stop(struct net_device *dev)
10177{
10178	IPW_DEBUG_INFO("dev->close\n");
10179	netif_stop_queue(dev);
10180	return 0;
10181}
10182
10183/*
10184 * TODO:
10185 *
10186 * Modify to send one TFD per fragment instead of using chunking; otherwise
10187 * we need to heavily modify libipw_skb_to_txb.
10188 */
10189
10190static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10191			     int pri)
10192{
10193	struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10194	    txb->fragments[0]->data;
10195	int i = 0;
10196	struct tfd_frame *tfd;
10197#ifdef CONFIG_IPW2200_QOS
10198	int tx_id = ipw_get_tx_queue_number(priv, pri);
10199	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10200#else
10201	struct clx2_tx_queue *txq = &priv->txq[0];
10202#endif
10203	struct clx2_queue *q = &txq->q;
10204	u8 id, hdr_len, unicast;
10205	int fc;
10206
10207	if (!(priv->status & STATUS_ASSOCIATED))
10208		goto drop;
10209
10210	hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10211	switch (priv->ieee->iw_mode) {
10212	case IW_MODE_ADHOC:
10213		unicast = !is_multicast_ether_addr(hdr->addr1);
10214		id = ipw_find_station(priv, hdr->addr1);
10215		if (id == IPW_INVALID_STATION) {
10216			id = ipw_add_station(priv, hdr->addr1);
10217			if (id == IPW_INVALID_STATION) {
10218				IPW_WARNING("Attempt to send data to "
10219					    "invalid cell: %pM\n",
10220					    hdr->addr1);
10221				goto drop;
10222			}
10223		}
10224		break;
10225
10226	case IW_MODE_INFRA:
10227	default:
10228		unicast = !is_multicast_ether_addr(hdr->addr3);
10229		id = 0;
10230		break;
10231	}
10232
10233	tfd = &txq->bd[q->first_empty];
10234	txq->txb[q->first_empty] = txb;
10235	memset(tfd, 0, sizeof(*tfd));
10236	tfd->u.data.station_number = id;
10237
10238	tfd->control_flags.message_type = TX_FRAME_TYPE;
10239	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10240
10241	tfd->u.data.cmd_id = DINO_CMD_TX;
10242	tfd->u.data.len = cpu_to_le16(txb->payload_size);
10243
10244	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10245		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10246	else
10247		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10248
10249	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10250		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10251
10252	fc = le16_to_cpu(hdr->frame_ctl);
10253	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10254
10255	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10256
10257	if (likely(unicast))
10258		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10259
10260	if (txb->encrypted && !priv->ieee->host_encrypt) {
10261		switch (priv->ieee->sec.level) {
10262		case SEC_LEVEL_3:
10263			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10264			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10265			/* XXX: ACK flag must be set for CCMP even if it
10266			 * is a multicast/broadcast packet, because CCMP
10267			 * group communication encrypted by GTK is
10268			 * actually done by the AP. */
10269			if (!unicast)
10270				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10271
10272			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10273			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10274			tfd->u.data.key_index = 0;
10275			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10276			break;
10277		case SEC_LEVEL_2:
10278			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10279			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10280			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10281			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10282			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10283			break;
10284		case SEC_LEVEL_1:
10285			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10286			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10287			tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10288			if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10289			    40)
10290				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10291			else
10292				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10293			break;
10294		case SEC_LEVEL_0:
10295			break;
10296		default:
10297			printk(KERN_ERR "Unknown security level %d\n",
10298			       priv->ieee->sec.level);
10299			break;
10300		}
10301	} else
10302		/* No hardware encryption */
10303		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10304
10305#ifdef CONFIG_IPW2200_QOS
10306	if (fc & IEEE80211_STYPE_QOS_DATA)
10307		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10308#endif				/* CONFIG_IPW2200_QOS */
10309
10310	/* payload */
10311	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10312						 txb->nr_frags));
10313	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10314		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10315	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10316		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10317			       i, le32_to_cpu(tfd->u.data.num_chunks),
10318			       txb->fragments[i]->len - hdr_len);
10319		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10320			     i, tfd->u.data.num_chunks,
10321			     txb->fragments[i]->len - hdr_len);
10322		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10323			   txb->fragments[i]->len - hdr_len);
10324
10325		tfd->u.data.chunk_ptr[i] =
10326		    cpu_to_le32(pci_map_single
10327				(priv->pci_dev,
10328				 txb->fragments[i]->data + hdr_len,
10329				 txb->fragments[i]->len - hdr_len,
10330				 PCI_DMA_TODEVICE));
10331		tfd->u.data.chunk_len[i] =
10332		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
10333	}
10334
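	/* If the frame carries more fragments than a single TFD can
	 * describe, coalesce the leftovers into one skb and hand that to
	 * the device as the final chunk. */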
10335	if (i != txb->nr_frags) {
10336		struct sk_buff *skb;
10337		u16 remaining_bytes = 0;
10338		int j;
10339
10340		for (j = i; j < txb->nr_frags; j++)
10341			remaining_bytes += txb->fragments[j]->len - hdr_len;
10342
10343		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10344		       remaining_bytes);
10345		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10346		if (skb != NULL) {
10347			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10348			for (j = i; j < txb->nr_frags; j++) {
10349				int size = txb->fragments[j]->len - hdr_len;
10350
10351				printk(KERN_INFO "Adding frag %d %d...\n",
10352				       j, size);
10353				memcpy(skb_put(skb, size),
10354				       txb->fragments[j]->data + hdr_len, size);
10355			}
10356			dev_kfree_skb_any(txb->fragments[i]);
10357			txb->fragments[i] = skb;
10358			tfd->u.data.chunk_ptr[i] =
10359			    cpu_to_le32(pci_map_single
10360					(priv->pci_dev, skb->data,
10361					 remaining_bytes,
10362					 PCI_DMA_TODEVICE));
10363
10364			le32_add_cpu(&tfd->u.data.num_chunks, 1);
10365		}
10366	}
10367
10368	/* kick DMA */
10369	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10370	ipw_write32(priv, q->reg_w, q->first_empty);
10371
10372	if (ipw_tx_queue_space(q) < q->high_mark)
10373		netif_stop_queue(priv->net_dev);
10374
10375	return NETDEV_TX_OK;
10376
10377      drop:
10378	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10379	libipw_txb_free(txb);
10380	return NETDEV_TX_OK;
10381}
10382
10383static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10384{
10385	struct ipw_priv *priv = libipw_priv(dev);
10386#ifdef CONFIG_IPW2200_QOS
10387	int tx_id = ipw_get_tx_queue_number(priv, pri);
10388	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10389#else
10390	struct clx2_tx_queue *txq = &priv->txq[0];
10391#endif				/* CONFIG_IPW2200_QOS */
10392
10393	if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10394		return 1;
10395
10396	return 0;
10397}
10398
10399#ifdef CONFIG_IPW2200_PROMISCUOUS
10400static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10401				      struct libipw_txb *txb)
10402{
10403	struct libipw_rx_stats dummystats;
10404	struct ieee80211_hdr *hdr;
10405	u8 n;
10406	u16 filter = priv->prom_priv->filter;
10407	int hdr_only = 0;
10408
10409	if (filter & IPW_PROM_NO_TX)
10410		return;
10411
10412	memset(&dummystats, 0, sizeof(dummystats));
10413
10414	/* Filtering of fragment chains is done against the first fragment */
10415	hdr = (void *)txb->fragments[0]->data;
10416	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10417		if (filter & IPW_PROM_NO_MGMT)
10418			return;
10419		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10420			hdr_only = 1;
10421	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10422		if (filter & IPW_PROM_NO_CTL)
10423			return;
10424		if (filter & IPW_PROM_CTL_HEADER_ONLY)
10425			hdr_only = 1;
10426	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10427		if (filter & IPW_PROM_NO_DATA)
10428			return;
10429		if (filter & IPW_PROM_DATA_HEADER_ONLY)
10430			hdr_only = 1;
10431	}
10432
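	/* Duplicate each fragment, prepend a minimal radiotap header that
	 * carries only the channel, and feed the copy to the rtap
	 * interface's receive path. */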
10433	for (n = 0; n < txb->nr_frags; ++n) {
10434		struct sk_buff *src = txb->fragments[n];
10435		struct sk_buff *dst;
10436		struct ieee80211_radiotap_header *rt_hdr;
10437		int len;
10438
10439		if (hdr_only) {
10440			hdr = (void *)src->data;
10441			len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10442		} else
10443			len = src->len;
10444
10445		dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
10446		if (!dst)
10447			continue;
10448
10449		rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10450
10451		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10452		rt_hdr->it_pad = 0;
10453		rt_hdr->it_present = 0; /* after all, it's just an idea */
10454		rt_hdr->it_present |=  cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10455
10456		*(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10457			ieee80211chan2mhz(priv->channel));
10458		if (priv->channel > 14) 	/* 802.11a */
10459			*(__le16*)skb_put(dst, sizeof(u16)) =
10460				cpu_to_le16(IEEE80211_CHAN_OFDM |
10461					     IEEE80211_CHAN_5GHZ);
10462		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10463			*(__le16*)skb_put(dst, sizeof(u16)) =
10464				cpu_to_le16(IEEE80211_CHAN_CCK |
10465					     IEEE80211_CHAN_2GHZ);
10466		else 		/* 802.11g */
10467			*(__le16*)skb_put(dst, sizeof(u16)) =
10468				cpu_to_le16(IEEE80211_CHAN_OFDM |
10469				 IEEE80211_CHAN_2GHZ);
10470
10471		rt_hdr->it_len = cpu_to_le16(dst->len);
10472
10473		skb_copy_from_linear_data(src, skb_put(dst, len), len);
10474
10475		if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10476			dev_kfree_skb_any(dst);
10477	}
10478}
10479#endif
10480
10481static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10482					   struct net_device *dev, int pri)
10483{
10484	struct ipw_priv *priv = libipw_priv(dev);
10485	unsigned long flags;
10486	netdev_tx_t ret;
10487
10488	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10489	spin_lock_irqsave(&priv->lock, flags);
10490
10491#ifdef CONFIG_IPW2200_PROMISCUOUS
10492	if (rtap_iface && netif_running(priv->prom_net_dev))
10493		ipw_handle_promiscuous_tx(priv, txb);
10494#endif
10495
10496	ret = ipw_tx_skb(priv, txb, pri);
10497	if (ret == NETDEV_TX_OK)
10498		__ipw_led_activity_on(priv);
10499	spin_unlock_irqrestore(&priv->lock, flags);
10500
10501	return ret;
10502}
10503
10504static void ipw_net_set_multicast_list(struct net_device *dev)
10505{
10506
10507}
10508
10509static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10510{
10511	struct ipw_priv *priv = libipw_priv(dev);
10512	struct sockaddr *addr = p;
10513
10514	if (!is_valid_ether_addr(addr->sa_data))
10515		return -EADDRNOTAVAIL;
10516	mutex_lock(&priv->mutex);
10517	priv->config |= CFG_CUSTOM_MAC;
10518	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10519	printk(KERN_INFO "%s: Setting MAC to %pM\n",
10520	       priv->net_dev->name, priv->mac_addr);
10521	queue_work(priv->workqueue, &priv->adapter_restart);
10522	mutex_unlock(&priv->mutex);
10523	return 0;
10524}
10525
10526static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10527				    struct ethtool_drvinfo *info)
10528{
10529	struct ipw_priv *p = libipw_priv(dev);
10530	char vers[64];
10531	char date[32];
10532	u32 len;
10533
10534	strcpy(info->driver, DRV_NAME);
10535	strcpy(info->version, DRV_VERSION);
10536
10537	len = sizeof(vers);
10538	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10539	len = sizeof(date);
10540	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10541
10542	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10543		 vers, date);
10544	strcpy(info->bus_info, pci_name(p->pci_dev));
10545	info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10546}
10547
10548static u32 ipw_ethtool_get_link(struct net_device *dev)
10549{
10550	struct ipw_priv *priv = libipw_priv(dev);
10551	return (priv->status & STATUS_ASSOCIATED) != 0;
10552}
10553
10554static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10555{
10556	return IPW_EEPROM_IMAGE_SIZE;
10557}
10558
10559static int ipw_ethtool_get_eeprom(struct net_device *dev,
10560				  struct ethtool_eeprom *eeprom, u8 * bytes)
10561{
10562	struct ipw_priv *p = libipw_priv(dev);
10563
10564	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10565		return -EINVAL;
10566	mutex_lock(&p->mutex);
10567	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10568	mutex_unlock(&p->mutex);
10569	return 0;
10570}
10571
10572static int ipw_ethtool_set_eeprom(struct net_device *dev,
10573				  struct ethtool_eeprom *eeprom, u8 * bytes)
10574{
10575	struct ipw_priv *p = libipw_priv(dev);
10576	int i;
10577
10578	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10579		return -EINVAL;
10580	mutex_lock(&p->mutex);
10581	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10582	for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10583		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10584	mutex_unlock(&p->mutex);
10585	return 0;
10586}
10587
10588static const struct ethtool_ops ipw_ethtool_ops = {
10589	.get_link = ipw_ethtool_get_link,
10590	.get_drvinfo = ipw_ethtool_get_drvinfo,
10591	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
10592	.get_eeprom = ipw_ethtool_get_eeprom,
10593	.set_eeprom = ipw_ethtool_set_eeprom,
10594};
10595
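/* Hard interrupt handler: mask further interrupts, acknowledge and cache the
 * pending INTA bits, then defer the real work to the irq tasklet.  Interrupts
 * that are not ours (shared line, interrupts disabled, or a vanished device)
 * are reported as IRQ_NONE. */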
10596static irqreturn_t ipw_isr(int irq, void *data)
10597{
10598	struct ipw_priv *priv = data;
10599	u32 inta, inta_mask;
10600
10601	if (!priv)
10602		return IRQ_NONE;
10603
10604	spin_lock(&priv->irq_lock);
10605
10606	if (!(priv->status & STATUS_INT_ENABLED)) {
10607		/* IRQ is disabled */
10608		goto none;
10609	}
10610
10611	inta = ipw_read32(priv, IPW_INTA_RW);
10612	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10613
10614	if (inta == 0xFFFFFFFF) {
10615		/* Hardware disappeared */
10616		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10617		goto none;
10618	}
10619
10620	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10621		/* Shared interrupt */
10622		goto none;
10623	}
10624
10625	/* tell the device to stop sending interrupts */
10626	__ipw_disable_interrupts(priv);
10627
10628	/* ack current interrupts */
10629	inta &= (IPW_INTA_MASK_ALL & inta_mask);
10630	ipw_write32(priv, IPW_INTA_RW, inta);
10631
10632	/* Cache INTA value for our tasklet */
10633	priv->isr_inta = inta;
10634
10635	tasklet_schedule(&priv->irq_tasklet);
10636
10637	spin_unlock(&priv->irq_lock);
10638
10639	return IRQ_HANDLED;
10640      none:
10641	spin_unlock(&priv->irq_lock);
10642	return IRQ_NONE;
10643}
10644
10645static void ipw_rf_kill(void *adapter)
10646{
10647	struct ipw_priv *priv = adapter;
10648	unsigned long flags;
10649
10650	spin_lock_irqsave(&priv->lock, flags);
10651
10652	if (rf_kill_active(priv)) {
10653		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10654		if (priv->workqueue)
10655			queue_delayed_work(priv->workqueue,
10656					   &priv->rf_kill, 2 * HZ);
10657		goto exit_unlock;
10658	}
10659
10660	/* RF Kill is now disabled, so bring the device back up */
10661
10662	if (!(priv->status & STATUS_RF_KILL_MASK)) {
10663		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10664				  "device\n");
10665
10666		/* we cannot do an adapter restart while inside an irq lock */
10667		queue_work(priv->workqueue, &priv->adapter_restart);
10668	} else
10669		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
10670				  "enabled\n");
10671
10672      exit_unlock:
10673	spin_unlock_irqrestore(&priv->lock, flags);
10674}
10675
10676static void ipw_bg_rf_kill(struct work_struct *work)
10677{
10678	struct ipw_priv *priv =
10679		container_of(work, struct ipw_priv, rf_kill.work);
10680	mutex_lock(&priv->mutex);
10681	ipw_rf_kill(priv);
10682	mutex_unlock(&priv->mutex);
10683}
10684
10685static void ipw_link_up(struct ipw_priv *priv)
10686{
10687	priv->last_seq_num = -1;
10688	priv->last_frag_num = -1;
10689	priv->last_packet_time = 0;
10690
10691	netif_carrier_on(priv->net_dev);
10692
10693	cancel_delayed_work(&priv->request_scan);
10694	cancel_delayed_work(&priv->request_direct_scan);
10695	cancel_delayed_work(&priv->request_passive_scan);
10696	cancel_delayed_work(&priv->scan_event);
10697	ipw_reset_stats(priv);
10698	/* Ensure the rate is updated immediately */
10699	priv->last_rate = ipw_get_current_rate(priv);
10700	ipw_gather_stats(priv);
10701	ipw_led_link_up(priv);
10702	notify_wx_assoc_event(priv);
10703
10704	if (priv->config & CFG_BACKGROUND_SCAN)
10705		queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10706}
10707
10708static void ipw_bg_link_up(struct work_struct *work)
10709{
10710	struct ipw_priv *priv =
10711		container_of(work, struct ipw_priv, link_up);
10712	mutex_lock(&priv->mutex);
10713	ipw_link_up(priv);
10714	mutex_unlock(&priv->mutex);
10715}
10716
10717static void ipw_link_down(struct ipw_priv *priv)
10718{
10719	ipw_led_link_down(priv);
10720	netif_carrier_off(priv->net_dev);
10721	notify_wx_assoc_event(priv);
10722
10723	/* Cancel any queued work ... */
10724	cancel_delayed_work(&priv->request_scan);
10725	cancel_delayed_work(&priv->request_direct_scan);
10726	cancel_delayed_work(&priv->request_passive_scan);
10727	cancel_delayed_work(&priv->adhoc_check);
10728	cancel_delayed_work(&priv->gather_stats);
10729
10730	ipw_reset_stats(priv);
10731
10732	if (!(priv->status & STATUS_EXIT_PENDING)) {
10733		/* Queue up another scan... */
10734		queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10735	} else
10736		cancel_delayed_work(&priv->scan_event);
10737}
10738
10739static void ipw_bg_link_down(struct work_struct *work)
10740{
10741	struct ipw_priv *priv =
10742		container_of(work, struct ipw_priv, link_down);
10743	mutex_lock(&priv->mutex);
10744	ipw_link_down(priv);
10745	mutex_unlock(&priv->mutex);
10746}
10747
10748static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10749{
10750	int ret = 0;
10751
10752	priv->workqueue = create_workqueue(DRV_NAME);
10753	init_waitqueue_head(&priv->wait_command_queue);
10754	init_waitqueue_head(&priv->wait_state);
10755
10756	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10757	INIT_WORK(&priv->associate, ipw_bg_associate);
10758	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10759	INIT_WORK(&priv->system_config, ipw_system_config);
10760	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10761	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10762	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10763	INIT_WORK(&priv->up, ipw_bg_up);
10764	INIT_WORK(&priv->down, ipw_bg_down);
10765	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10766	INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10767	INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10768	INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10769	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10770	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10771	INIT_WORK(&priv->roam, ipw_bg_roam);
10772	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10773	INIT_WORK(&priv->link_up, ipw_bg_link_up);
10774	INIT_WORK(&priv->link_down, ipw_bg_link_down);
10775	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10776	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10777	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10778	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10779
10780#ifdef CONFIG_IPW2200_QOS
10781	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10782#endif				/* CONFIG_IPW2200_QOS */
10783
10784	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10785		     ipw_irq_tasklet, (unsigned long)priv);
10786
10787	return ret;
10788}
10789
10790static void shim__set_security(struct net_device *dev,
10791			       struct libipw_security *sec)
10792{
10793	struct ipw_priv *priv = libipw_priv(dev);
10794	int i;
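	/* Mirror each of the four WEP key slots flagged by the caller into
	 * our libipw security state; a zero-length key clears that slot. */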
10795	for (i = 0; i < 4; i++) {
10796		if (sec->flags & (1 << i)) {
10797			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10798			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10799			if (sec->key_sizes[i] == 0)
10800				priv->ieee->sec.flags &= ~(1 << i);
10801			else {
10802				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10803				       sec->key_sizes[i]);
10804				priv->ieee->sec.flags |= (1 << i);
10805			}
10806			priv->status |= STATUS_SECURITY_UPDATED;
10807		} else if (sec->level != SEC_LEVEL_1)
10808			priv->ieee->sec.flags &= ~(1 << i);
10809	}
10810
10811	if (sec->flags & SEC_ACTIVE_KEY) {
10812		if (sec->active_key <= 3) {
10813			priv->ieee->sec.active_key = sec->active_key;
10814			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10815		} else
10816			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10817		priv->status |= STATUS_SECURITY_UPDATED;
10818	} else
10819		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10820
10821	if ((sec->flags & SEC_AUTH_MODE) &&
10822	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10823		priv->ieee->sec.auth_mode = sec->auth_mode;
10824		priv->ieee->sec.flags |= SEC_AUTH_MODE;
10825		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10826			priv->capability |= CAP_SHARED_KEY;
10827		else
10828			priv->capability &= ~CAP_SHARED_KEY;
10829		priv->status |= STATUS_SECURITY_UPDATED;
10830	}
10831
10832	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10833		priv->ieee->sec.flags |= SEC_ENABLED;
10834		priv->ieee->sec.enabled = sec->enabled;
10835		priv->status |= STATUS_SECURITY_UPDATED;
10836		if (sec->enabled)
10837			priv->capability |= CAP_PRIVACY_ON;
10838		else
10839			priv->capability &= ~CAP_PRIVACY_ON;
10840	}
10841
10842	if (sec->flags & SEC_ENCRYPT)
10843		priv->ieee->sec.encrypt = sec->encrypt;
10844
10845	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10846		priv->ieee->sec.level = sec->level;
10847		priv->ieee->sec.flags |= SEC_LEVEL;
10848		priv->status |= STATUS_SECURITY_UPDATED;
10849	}
10850
10851	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10852		ipw_set_hwcrypto_keys(priv);
10853
10854	/* To match current functionality of ipw2100 (which works well w/
10855	 * various supplicants), we don't force a disassociate if the
10856	 * privacy capability changes ... */
10857#if 0
10858	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10859	    (((priv->assoc_request.capability &
10860	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10861	     (!(priv->assoc_request.capability &
10862		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10863		IPW_DEBUG_ASSOC("Disassociating due to capability "
10864				"change.\n");
10865		ipw_disassociate(priv);
10866	}
10867#endif
10868}
10869
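/*
 * Fill in the supported-rates table sent to the firmware: OFDM rates only
 * for the 5.2GHz (802.11a) band, CCK rates (plus OFDM when OFDM modulation
 * is enabled) for the mixed/2.4GHz case.
 */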
10870static int init_supported_rates(struct ipw_priv *priv,
10871				struct ipw_supported_rates *rates)
10872{
10873	/* TODO: Mask out rates based on priv->rates_mask */
10874
10875	memset(rates, 0, sizeof(*rates));
10876	/* configure supported rates */
10877	switch (priv->ieee->freq_band) {
10878	case LIBIPW_52GHZ_BAND:
10879		rates->ieee_mode = IPW_A_MODE;
10880		rates->purpose = IPW_RATE_CAPABILITIES;
10881		ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10882					LIBIPW_OFDM_DEFAULT_RATES_MASK);
10883		break;
10884
10885	default:		/* Mixed or 2.4GHz */
10886		rates->ieee_mode = IPW_G_MODE;
10887		rates->purpose = IPW_RATE_CAPABILITIES;
10888		ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10889				       LIBIPW_CCK_DEFAULT_RATES_MASK);
10890		if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10891			ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10892						LIBIPW_OFDM_DEFAULT_RATES_MASK);
10893		}
10894		break;
10895	}
10896
10897	return 0;
10898}
10899
10900static int ipw_config(struct ipw_priv *priv)
10901{
10902	/* This is only called from ipw_up, which resets/reloads the firmware,
10903	   so we don't need to first disable the card before we configure
10904	   it */
10905	if (ipw_set_tx_power(priv))
10906		goto error;
10907
10908	/* initialize adapter address */
10909	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10910		goto error;
10911
10912	/* set basic system config settings */
10913	init_sys_config(&priv->sys_config);
10914
10915	/* Support Bluetooth if we have BT h/w on board, and user wants to.
10916	 * Does not support BT priority yet (don't abort or defer our Tx) */
10917	if (bt_coexist) {
10918		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10919
10920		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10921			priv->sys_config.bt_coexistence
10922			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10923		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10924			priv->sys_config.bt_coexistence
10925			    |= CFG_BT_COEXISTENCE_OOB;
10926	}
10927
10928#ifdef CONFIG_IPW2200_PROMISCUOUS
10929	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10930		priv->sys_config.accept_all_data_frames = 1;
10931		priv->sys_config.accept_non_directed_frames = 1;
10932		priv->sys_config.accept_all_mgmt_bcpr = 1;
10933		priv->sys_config.accept_all_mgmt_frames = 1;
10934	}
10935#endif
10936
10937	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10938		priv->sys_config.answer_broadcast_ssid_probe = 1;
10939	else
10940		priv->sys_config.answer_broadcast_ssid_probe = 0;
10941
10942	if (ipw_send_system_config(priv))
10943		goto error;
10944
10945	init_supported_rates(priv, &priv->rates);
10946	if (ipw_send_supported_rates(priv, &priv->rates))
10947		goto error;
10948
10949	/* Set request-to-send threshold */
10950	if (priv->rts_threshold) {
10951		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10952			goto error;
10953	}
10954#ifdef CONFIG_IPW2200_QOS
10955	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10956	ipw_qos_activate(priv, NULL);
10957#endif				/* CONFIG_IPW2200_QOS */
10958
10959	if (ipw_set_random_seed(priv))
10960		goto error;
10961
10962	/* final state transition to the RUN state */
10963	if (ipw_send_host_complete(priv))
10964		goto error;
10965
10966	priv->status |= STATUS_INIT;
10967
10968	ipw_led_init(priv);
10969	ipw_led_radio_on(priv);
10970	priv->notif_missed_beacons = 0;
10971
10972	/* Set hardware WEP key if it is configured. */
10973	if ((priv->capability & CAP_PRIVACY_ON) &&
10974	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
10975	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10976		ipw_set_hwcrypto_keys(priv);
10977
10978	return 0;
10979
10980      error:
10981	return -EIO;
10982}
10983
10984/*
10985 * NOTE:
10986 *
10987 * These tables have been tested in conjunction with the
10988 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10989 *
10990 * Altering these values, using them on other hardware, or using them in
10991 * geographies not intended for resale of the above-mentioned Intel
10992 * adapters has not been tested.
10993 *
10994 * Remember to update the table in README.ipw2200 when changing this
10995 * table.
10996 *
10997 */
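/*
 * Each entry below is keyed by the three-character SKU/country code read
 * from the EEPROM (see ipw_up()).  Channels are listed as
 * { frequency in MHz, channel number[, flags] }; LIBIPW_CH_PASSIVE_ONLY
 * marks channels that may only be scanned passively, and LIBIPW_CH_B_ONLY
 * marks 802.11b-only channels (channel 14).
 */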
10998static const struct libipw_geo ipw_geos[] = {
10999	{			/* Restricted */
11000	 "---",
11001	 .bg_channels = 11,
11002	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11003		{2427, 4}, {2432, 5}, {2437, 6},
11004		{2442, 7}, {2447, 8}, {2452, 9},
11005		{2457, 10}, {2462, 11}},
11006	 },
11007
11008	{			/* Custom US/Canada */
11009	 "ZZF",
11010	 .bg_channels = 11,
11011	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11012		{2427, 4}, {2432, 5}, {2437, 6},
11013		{2442, 7}, {2447, 8}, {2452, 9},
11014		{2457, 10}, {2462, 11}},
11015	 .a_channels = 8,
11016	 .a = {{5180, 36},
11017	       {5200, 40},
11018	       {5220, 44},
11019	       {5240, 48},
11020	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11021	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11022	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11023	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
11024	 },
11025
11026	{			/* Rest of World */
11027	 "ZZD",
11028	 .bg_channels = 13,
11029	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11030		{2427, 4}, {2432, 5}, {2437, 6},
11031		{2442, 7}, {2447, 8}, {2452, 9},
11032		{2457, 10}, {2462, 11}, {2467, 12},
11033		{2472, 13}},
11034	 },
11035
11036	{			/* Custom USA & Europe & High */
11037	 "ZZA",
11038	 .bg_channels = 11,
11039	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11040		{2427, 4}, {2432, 5}, {2437, 6},
11041		{2442, 7}, {2447, 8}, {2452, 9},
11042		{2457, 10}, {2462, 11}},
11043	 .a_channels = 13,
11044	 .a = {{5180, 36},
11045	       {5200, 40},
11046	       {5220, 44},
11047	       {5240, 48},
11048	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11049	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11050	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11051	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11052	       {5745, 149},
11053	       {5765, 153},
11054	       {5785, 157},
11055	       {5805, 161},
11056	       {5825, 165}},
11057	 },
11058
11059	{			/* Custom NA & Europe */
11060	 "ZZB",
11061	 .bg_channels = 11,
11062	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11063		{2427, 4}, {2432, 5}, {2437, 6},
11064		{2442, 7}, {2447, 8}, {2452, 9},
11065		{2457, 10}, {2462, 11}},
11066	 .a_channels = 13,
11067	 .a = {{5180, 36},
11068	       {5200, 40},
11069	       {5220, 44},
11070	       {5240, 48},
11071	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11072	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11073	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11074	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11075	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11076	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11077	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11078	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11079	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11080	 },
11081
11082	{			/* Custom Japan */
11083	 "ZZC",
11084	 .bg_channels = 11,
11085	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11086		{2427, 4}, {2432, 5}, {2437, 6},
11087		{2442, 7}, {2447, 8}, {2452, 9},
11088		{2457, 10}, {2462, 11}},
11089	 .a_channels = 4,
11090	 .a = {{5170, 34}, {5190, 38},
11091	       {5210, 42}, {5230, 46}},
11092	 },
11093
11094	{			/* Custom */
11095	 "ZZM",
11096	 .bg_channels = 11,
11097	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11098		{2427, 4}, {2432, 5}, {2437, 6},
11099		{2442, 7}, {2447, 8}, {2452, 9},
11100		{2457, 10}, {2462, 11}},
11101	 },
11102
11103	{			/* Europe */
11104	 "ZZE",
11105	 .bg_channels = 13,
11106	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11107		{2427, 4}, {2432, 5}, {2437, 6},
11108		{2442, 7}, {2447, 8}, {2452, 9},
11109		{2457, 10}, {2462, 11}, {2467, 12},
11110		{2472, 13}},
11111	 .a_channels = 19,
11112	 .a = {{5180, 36},
11113	       {5200, 40},
11114	       {5220, 44},
11115	       {5240, 48},
11116	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11117	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11118	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11119	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11120	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11121	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11122	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11123	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11124	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11125	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11126	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11127	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11128	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11129	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11130	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11131	 },
11132
11133	{			/* Custom Japan */
11134	 "ZZJ",
11135	 .bg_channels = 14,
11136	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11137		{2427, 4}, {2432, 5}, {2437, 6},
11138		{2442, 7}, {2447, 8}, {2452, 9},
11139		{2457, 10}, {2462, 11}, {2467, 12},
11140		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11141	 .a_channels = 4,
11142	 .a = {{5170, 34}, {5190, 38},
11143	       {5210, 42}, {5230, 46}},
11144	 },
11145
11146	{			/* Rest of World */
11147	 "ZZR",
11148	 .bg_channels = 14,
11149	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11150		{2427, 4}, {2432, 5}, {2437, 6},
11151		{2442, 7}, {2447, 8}, {2452, 9},
11152		{2457, 10}, {2462, 11}, {2467, 12},
11153		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11154			     LIBIPW_CH_PASSIVE_ONLY}},
11155	 },
11156
11157	{			/* High Band */
11158	 "ZZH",
11159	 .bg_channels = 13,
11160	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11161		{2427, 4}, {2432, 5}, {2437, 6},
11162		{2442, 7}, {2447, 8}, {2452, 9},
11163		{2457, 10}, {2462, 11},
11164		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11165		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11166	 .a_channels = 4,
11167	 .a = {{5745, 149}, {5765, 153},
11168	       {5785, 157}, {5805, 161}},
11169	 },
11170
11171	{			/* Custom Europe */
11172	 "ZZG",
11173	 .bg_channels = 13,
11174	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11175		{2427, 4}, {2432, 5}, {2437, 6},
11176		{2442, 7}, {2447, 8}, {2452, 9},
11177		{2457, 10}, {2462, 11},
11178		{2467, 12}, {2472, 13}},
11179	 .a_channels = 4,
11180	 .a = {{5180, 36}, {5200, 40},
11181	       {5220, 44}, {5240, 48}},
11182	 },
11183
11184	{			/* Europe */
11185	 "ZZK",
11186	 .bg_channels = 13,
11187	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11188		{2427, 4}, {2432, 5}, {2437, 6},
11189		{2442, 7}, {2447, 8}, {2452, 9},
11190		{2457, 10}, {2462, 11},
11191		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11192		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11193	 .a_channels = 24,
11194	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11195	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11196	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11197	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11198	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11199	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11200	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11201	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11202	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11203	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11204	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11205	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11206	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11207	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11208	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11209	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11210	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11211	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11212	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11213	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11214	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11215	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11216	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11217	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11218	 },
11219
11220	{			/* Europe */
11221	 "ZZL",
11222	 .bg_channels = 11,
11223	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11224		{2427, 4}, {2432, 5}, {2437, 6},
11225		{2442, 7}, {2447, 8}, {2452, 9},
11226		{2457, 10}, {2462, 11}},
11227	 .a_channels = 13,
11228	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11229	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11230	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11231	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11232	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11233	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11234	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11235	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11236	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11237	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11238	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11239	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11240	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11241	 }
11242};
11243
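/*
 * Bring the adapter up: load microcode, firmware and EEPROM data, read the
 * MAC address and country code, select the matching geography table above,
 * and push the initial configuration.  The whole sequence is retried up to
 * MAX_HW_RESTARTS times before giving up.
 */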
11244#define MAX_HW_RESTARTS 5
11245static int ipw_up(struct ipw_priv *priv)
11246{
11247	int rc, i, j;
11248
11249	/* Age scan list entries found before suspend */
11250	if (priv->suspend_time) {
11251		libipw_networks_age(priv->ieee, priv->suspend_time);
11252		priv->suspend_time = 0;
11253	}
11254
11255	if (priv->status & STATUS_EXIT_PENDING)
11256		return -EIO;
11257
11258	if (cmdlog && !priv->cmdlog) {
11259		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11260				       GFP_KERNEL);
11261		if (priv->cmdlog == NULL) {
11262			IPW_ERROR("Error allocating %d command log entries.\n",
11263				  cmdlog);
11264			return -ENOMEM;
11265		} else {
11266			priv->cmdlog_len = cmdlog;
11267		}
11268	}
11269
11270	for (i = 0; i < MAX_HW_RESTARTS; i++) {
11271		/* Load the microcode, firmware, and eeprom.
11272		 * Also start the clocks. */
11273		rc = ipw_load(priv);
11274		if (rc) {
11275			IPW_ERROR("Unable to load firmware: %d\n", rc);
11276			return rc;
11277		}
11278
11279		ipw_init_ordinals(priv);
11280		if (!(priv->config & CFG_CUSTOM_MAC))
11281			eeprom_parse_mac(priv, priv->mac_addr);
11282		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11283		memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
11284
11285		for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11286			if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11287				    ipw_geos[j].name, 3))
11288				break;
11289		}
11290		if (j == ARRAY_SIZE(ipw_geos)) {
11291			IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11292				    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11293				    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11294				    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11295			j = 0;
11296		}
11297		if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
11298			IPW_WARNING("Could not set geography.\n");
11299			return 0;
11300		}
11301
11302		if (priv->status & STATUS_RF_KILL_SW) {
11303			IPW_WARNING("Radio disabled by module parameter.\n");
11304			return 0;
11305		} else if (rf_kill_active(priv)) {
11306			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11307				    "Kill switch must be turned off for "
11308				    "wireless networking to work.\n");
11309			queue_delayed_work(priv->workqueue, &priv->rf_kill,
11310					   2 * HZ);
11311			return 0;
11312		}
11313
11314		rc = ipw_config(priv);
11315		if (!rc) {
11316			IPW_DEBUG_INFO("Configured device on count %i\n", i);
11317
11318			/* If configured to try to auto-associate, kick
11319			 * off a scan. */
11320			queue_delayed_work(priv->workqueue,
11321					   &priv->request_scan, 0);
11322
11323			return 0;
11324		}
11325
11326		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11327		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11328			       i, MAX_HW_RESTARTS);
11329
11330		/* We had an error bringing up the hardware, so take it
11331		 * all the way back down so we can try again */
11332		ipw_down(priv);
11333	}
11334
11335	/* We tried to restart and configure the device for as long as our
11336	 * patience could withstand */
11337	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11338
11339	return -EIO;
11340}
11341
11342static void ipw_bg_up(struct work_struct *work)
11343{
11344	struct ipw_priv *priv =
11345		container_of(work, struct ipw_priv, up);
11346	mutex_lock(&priv->mutex);
11347	ipw_up(priv);
11348	mutex_unlock(&priv->mutex);
11349}
11350
11351static void ipw_deinit(struct ipw_priv *priv)
11352{
11353	int i;
11354
11355	if (priv->status & STATUS_SCANNING) {
11356		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11357		ipw_abort_scan(priv);
11358	}
11359
11360	if (priv->status & STATUS_ASSOCIATED) {
11361		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11362		ipw_disassociate(priv);
11363	}
11364
11365	ipw_led_shutdown(priv);
11366
11367	/* Wait up to 1s for status to change to not scanning and not
11368	 * associated (disassociation can take a while for a full 802.11
11369	 * exchange) */
11370	for (i = 1000; i && (priv->status &
11371			     (STATUS_DISASSOCIATING |
11372			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11373		udelay(10);
11374
11375	if (priv->status & (STATUS_DISASSOCIATING |
11376			    STATUS_ASSOCIATED | STATUS_SCANNING))
11377		IPW_DEBUG_INFO("Still associated or scanning...\n");
11378	else
11379		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11380
11381	/* Attempt to disable the card */
11382	ipw_send_card_disable(priv, 0);
11383
11384	priv->status &= ~STATUS_INIT;
11385}
11386
11387static void ipw_down(struct ipw_priv *priv)
11388{
11389	int exit_pending = priv->status & STATUS_EXIT_PENDING;
11390
11391	priv->status |= STATUS_EXIT_PENDING;
11392
11393	if (ipw_is_init(priv))
11394		ipw_deinit(priv);
11395
11396	/* Wipe out the EXIT_PENDING status bit if we are not actually
11397	 * exiting the module */
11398	if (!exit_pending)
11399		priv->status &= ~STATUS_EXIT_PENDING;
11400
11401	/* tell the device to stop sending interrupts */
11402	ipw_disable_interrupts(priv);
11403
11404	/* Clear all bits but the RF Kill */
11405	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11406	netif_carrier_off(priv->net_dev);
11407
11408	ipw_stop_nic(priv);
11409
11410	ipw_led_radio_off(priv);
11411}
11412
11413static void ipw_bg_down(struct work_struct *work)
11414{
11415	struct ipw_priv *priv =
11416		container_of(work, struct ipw_priv, down);
11417	mutex_lock(&priv->mutex);
11418	ipw_down(priv);
11419	mutex_unlock(&priv->mutex);
11420}
11421
11422/* Called by register_netdev() */
11423static int ipw_net_init(struct net_device *dev)
11424{
11425	struct ipw_priv *priv = libipw_priv(dev);
11426	mutex_lock(&priv->mutex);
11427
11428	if (ipw_up(priv)) {
11429		mutex_unlock(&priv->mutex);
11430		return -EIO;
11431	}
11432
11433	mutex_unlock(&priv->mutex);
11434	return 0;
11435}
11436
11437/* PCI driver stuff */
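/* Each entry is { vendor, device, subvendor, subdevice, class, class_mask,
 * driver_data }.  The 0x1043 entries match specific subsystem IDs, while the
 * PCI_VDEVICE() entries match on vendor/device alone. */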
11438static struct pci_device_id card_ids[] = {
11439	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11440	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11441	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11442	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11443	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11444	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11445	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11446	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11447	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11448	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11449	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11450	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11451	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11452	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11453	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11454	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11455	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11456	{PCI_VDEVICE(INTEL, 0x104f), 0},
11457	{PCI_VDEVICE(INTEL, 0x4220), 0},	/* BG */
11458	{PCI_VDEVICE(INTEL, 0x4221), 0},	/* BG */
11459	{PCI_VDEVICE(INTEL, 0x4223), 0},	/* ABG */
11460	{PCI_VDEVICE(INTEL, 0x4224), 0},	/* ABG */
11461
11462	/* required last entry */
11463	{0,}
11464};
11465
11466MODULE_DEVICE_TABLE(pci, card_ids);
11467
11468static struct attribute *ipw_sysfs_entries[] = {
11469	&dev_attr_rf_kill.attr,
11470	&dev_attr_direct_dword.attr,
11471	&dev_attr_indirect_byte.attr,
11472	&dev_attr_indirect_dword.attr,
11473	&dev_attr_mem_gpio_reg.attr,
11474	&dev_attr_command_event_reg.attr,
11475	&dev_attr_nic_type.attr,
11476	&dev_attr_status.attr,
11477	&dev_attr_cfg.attr,
11478	&dev_attr_error.attr,
11479	&dev_attr_event_log.attr,
11480	&dev_attr_cmd_log.attr,
11481	&dev_attr_eeprom_delay.attr,
11482	&dev_attr_ucode_version.attr,
11483	&dev_attr_rtc.attr,
11484	&dev_attr_scan_age.attr,
11485	&dev_attr_led.attr,
11486	&dev_attr_speed_scan.attr,
11487	&dev_attr_net_stats.attr,
11488	&dev_attr_channels.attr,
11489#ifdef CONFIG_IPW2200_PROMISCUOUS
11490	&dev_attr_rtap_iface.attr,
11491	&dev_attr_rtap_filter.attr,
11492#endif
11493	NULL
11494};
11495
11496static struct attribute_group ipw_attribute_group = {
11497	.name = NULL,		/* put in device directory */
11498	.attrs = ipw_sysfs_entries,
11499};
11500
11501#ifdef CONFIG_IPW2200_PROMISCUOUS
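/*
 * Promiscuous "rtap" support: a second net_device of type
 * ARPHRD_IEEE80211_RADIOTAP is registered next to the regular interface.
 * Opening it while the main interface is not in monitor mode asks the
 * firmware to accept all data and management frames so that received
 * traffic can be reported on the radiotap interface; closing it restores
 * the normal filtering.
 */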
11502static int ipw_prom_open(struct net_device *dev)
11503{
11504	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11505	struct ipw_priv *priv = prom_priv->priv;
11506
11507	IPW_DEBUG_INFO("prom dev->open\n");
11508	netif_carrier_off(dev);
11509
11510	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11511		priv->sys_config.accept_all_data_frames = 1;
11512		priv->sys_config.accept_non_directed_frames = 1;
11513		priv->sys_config.accept_all_mgmt_bcpr = 1;
11514		priv->sys_config.accept_all_mgmt_frames = 1;
11515
11516		ipw_send_system_config(priv);
11517	}
11518
11519	return 0;
11520}
11521
11522static int ipw_prom_stop(struct net_device *dev)
11523{
11524	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11525	struct ipw_priv *priv = prom_priv->priv;
11526
11527	IPW_DEBUG_INFO("prom dev->stop\n");
11528
11529	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11530		priv->sys_config.accept_all_data_frames = 0;
11531		priv->sys_config.accept_non_directed_frames = 0;
11532		priv->sys_config.accept_all_mgmt_bcpr = 0;
11533		priv->sys_config.accept_all_mgmt_frames = 0;
11534
11535		ipw_send_system_config(priv);
11536	}
11537
11538	return 0;
11539}
11540
11541static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11542					    struct net_device *dev)
11543{
11544	IPW_DEBUG_INFO("prom dev->xmit\n");
11545	dev_kfree_skb(skb);
11546	return NETDEV_TX_OK;
11547}
11548
11549static const struct net_device_ops ipw_prom_netdev_ops = {
11550	.ndo_open 		= ipw_prom_open,
11551	.ndo_stop		= ipw_prom_stop,
11552	.ndo_start_xmit		= ipw_prom_hard_start_xmit,
11553	.ndo_change_mtu		= libipw_change_mtu,
11554	.ndo_set_mac_address 	= eth_mac_addr,
11555	.ndo_validate_addr	= eth_validate_addr,
11556};
11557
11558static int ipw_prom_alloc(struct ipw_priv *priv)
11559{
11560	int rc = 0;
11561
11562	if (priv->prom_net_dev)
11563		return -EPERM;
11564
11565	priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11566	if (priv->prom_net_dev == NULL)
11567		return -ENOMEM;
11568
11569	priv->prom_priv = libipw_priv(priv->prom_net_dev);
11570	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11571	priv->prom_priv->priv = priv;
11572
11573	strcpy(priv->prom_net_dev->name, "rtap%d");
11574	memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11575
11576	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11577	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11578
11579	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11580	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11581
11582	rc = register_netdev(priv->prom_net_dev);
11583	if (rc) {
11584		free_ieee80211(priv->prom_net_dev);
11585		priv->prom_net_dev = NULL;
11586		return rc;
11587	}
11588
11589	return 0;
11590}
11591
11592static void ipw_prom_free(struct ipw_priv *priv)
11593{
11594	if (!priv->prom_net_dev)
11595		return;
11596
11597	unregister_netdev(priv->prom_net_dev);
11598	free_ieee80211(priv->prom_net_dev);
11599
11600	priv->prom_net_dev = NULL;
11601}
11602
11603#endif
11604
11605static const struct net_device_ops ipw_netdev_ops = {
11606	.ndo_init		= ipw_net_init,
11607	.ndo_open		= ipw_net_open,
11608	.ndo_stop		= ipw_net_stop,
11609	.ndo_set_multicast_list	= ipw_net_set_multicast_list,
11610	.ndo_set_mac_address	= ipw_net_set_mac_address,
11611	.ndo_start_xmit		= libipw_xmit,
11612	.ndo_change_mtu		= libipw_change_mtu,
11613	.ndo_validate_addr	= eth_validate_addr,
11614};
11615
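/*
 * PCI probe: allocate the libipw net_device and driver private data, enable
 * the PCI device with 32-bit DMA, map BAR 0, set up the deferred work items
 * and interrupt handler, register the sysfs attribute group and the network
 * device, and optionally create the promiscuous rtap interface.
 */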
11616static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11617				   const struct pci_device_id *ent)
11618{
11619	int err = 0;
11620	struct net_device *net_dev;
11621	void __iomem *base;
11622	u32 length, val;
11623	struct ipw_priv *priv;
11624	int i;
11625
11626	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11627	if (net_dev == NULL) {
11628		err = -ENOMEM;
11629		goto out;
11630	}
11631
11632	priv = libipw_priv(net_dev);
11633	priv->ieee = netdev_priv(net_dev);
11634
11635	priv->net_dev = net_dev;
11636	priv->pci_dev = pdev;
11637	ipw_debug_level = debug;
11638	spin_lock_init(&priv->irq_lock);
11639	spin_lock_init(&priv->lock);
11640	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11641		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11642
11643	mutex_init(&priv->mutex);
11644	if (pci_enable_device(pdev)) {
11645		err = -ENODEV;
11646		goto out_free_ieee80211;
11647	}
11648
11649	pci_set_master(pdev);
11650
11651	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11652	if (!err)
11653		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11654	if (err) {
11655		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11656		goto out_pci_disable_device;
11657	}
11658
11659	pci_set_drvdata(pdev, priv);
11660
11661	err = pci_request_regions(pdev, DRV_NAME);
11662	if (err)
11663		goto out_pci_disable_device;
11664
11665	/* We disable the RETRY_TIMEOUT register (0x41) to keep
11666	 * PCI Tx retries from interfering with C3 CPU state */
11667	pci_read_config_dword(pdev, 0x40, &val);
11668	if ((val & 0x0000ff00) != 0)
11669		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11670
11671	length = pci_resource_len(pdev, 0);
11672	priv->hw_len = length;
11673
11674	base = pci_ioremap_bar(pdev, 0);
11675	if (!base) {
11676		err = -ENODEV;
11677		goto out_pci_release_regions;
11678	}
11679
11680	priv->hw_base = base;
11681	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11682	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11683
11684	err = ipw_setup_deferred_work(priv);
11685	if (err) {
11686		IPW_ERROR("Unable to setup deferred work\n");
11687		goto out_iounmap;
11688	}
11689
11690	ipw_sw_reset(priv, 1);
11691
11692	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11693	if (err) {
11694		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11695		goto out_destroy_workqueue;
11696	}
11697
11698	SET_NETDEV_DEV(net_dev, &pdev->dev);
11699
11700	mutex_lock(&priv->mutex);
11701
11702	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11703	priv->ieee->set_security = shim__set_security;
11704	priv->ieee->is_queue_full = ipw_net_is_queue_full;
11705
11706#ifdef CONFIG_IPW2200_QOS
11707	priv->ieee->is_qos_active = ipw_is_qos_active;
11708	priv->ieee->handle_probe_response = ipw_handle_beacon;
11709	priv->ieee->handle_beacon = ipw_handle_probe_response;
11710	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11711#endif				/* CONFIG_IPW2200_QOS */
11712
11713	priv->ieee->perfect_rssi = -20;
11714	priv->ieee->worst_rssi = -85;
11715
11716	net_dev->netdev_ops = &ipw_netdev_ops;
11717	priv->wireless_data.spy_data = &priv->ieee->spy_data;
11718	net_dev->wireless_data = &priv->wireless_data;
11719	net_dev->wireless_handlers = &ipw_wx_handler_def;
11720	net_dev->ethtool_ops = &ipw_ethtool_ops;
11721	net_dev->irq = pdev->irq;
11722	net_dev->base_addr = (unsigned long)priv->hw_base;
11723	net_dev->mem_start = pci_resource_start(pdev, 0);
11724	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11725
11726	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11727	if (err) {
11728		IPW_ERROR("failed to create sysfs device attributes\n");
11729		mutex_unlock(&priv->mutex);
11730		goto out_release_irq;
11731	}
11732
11733	mutex_unlock(&priv->mutex);
11734	err = register_netdev(net_dev);
11735	if (err) {
11736		IPW_ERROR("failed to register network device\n");
11737		goto out_remove_sysfs;
11738	}
11739
11740#ifdef CONFIG_IPW2200_PROMISCUOUS
11741	if (rtap_iface) {
11742	        err = ipw_prom_alloc(priv);
11743		if (err) {
11744			IPW_ERROR("Failed to register promiscuous network "
11745				  "device (error %d).\n", err);
11746			unregister_netdev(priv->net_dev);
11747			goto out_remove_sysfs;
11748		}
11749	}
11750#endif
11751
11752	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11753	       "channels, %d 802.11a channels)\n",
11754	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11755	       priv->ieee->geo.a_channels);
11756
11757	return 0;
11758
11759      out_remove_sysfs:
11760	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11761      out_release_irq:
11762	free_irq(pdev->irq, priv);
11763      out_destroy_workqueue:
11764	destroy_workqueue(priv->workqueue);
11765	priv->workqueue = NULL;
11766      out_iounmap:
11767	iounmap(priv->hw_base);
11768      out_pci_release_regions:
11769	pci_release_regions(pdev);
11770      out_pci_disable_device:
11771	pci_disable_device(pdev);
11772	pci_set_drvdata(pdev, NULL);
11773      out_free_ieee80211:
11774	free_ieee80211(priv->net_dev);
11775      out:
11776	return err;
11777}
11778
11779static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11780{
11781	struct ipw_priv *priv = pci_get_drvdata(pdev);
11782	struct list_head *p, *q;
11783	int i;
11784
11785	if (!priv)
11786		return;
11787
11788	mutex_lock(&priv->mutex);
11789
11790	priv->status |= STATUS_EXIT_PENDING;
11791	ipw_down(priv);
11792	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11793
11794	mutex_unlock(&priv->mutex);
11795
11796	unregister_netdev(priv->net_dev);
11797
11798	if (priv->rxq) {
11799		ipw_rx_queue_free(priv, priv->rxq);
11800		priv->rxq = NULL;
11801	}
11802	ipw_tx_queue_free(priv);
11803
11804	if (priv->cmdlog) {
11805		kfree(priv->cmdlog);
11806		priv->cmdlog = NULL;
11807	}
11808	/* ipw_down will ensure that there is no more pending work
11809	 * in the workqueues, so we can safely remove them now. */
11810	cancel_delayed_work(&priv->adhoc_check);
11811	cancel_delayed_work(&priv->gather_stats);
11812	cancel_delayed_work(&priv->request_scan);
11813	cancel_delayed_work(&priv->request_direct_scan);
11814	cancel_delayed_work(&priv->request_passive_scan);
11815	cancel_delayed_work(&priv->scan_event);
11816	cancel_delayed_work(&priv->rf_kill);
11817	cancel_delayed_work(&priv->scan_check);
11818	destroy_workqueue(priv->workqueue);
11819	priv->workqueue = NULL;
11820
11821	/* Free MAC hash list for ADHOC */
11822	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11823		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11824			list_del(p);
11825			kfree(list_entry(p, struct ipw_ibss_seq, list));
11826		}
11827	}
11828
11829	kfree(priv->error);
11830	priv->error = NULL;
11831
11832#ifdef CONFIG_IPW2200_PROMISCUOUS
11833	ipw_prom_free(priv);
11834#endif
11835
11836	free_irq(pdev->irq, priv);
11837	iounmap(priv->hw_base);
11838	pci_release_regions(pdev);
11839	pci_disable_device(pdev);
11840	pci_set_drvdata(pdev, NULL);
11841	free_ieee80211(priv->net_dev);
11842	free_firmware();
11843}
11844
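/*
 * Power management: suspend takes the device all the way down and records
 * the wall-clock time; resume re-enables the PCI device, re-clears the
 * RETRY_TIMEOUT register and queues ipw_bg_up() to bring the interface
 * back.  The elapsed suspend time is used by ipw_up() to age cached scan
 * results.
 */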
11845#ifdef CONFIG_PM
11846static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11847{
11848	struct ipw_priv *priv = pci_get_drvdata(pdev);
11849	struct net_device *dev = priv->net_dev;
11850
11851	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11852
11853	/* Take down the device; powers it off, etc. */
11854	ipw_down(priv);
11855
11856	/* Remove the PRESENT state of the device */
11857	netif_device_detach(dev);
11858
11859	pci_save_state(pdev);
11860	pci_disable_device(pdev);
11861	pci_set_power_state(pdev, pci_choose_state(pdev, state));
11862
11863	priv->suspend_at = get_seconds();
11864
11865	return 0;
11866}
11867
11868static int ipw_pci_resume(struct pci_dev *pdev)
11869{
11870	struct ipw_priv *priv = pci_get_drvdata(pdev);
11871	struct net_device *dev = priv->net_dev;
11872	int err;
11873	u32 val;
11874
11875	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11876
11877	pci_set_power_state(pdev, PCI_D0);
11878	err = pci_enable_device(pdev);
11879	if (err) {
11880		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11881		       dev->name);
11882		return err;
11883	}
11884	pci_restore_state(pdev);
11885
11886	/*
11887	 * Suspend/Resume resets the PCI configuration space, so we have to
11888	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11889	 * from interfering with C3 CPU state. pci_restore_state won't help
11890	 * here since it only restores the first 64 bytes of the PCI config header.
11891	 */
11892	pci_read_config_dword(pdev, 0x40, &val);
11893	if ((val & 0x0000ff00) != 0)
11894		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11895
11896	/* Set the device back into the PRESENT state; this will also wake
11897	 * the queue if needed */
11898	netif_device_attach(dev);
11899
11900	priv->suspend_time = get_seconds() - priv->suspend_at;
11901
11902	/* Bring the device back up */
11903	queue_work(priv->workqueue, &priv->up);
11904
11905	return 0;
11906}
11907#endif
11908
11909static void ipw_pci_shutdown(struct pci_dev *pdev)
11910{
11911	struct ipw_priv *priv = pci_get_drvdata(pdev);
11912
11913	/* Take down the device; powers it off, etc. */
11914	ipw_down(priv);
11915
11916	pci_disable_device(pdev);
11917}
11918
11919/* driver initialization stuff */
11920static struct pci_driver ipw_driver = {
11921	.name = DRV_NAME,
11922	.id_table = card_ids,
11923	.probe = ipw_pci_probe,
11924	.remove = __devexit_p(ipw_pci_remove),
11925#ifdef CONFIG_PM
11926	.suspend = ipw_pci_suspend,
11927	.resume = ipw_pci_resume,
11928#endif
11929	.shutdown = ipw_pci_shutdown,
11930};
11931
11932static int __init ipw_init(void)
11933{
11934	int ret;
11935
11936	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11937	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11938
11939	ret = pci_register_driver(&ipw_driver);
11940	if (ret) {
11941		IPW_ERROR("Unable to initialize PCI module\n");
11942		return ret;
11943	}
11944
11945	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11946	if (ret) {
11947		IPW_ERROR("Unable to create driver sysfs file\n");
11948		pci_unregister_driver(&ipw_driver);
11949		return ret;
11950	}
11951
11952	return ret;
11953}
11954
11955static void __exit ipw_exit(void)
11956{
11957	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11958	pci_unregister_driver(&ipw_driver);
11959}
11960
11961module_param(disable, int, 0444);
11962MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11963
11964module_param(associate, int, 0444);
11965MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
11966
11967module_param(auto_create, int, 0444);
11968MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11969
11970module_param_named(led, led_support, int, 0444);
11971MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
11972
11973module_param(debug, int, 0444);
11974MODULE_PARM_DESC(debug, "debug output mask");
11975
11976module_param_named(channel, default_channel, int, 0444);
11977MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11978
11979#ifdef CONFIG_IPW2200_PROMISCUOUS
11980module_param(rtap_iface, int, 0444);
11981MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
11982#endif
11983
11984#ifdef CONFIG_IPW2200_QOS
11985module_param(qos_enable, int, 0444);
11986MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11987
11988module_param(qos_burst_enable, int, 0444);
11989MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11990
11991module_param(qos_no_ack_mask, int, 0444);
11992MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11993
11994module_param(burst_duration_CCK, int, 0444);
11995MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11996
11997module_param(burst_duration_OFDM, int, 0444);
11998MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11999#endif				/* CONFIG_IPW2200_QOS */
12000
12001#ifdef CONFIG_IPW2200_MONITOR
12002module_param_named(mode, network_mode, int, 0444);
12003MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12004#else
12005module_param_named(mode, network_mode, int, 0444);
12006MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12007#endif
12008
12009module_param(bt_coexist, int, 0444);
12010MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12011
12012module_param(hwcrypto, int, 0444);
12013MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12014
12015module_param(cmdlog, int, 0444);
12016MODULE_PARM_DESC(cmdlog,
12017		 "allocate a ring buffer for logging firmware commands");
12018
12019module_param(roaming, int, 0444);
12020MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12021
12022module_param(antenna, int, 0444);
12023MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (choose the one with lower background noise), 3=Aux");
12024
12025module_exit(ipw_exit);
12026module_init(ipw_init);
12027