ipw2200.c revision baeb2ffab4e67bb9174e6166e070a9a8ec94b0f6
1/******************************************************************************
2
3  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5  802.11 status code portion of this file from ethereal-0.10.6:
6    Copyright 2000, Axis Communications AB
7    Ethereal - Network traffic analyzer
8    By Gerald Combs <gerald@ethereal.com>
9    Copyright 1998 Gerald Combs
10
11  This program is free software; you can redistribute it and/or modify it
12  under the terms of version 2 of the GNU General Public License as
13  published by the Free Software Foundation.
14
15  This program is distributed in the hope that it will be useful, but WITHOUT
16  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  more details.
19
20  You should have received a copy of the GNU General Public License along with
21  this program; if not, write to the Free Software Foundation, Inc., 59
22  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
23
24  The full GNU General Public License is included in this distribution in the
25  file called LICENSE.
26
27  Contact Information:
28  Intel Linux Wireless <ilw@linux.intel.com>
29  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31******************************************************************************/
32
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include "ipw2200.h"
36
37
38#ifndef KBUILD_EXTMOD
39#define VK "k"
40#else
41#define VK
42#endif
43
44#ifdef CONFIG_IPW2200_DEBUG
45#define VD "d"
46#else
47#define VD
48#endif
49
50#ifdef CONFIG_IPW2200_MONITOR
51#define VM "m"
52#else
53#define VM
54#endif
55
56#ifdef CONFIG_IPW2200_PROMISCUOUS
57#define VP "p"
58#else
59#define VP
60#endif
61
62#ifdef CONFIG_IPW2200_RADIOTAP
63#define VR "r"
64#else
65#define VR
66#endif
67
68#ifdef CONFIG_IPW2200_QOS
69#define VQ "q"
70#else
71#define VQ
72#endif
73
74#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
75#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
76#define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
77#define DRV_VERSION     IPW2200_VERSION
78
79#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
80
81MODULE_DESCRIPTION(DRV_DESCRIPTION);
82MODULE_VERSION(DRV_VERSION);
83MODULE_AUTHOR(DRV_COPYRIGHT);
84MODULE_LICENSE("GPL");
85MODULE_FIRMWARE("ipw2200-ibss.fw");
86#ifdef CONFIG_IPW2200_MONITOR
87MODULE_FIRMWARE("ipw2200-sniffer.fw");
88#endif
89MODULE_FIRMWARE("ipw2200-bss.fw");
90
91static int cmdlog = 0;
92static int debug = 0;
93static int default_channel = 0;
94static int network_mode = 0;
95
96static u32 ipw_debug_level;
97static int associate;
98static int auto_create = 1;
99static int led_support = 1;
100static int disable = 0;
101static int bt_coexist = 0;
102static int hwcrypto = 0;
103static int roaming = 1;
104static const char ipw_modes[] = {
105	'a', 'b', 'g', '?'
106};
107static int antenna = CFG_SYS_ANTENNA_BOTH;
108
109#ifdef CONFIG_IPW2200_PROMISCUOUS
110static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
111#endif
112
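/* Rates advertised through cfg80211.  struct ieee80211_rate expresses
 * .bitrate in units of 100 kb/s, so the first four entries below are the
 * 1/2/5.5/11 Mb/s CCK rates and the rest the 6-54 Mb/s OFDM rates. */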
113static struct ieee80211_rate ipw2200_rates[] = {
114	{ .bitrate = 10 },
115	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
116	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
117	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118	{ .bitrate = 60 },
119	{ .bitrate = 90 },
120	{ .bitrate = 120 },
121	{ .bitrate = 180 },
122	{ .bitrate = 240 },
123	{ .bitrate = 360 },
124	{ .bitrate = 480 },
125	{ .bitrate = 540 }
126};
127
128#define ipw2200_a_rates		(ipw2200_rates + 4)
129#define ipw2200_num_a_rates	8
130#define ipw2200_bg_rates	(ipw2200_rates + 0)
131#define ipw2200_num_bg_rates	12
132
133#ifdef CONFIG_IPW2200_QOS
134static int qos_enable = 0;
135static int qos_burst_enable = 0;
136static int qos_no_ack_mask = 0;
137static int burst_duration_CCK = 0;
138static int burst_duration_OFDM = 0;
139
140static struct libipw_qos_parameters def_qos_parameters_OFDM = {
141	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
142	 QOS_TX3_CW_MIN_OFDM},
143	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
144	 QOS_TX3_CW_MAX_OFDM},
145	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
146	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
147	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
148	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
149};
150
151static struct libipw_qos_parameters def_qos_parameters_CCK = {
152	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
153	 QOS_TX3_CW_MIN_CCK},
154	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
155	 QOS_TX3_CW_MAX_CCK},
156	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
157	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
158	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
159	 QOS_TX3_TXOP_LIMIT_CCK}
160};
161
162static struct libipw_qos_parameters def_parameters_OFDM = {
163	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
164	 DEF_TX3_CW_MIN_OFDM},
165	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
166	 DEF_TX3_CW_MAX_OFDM},
167	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
168	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
169	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
170	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
171};
172
173static struct libipw_qos_parameters def_parameters_CCK = {
174	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
175	 DEF_TX3_CW_MIN_CCK},
176	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
177	 DEF_TX3_CW_MAX_CCK},
178	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
179	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
180	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
181	 DEF_TX3_TXOP_LIMIT_CCK}
182};
183
184static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
185
186static int from_priority_to_tx_queue[] = {
187	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
188	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
189};
190
191static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
192
193static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
194				       *qos_param);
195static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
196				     *qos_param);
197#endif				/* CONFIG_IPW2200_QOS */
198
199static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
200static void ipw_remove_current_network(struct ipw_priv *priv);
201static void ipw_rx(struct ipw_priv *priv);
202static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
203				struct clx2_tx_queue *txq, int qindex);
204static int ipw_queue_reset(struct ipw_priv *priv);
205
206static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
207			     int len, int sync);
208
209static void ipw_tx_queue_free(struct ipw_priv *);
210
211static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
212static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
213static void ipw_rx_queue_replenish(void *);
214static int ipw_up(struct ipw_priv *);
215static void ipw_bg_up(struct work_struct *work);
216static void ipw_down(struct ipw_priv *);
217static void ipw_bg_down(struct work_struct *work);
218static int ipw_config(struct ipw_priv *);
219static int init_supported_rates(struct ipw_priv *priv,
220				struct ipw_supported_rates *prates);
221static void ipw_set_hwcrypto_keys(struct ipw_priv *);
222static void ipw_send_wep_keys(struct ipw_priv *, int);
223
224static int snprint_line(char *buf, size_t count,
225			const u8 * data, u32 len, u32 ofs)
226{
227	int out, i, j, l;
228	char c;
229
230	out = snprintf(buf, count, "%08X", ofs);
231
232	for (l = 0, i = 0; i < 2; i++) {
233		out += snprintf(buf + out, count - out, " ");
234		for (j = 0; j < 8 && l < len; j++, l++)
235			out += snprintf(buf + out, count - out, "%02X ",
236					data[(i * 8 + j)]);
237		for (; j < 8; j++)
238			out += snprintf(buf + out, count - out, "   ");
239	}
240
241	out += snprintf(buf + out, count - out, " ");
242	for (l = 0, i = 0; i < 2; i++) {
243		out += snprintf(buf + out, count - out, " ");
244		for (j = 0; j < 8 && l < len; j++, l++) {
245			c = data[(i * 8 + j)];
246			if (!isascii(c) || !isprint(c))
247				c = '.';
248
249			out += snprintf(buf + out, count - out, "%c", c);
250		}
251
252		for (; j < 8; j++)
253			out += snprintf(buf + out, count - out, " ");
254	}
255
256	return out;
257}
258
259static void printk_buf(int level, const u8 * data, u32 len)
260{
261	char line[81];
262	u32 ofs = 0;
263	if (!(ipw_debug_level & level))
264		return;
265
266	while (len) {
267		snprint_line(line, sizeof(line), &data[ofs],
268			     min(len, 16U), ofs);
269		printk(KERN_DEBUG "%s\n", line);
270		ofs += 16;
271		len -= min(len, 16U);
272	}
273}
274
275static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
276{
277	size_t out = size;
278	u32 ofs = 0;
279	int total = 0;
280
281	while (size && len) {
282		out = snprint_line(output, size, &data[ofs],
283				   min_t(size_t, len, 16U), ofs);
284
285		ofs += 16;
286		output += out;
287		size -= out;
288		len -= min_t(size_t, len, 16U);
289		total += out;
290	}
291	return total;
292}
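
/* Illustrative only (not compiled): typical use of the hex-dump helpers
 * above.  printk_buf() emits one line per 16 bytes of input, hex on the
 * left and printable ASCII on the right, gated on ipw_debug_level.  The
 * IPW_DL_INFO level used here is just an example bit; any of the
 * IPW_DL_* masks could be passed instead. */
#if 0
static void example_dump_buffer(const u8 *data, u32 len)
{
	printk_buf(IPW_DL_INFO, data, len);
}
#endif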
293
294/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
295static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
296#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
297
298/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
299static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
300#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
301
302/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
303static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
304static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
305{
306	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
307		     __LINE__, (u32) (b), (u32) (c));
308	_ipw_write_reg8(a, b, c);
309}
310
311/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
312static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
313static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
314{
315	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
316		     __LINE__, (u32) (b), (u32) (c));
317	_ipw_write_reg16(a, b, c);
318}
319
320/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
321static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
322static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
323{
324	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
325		     __LINE__, (u32) (b), (u32) (c));
326	_ipw_write_reg32(a, b, c);
327}
328
329/* 8-bit direct write (low 4K) */
330static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
331		u8 val)
332{
333	writeb(val, ipw->hw_base + ofs);
334}
335
336/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
337#define ipw_write8(ipw, ofs, val) do { \
338	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
339			__LINE__, (u32)(ofs), (u32)(val)); \
340	_ipw_write8(ipw, ofs, val); \
341} while (0)
342
343/* 16-bit direct write (low 4K) */
344static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
345		u16 val)
346{
347	writew(val, ipw->hw_base + ofs);
348}
349
350/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
351#define ipw_write16(ipw, ofs, val) do { \
352	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
353			__LINE__, (u32)(ofs), (u32)(val)); \
354	_ipw_write16(ipw, ofs, val); \
355} while (0)
356
357/* 32-bit direct write (low 4K) */
358static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
359		u32 val)
360{
361	writel(val, ipw->hw_base + ofs);
362}
363
364/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
365#define ipw_write32(ipw, ofs, val) do { \
366	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
367			__LINE__, (u32)(ofs), (u32)(val)); \
368	_ipw_write32(ipw, ofs, val); \
369} while (0)
370
371/* 8-bit direct read (low 4K) */
372static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
373{
374	return readb(ipw->hw_base + ofs);
375}
376
377/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
378#define ipw_read8(ipw, ofs) ({ \
379	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
380			(u32)(ofs)); \
381	_ipw_read8(ipw, ofs); \
382})
383
384/* 16-bit direct read (low 4K) */
385static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
386{
387	return readw(ipw->hw_base + ofs);
388}
389
390/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
391#define ipw_read16(ipw, ofs) ({ \
392	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
393			(u32)(ofs)); \
394	_ipw_read16(ipw, ofs); \
395})
396
397/* 32-bit direct read (low 4K) */
398static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
399{
400	return readl(ipw->hw_base + ofs);
401}
402
403/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
404#define ipw_read32(ipw, ofs) ({ \
405	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
406			(u32)(ofs)); \
407	_ipw_read32(ipw, ofs); \
408})
409
410static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
411/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
412#define ipw_read_indirect(a, b, c, d) ({ \
413	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
414			__LINE__, (u32)(b), (u32)(d)); \
415	_ipw_read_indirect(a, b, c, d); \
416})
417
418/* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
419static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
420				int num);
421#define ipw_write_indirect(a, b, c, d) do { \
422	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
423			__LINE__, (u32)(b), (u32)(d)); \
424	_ipw_write_indirect(a, b, c, d); \
425} while (0)
426
427/* 32-bit indirect write (above 4K) */
428static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
429{
430	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
431	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
432	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
433}
434
435/* 8-bit indirect write (above 4K) */
436static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
437{
438	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
439	u32 dif_len = reg - aligned_addr;
440
441	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
442	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
443	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
444}
445
446/* 16-bit indirect write (above 4K) */
447static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
448{
449	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
450	u32 dif_len = (reg - aligned_addr) & (~0x1ul);
451
452	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
453	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
454	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
455}
456
457/* 8-bit indirect read (above 4K) */
458static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
459{
460	u32 word;
461	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
462	IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
463	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
464	return (word >> ((reg & 0x3) * 8)) & 0xff;
465}
466
467/* 32-bit indirect read (above 4K) */
468static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
469{
470	u32 value;
471
472	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
473
474	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
475	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
476	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
477	return value;
478}
479
480/* General purpose, no alignment requirement, iterative (multi-byte) read, */
481/*    for area above 1st 4K of SRAM/reg space */
482static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
483			       int num)
484{
485	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
486	u32 dif_len = addr - aligned_addr;
487	u32 i;
488
489	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
490
491	if (num <= 0) {
492		return;
493	}
494
495	/* Read the first dword (or portion) byte by byte */
496	if (unlikely(dif_len)) {
497		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
498		/* Start reading at aligned_addr + dif_len */
499		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
500			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
501		aligned_addr += 4;
502	}
503
504	/* Read all of the middle dwords as dwords, with auto-increment */
505	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
506	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
507		*(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
508
509	/* Read the last dword (or portion) byte by byte */
510	if (unlikely(num)) {
511		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
512		for (i = 0; num > 0; i++, num--)
513			*buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
514	}
515}
516
517/* General purpose, no alignment requirement, iterative (multi-byte) write, */
518/*    for area above 1st 4K of SRAM/reg space */
519static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
520				int num)
521{
522	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
523	u32 dif_len = addr - aligned_addr;
524	u32 i;
525
526	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
527
528	if (num <= 0) {
529		return;
530	}
531
532	/* Write the first dword (or portion) byte by byte */
533	if (unlikely(dif_len)) {
534		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
535		/* Start writing at aligned_addr + dif_len */
536		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
537			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
538		aligned_addr += 4;
539	}
540
541	/* Write all of the middle dwords as dwords, with auto-increment */
542	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
543	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
544		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
545
546	/* Write the last dword (or portion) byte by byte */
547	if (unlikely(num)) {
548		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
549		for (i = 0; num > 0; i++, num--, buf++)
550			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
551	}
552}
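
/* Illustrative only (not compiled): the indirect helpers above hide the
 * dword alignment of the IPW_INDIRECT_ADDR/DATA window, so callers may
 * read or write at any byte offset above the first 4K.  The addresses
 * used below are made-up examples, not real device registers. */
#if 0
static void example_unaligned_sram_copy(struct ipw_priv *priv)
{
	u8 tmp[6];

	/* 6 bytes from an odd offset: the helper does byte accesses for
	 * the unaligned head/tail and auto-increment dword reads for the
	 * aligned middle. */
	ipw_read_indirect(priv, 0x00030001, tmp, sizeof(tmp));

	/* Write them back one byte lower; again no alignment required. */
	ipw_write_indirect(priv, 0x00030000, tmp, sizeof(tmp));
}
#endif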
553
554/* General purpose, no alignment requirement, iterative (multi-byte) write, */
555/*    for 1st 4K of SRAM/regs space */
556static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
557			     int num)
558{
559	memcpy_toio((priv->hw_base + addr), buf, num);
560}
561
562/* Set bit(s) in low 4K of SRAM/regs */
563static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
564{
565	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
566}
567
568/* Clear bit(s) in low 4K of SRAM/regs */
569static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
570{
571	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
572}
573
574static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
575{
576	if (priv->status & STATUS_INT_ENABLED)
577		return;
578	priv->status |= STATUS_INT_ENABLED;
579	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
580}
581
582static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
583{
584	if (!(priv->status & STATUS_INT_ENABLED))
585		return;
586	priv->status &= ~STATUS_INT_ENABLED;
587	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
588}
589
590static inline void ipw_enable_interrupts(struct ipw_priv *priv)
591{
592	unsigned long flags;
593
594	spin_lock_irqsave(&priv->irq_lock, flags);
595	__ipw_enable_interrupts(priv);
596	spin_unlock_irqrestore(&priv->irq_lock, flags);
597}
598
599static inline void ipw_disable_interrupts(struct ipw_priv *priv)
600{
601	unsigned long flags;
602
603	spin_lock_irqsave(&priv->irq_lock, flags);
604	__ipw_disable_interrupts(priv);
605	spin_unlock_irqrestore(&priv->irq_lock, flags);
606}
607
608static char *ipw_error_desc(u32 val)
609{
610	switch (val) {
611	case IPW_FW_ERROR_OK:
612		return "ERROR_OK";
613	case IPW_FW_ERROR_FAIL:
614		return "ERROR_FAIL";
615	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
616		return "MEMORY_UNDERFLOW";
617	case IPW_FW_ERROR_MEMORY_OVERFLOW:
618		return "MEMORY_OVERFLOW";
619	case IPW_FW_ERROR_BAD_PARAM:
620		return "BAD_PARAM";
621	case IPW_FW_ERROR_BAD_CHECKSUM:
622		return "BAD_CHECKSUM";
623	case IPW_FW_ERROR_NMI_INTERRUPT:
624		return "NMI_INTERRUPT";
625	case IPW_FW_ERROR_BAD_DATABASE:
626		return "BAD_DATABASE";
627	case IPW_FW_ERROR_ALLOC_FAIL:
628		return "ALLOC_FAIL";
629	case IPW_FW_ERROR_DMA_UNDERRUN:
630		return "DMA_UNDERRUN";
631	case IPW_FW_ERROR_DMA_STATUS:
632		return "DMA_STATUS";
633	case IPW_FW_ERROR_DINO_ERROR:
634		return "DINO_ERROR";
635	case IPW_FW_ERROR_EEPROM_ERROR:
636		return "EEPROM_ERROR";
637	case IPW_FW_ERROR_SYSASSERT:
638		return "SYSASSERT";
639	case IPW_FW_ERROR_FATAL_ERROR:
640		return "FATAL_ERROR";
641	default:
642		return "UNKNOWN_ERROR";
643	}
644}
645
646static void ipw_dump_error_log(struct ipw_priv *priv,
647			       struct ipw_fw_error *error)
648{
649	u32 i;
650
651	if (!error) {
652		IPW_ERROR("Error allocating and capturing error log.  "
653			  "Nothing to dump.\n");
654		return;
655	}
656
657	IPW_ERROR("Start IPW Error Log Dump:\n");
658	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
659		  error->status, error->config);
660
661	for (i = 0; i < error->elem_len; i++)
662		IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
663			  ipw_error_desc(error->elem[i].desc),
664			  error->elem[i].time,
665			  error->elem[i].blink1,
666			  error->elem[i].blink2,
667			  error->elem[i].link1,
668			  error->elem[i].link2, error->elem[i].data);
669	for (i = 0; i < error->log_len; i++)
670		IPW_ERROR("%i\t0x%08x\t%i\n",
671			  error->log[i].time,
672			  error->log[i].data, error->log[i].event);
673}
674
675static inline int ipw_is_init(struct ipw_priv *priv)
676{
677	return (priv->status & STATUS_INIT) ? 1 : 0;
678}
679
680static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
681{
682	u32 addr, field_info, field_len, field_count, total_len;
683
684	IPW_DEBUG_ORD("ordinal = %i\n", ord);
685
686	if (!priv || !val || !len) {
687		IPW_DEBUG_ORD("Invalid argument\n");
688		return -EINVAL;
689	}
690
691	/* verify device ordinal tables have been initialized */
692	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
693		IPW_DEBUG_ORD("Access ordinals before initialization\n");
694		return -EINVAL;
695	}
696
697	switch (IPW_ORD_TABLE_ID_MASK & ord) {
698	case IPW_ORD_TABLE_0_MASK:
699		/*
700		 * TABLE 0: Direct access to a table of 32 bit values
701		 *
702		 * This is a very simple table with the data directly
703		 * read from the table
704		 */
705
706		/* remove the table id from the ordinal */
707		ord &= IPW_ORD_TABLE_VALUE_MASK;
708
709		/* boundary check */
710		if (ord > priv->table0_len) {
711			IPW_DEBUG_ORD("ordinal value (%i) longer than "
712				      "max (%i)\n", ord, priv->table0_len);
713			return -EINVAL;
714		}
715
716		/* verify we have enough room to store the value */
717		if (*len < sizeof(u32)) {
718			IPW_DEBUG_ORD("ordinal buffer length too small, "
719				      "need %zd\n", sizeof(u32));
720			return -EINVAL;
721		}
722
723		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
724			      ord, priv->table0_addr + (ord << 2));
725
726		*len = sizeof(u32);
727		ord <<= 2;
728		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
729		break;
730
731	case IPW_ORD_TABLE_1_MASK:
732		/*
733		 * TABLE 1: Indirect access to a table of 32 bit values
734		 *
735		 * This is a fairly large table of u32 values each
736		 * representing starting addr for the data (which is
737		 * also a u32)
738		 */
739
740		/* remove the table id from the ordinal */
741		ord &= IPW_ORD_TABLE_VALUE_MASK;
742
743		/* boundary check */
744		if (ord > priv->table1_len) {
745			IPW_DEBUG_ORD("ordinal value too long\n");
746			return -EINVAL;
747		}
748
749		/* verify we have enough room to store the value */
750		if (*len < sizeof(u32)) {
751			IPW_DEBUG_ORD("ordinal buffer length too small, "
752				      "need %zd\n", sizeof(u32));
753			return -EINVAL;
754		}
755
756		*((u32 *) val) =
757		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
758		*len = sizeof(u32);
759		break;
760
761	case IPW_ORD_TABLE_2_MASK:
762		/*
763		 * TABLE 2: Indirect access to a table of variable sized values
764		 *
765		 * This table consists of six values, each containing
766		 *     - dword containing the starting offset of the data
767		 *     - dword containing the length in the first 16 bits
768		 *       and the count in the second 16 bits
769		 */
770
771		/* remove the table id from the ordinal */
772		ord &= IPW_ORD_TABLE_VALUE_MASK;
773
774		/* boundary check */
775		if (ord > priv->table2_len) {
776			IPW_DEBUG_ORD("ordinal value too long\n");
777			return -EINVAL;
778		}
779
780		/* get the address of statistic */
781		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
782
783		/* get the second DW of statistics ;
784		 * two 16-bit words - first is length, second is count */
785		field_info =
786		    ipw_read_reg32(priv,
787				   priv->table2_addr + (ord << 3) +
788				   sizeof(u32));
789
790		/* get each entry length */
791		field_len = *((u16 *) & field_info);
792
793		/* get number of entries */
794		field_count = *(((u16 *) & field_info) + 1);
795
796		/* abort if not enough memory */
797		total_len = field_len * field_count;
798		if (total_len > *len) {
799			*len = total_len;
800			return -EINVAL;
801		}
802
803		*len = total_len;
804		if (!total_len)
805			return 0;
806
807		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
808			      "field_info = 0x%08x\n",
809			      addr, total_len, field_info);
810		ipw_read_indirect(priv, addr, val, total_len);
811		break;
812
813	default:
814		IPW_DEBUG_ORD("Invalid ordinal!\n");
815		return -EINVAL;
816
817	}
818
819	return 0;
820}
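
/* Illustrative only (not compiled): the usual caller pattern for a
 * single-u32 ordinal, mirroring what the sysfs handlers further down do
 * (e.g. show_ucode_version / show_rtc). */
#if 0
static u32 example_read_rtc(struct ipw_priv *priv)
{
	u32 val = 0;
	u32 len = sizeof(val);

	/* On success *len is set to the size actually written to val. */
	if (ipw_get_ordinal(priv, IPW_ORD_STAT_RTC, &val, &len))
		return 0;	/* ordinal tables not available yet */

	return val;
}
#endif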
821
822static void ipw_init_ordinals(struct ipw_priv *priv)
823{
824	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
825	priv->table0_len = ipw_read32(priv, priv->table0_addr);
826
827	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
828		      priv->table0_addr, priv->table0_len);
829
830	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
831	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
832
833	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
834		      priv->table1_addr, priv->table1_len);
835
836	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
837	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
838	priv->table2_len &= 0x0000ffff;	/* use first two bytes */
839
840	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
841		      priv->table2_addr, priv->table2_len);
842
843}
844
845static u32 ipw_register_toggle(u32 reg)
846{
847	reg &= ~IPW_START_STANDBY;
848	if (reg & IPW_GATE_ODMA)
849		reg &= ~IPW_GATE_ODMA;
850	if (reg & IPW_GATE_IDMA)
851		reg &= ~IPW_GATE_IDMA;
852	if (reg & IPW_GATE_ADMA)
853		reg &= ~IPW_GATE_ADMA;
854	return reg;
855}
856
857/*
858 * LED behavior:
859 * - On radio ON, turn on any LEDs that need to be on during start
860 * - On initialization, start unassociated blink
861 * - On association, disable unassociated blink
862 * - On disassociation, start unassociated blink
863 * - On radio OFF, turn off any LEDs started during radio on
864 *
865 */
866#define LD_TIME_LINK_ON msecs_to_jiffies(300)
867#define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
868#define LD_TIME_ACT_ON msecs_to_jiffies(250)
869
870static void ipw_led_link_on(struct ipw_priv *priv)
871{
872	unsigned long flags;
873	u32 led;
874
875	/* If configured to not use LEDs, or nic_type is 1,
876	 * then we don't toggle a LINK led */
877	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
878		return;
879
880	spin_lock_irqsave(&priv->lock, flags);
881
882	if (!(priv->status & STATUS_RF_KILL_MASK) &&
883	    !(priv->status & STATUS_LED_LINK_ON)) {
884		IPW_DEBUG_LED("Link LED On\n");
885		led = ipw_read_reg32(priv, IPW_EVENT_REG);
886		led |= priv->led_association_on;
887
888		led = ipw_register_toggle(led);
889
890		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
891		ipw_write_reg32(priv, IPW_EVENT_REG, led);
892
893		priv->status |= STATUS_LED_LINK_ON;
894
895		/* If we aren't associated, schedule turning the LED off */
896		if (!(priv->status & STATUS_ASSOCIATED))
897			queue_delayed_work(priv->workqueue,
898					   &priv->led_link_off,
899					   LD_TIME_LINK_ON);
900	}
901
902	spin_unlock_irqrestore(&priv->lock, flags);
903}
904
905static void ipw_bg_led_link_on(struct work_struct *work)
906{
907	struct ipw_priv *priv =
908		container_of(work, struct ipw_priv, led_link_on.work);
909	mutex_lock(&priv->mutex);
910	ipw_led_link_on(priv);
911	mutex_unlock(&priv->mutex);
912}
913
914static void ipw_led_link_off(struct ipw_priv *priv)
915{
916	unsigned long flags;
917	u32 led;
918
919	/* If configured not to use LEDs, or nic type is 1,
920	 * then we don't toggle the LINK LED. */
921	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
922		return;
923
924	spin_lock_irqsave(&priv->lock, flags);
925
926	if (priv->status & STATUS_LED_LINK_ON) {
927		led = ipw_read_reg32(priv, IPW_EVENT_REG);
928		led &= priv->led_association_off;
929		led = ipw_register_toggle(led);
930
931		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
932		ipw_write_reg32(priv, IPW_EVENT_REG, led);
933
934		IPW_DEBUG_LED("Link LED Off\n");
935
936		priv->status &= ~STATUS_LED_LINK_ON;
937
938		/* If we aren't associated and the radio is on, schedule
939		 * turning the LED on (blink while unassociated) */
940		if (!(priv->status & STATUS_RF_KILL_MASK) &&
941		    !(priv->status & STATUS_ASSOCIATED))
942			queue_delayed_work(priv->workqueue, &priv->led_link_on,
943					   LD_TIME_LINK_OFF);
944
945	}
946
947	spin_unlock_irqrestore(&priv->lock, flags);
948}
949
950static void ipw_bg_led_link_off(struct work_struct *work)
951{
952	struct ipw_priv *priv =
953		container_of(work, struct ipw_priv, led_link_off.work);
954	mutex_lock(&priv->mutex);
955	ipw_led_link_off(priv);
956	mutex_unlock(&priv->mutex);
957}
958
959static void __ipw_led_activity_on(struct ipw_priv *priv)
960{
961	u32 led;
962
963	if (priv->config & CFG_NO_LED)
964		return;
965
966	if (priv->status & STATUS_RF_KILL_MASK)
967		return;
968
969	if (!(priv->status & STATUS_LED_ACT_ON)) {
970		led = ipw_read_reg32(priv, IPW_EVENT_REG);
971		led |= priv->led_activity_on;
972
973		led = ipw_register_toggle(led);
974
975		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
976		ipw_write_reg32(priv, IPW_EVENT_REG, led);
977
978		IPW_DEBUG_LED("Activity LED On\n");
979
980		priv->status |= STATUS_LED_ACT_ON;
981
982		cancel_delayed_work(&priv->led_act_off);
983		queue_delayed_work(priv->workqueue, &priv->led_act_off,
984				   LD_TIME_ACT_ON);
985	} else {
986		/* Reschedule LED off for full time period */
987		cancel_delayed_work(&priv->led_act_off);
988		queue_delayed_work(priv->workqueue, &priv->led_act_off,
989				   LD_TIME_ACT_ON);
990	}
991}
992
993#if 0
994void ipw_led_activity_on(struct ipw_priv *priv)
995{
996	unsigned long flags;
997	spin_lock_irqsave(&priv->lock, flags);
998	__ipw_led_activity_on(priv);
999	spin_unlock_irqrestore(&priv->lock, flags);
1000}
1001#endif  /*  0  */
1002
1003static void ipw_led_activity_off(struct ipw_priv *priv)
1004{
1005	unsigned long flags;
1006	u32 led;
1007
1008	if (priv->config & CFG_NO_LED)
1009		return;
1010
1011	spin_lock_irqsave(&priv->lock, flags);
1012
1013	if (priv->status & STATUS_LED_ACT_ON) {
1014		led = ipw_read_reg32(priv, IPW_EVENT_REG);
1015		led &= priv->led_activity_off;
1016
1017		led = ipw_register_toggle(led);
1018
1019		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1020		ipw_write_reg32(priv, IPW_EVENT_REG, led);
1021
1022		IPW_DEBUG_LED("Activity LED Off\n");
1023
1024		priv->status &= ~STATUS_LED_ACT_ON;
1025	}
1026
1027	spin_unlock_irqrestore(&priv->lock, flags);
1028}
1029
1030static void ipw_bg_led_activity_off(struct work_struct *work)
1031{
1032	struct ipw_priv *priv =
1033		container_of(work, struct ipw_priv, led_act_off.work);
1034	mutex_lock(&priv->mutex);
1035	ipw_led_activity_off(priv);
1036	mutex_unlock(&priv->mutex);
1037}
1038
1039static void ipw_led_band_on(struct ipw_priv *priv)
1040{
1041	unsigned long flags;
1042	u32 led;
1043
1044	/* Only nic type 1 supports mode LEDs */
1045	if (priv->config & CFG_NO_LED ||
1046	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1047		return;
1048
1049	spin_lock_irqsave(&priv->lock, flags);
1050
1051	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1052	if (priv->assoc_network->mode == IEEE_A) {
1053		led |= priv->led_ofdm_on;
1054		led &= priv->led_association_off;
1055		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1056	} else if (priv->assoc_network->mode == IEEE_G) {
1057		led |= priv->led_ofdm_on;
1058		led |= priv->led_association_on;
1059		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1060	} else {
1061		led &= priv->led_ofdm_off;
1062		led |= priv->led_association_on;
1063		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1064	}
1065
1066	led = ipw_register_toggle(led);
1067
1068	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1069	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1070
1071	spin_unlock_irqrestore(&priv->lock, flags);
1072}
1073
1074static void ipw_led_band_off(struct ipw_priv *priv)
1075{
1076	unsigned long flags;
1077	u32 led;
1078
1079	/* Only nic type 1 supports mode LEDs */
1080	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1081		return;
1082
1083	spin_lock_irqsave(&priv->lock, flags);
1084
1085	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1086	led &= priv->led_ofdm_off;
1087	led &= priv->led_association_off;
1088
1089	led = ipw_register_toggle(led);
1090
1091	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1092	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1093
1094	spin_unlock_irqrestore(&priv->lock, flags);
1095}
1096
1097static void ipw_led_radio_on(struct ipw_priv *priv)
1098{
1099	ipw_led_link_on(priv);
1100}
1101
1102static void ipw_led_radio_off(struct ipw_priv *priv)
1103{
1104	ipw_led_activity_off(priv);
1105	ipw_led_link_off(priv);
1106}
1107
1108static void ipw_led_link_up(struct ipw_priv *priv)
1109{
1110	/* Set the Link LED on for all nic types */
1111	ipw_led_link_on(priv);
1112}
1113
1114static void ipw_led_link_down(struct ipw_priv *priv)
1115{
1116	ipw_led_activity_off(priv);
1117	ipw_led_link_off(priv);
1118
1119	if (priv->status & STATUS_RF_KILL_MASK)
1120		ipw_led_radio_off(priv);
1121}
1122
1123static void ipw_led_init(struct ipw_priv *priv)
1124{
1125	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1126
1127	/* Set the default PINs for the link and activity leds */
1128	priv->led_activity_on = IPW_ACTIVITY_LED;
1129	priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1130
1131	priv->led_association_on = IPW_ASSOCIATED_LED;
1132	priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1133
1134	/* Set the default PINs for the OFDM leds */
1135	priv->led_ofdm_on = IPW_OFDM_LED;
1136	priv->led_ofdm_off = ~(IPW_OFDM_LED);
1137
1138	switch (priv->nic_type) {
1139	case EEPROM_NIC_TYPE_1:
1140		/* In this NIC type, the LEDs are reversed.... */
1141		priv->led_activity_on = IPW_ASSOCIATED_LED;
1142		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1143		priv->led_association_on = IPW_ACTIVITY_LED;
1144		priv->led_association_off = ~(IPW_ACTIVITY_LED);
1145
1146		if (!(priv->config & CFG_NO_LED))
1147			ipw_led_band_on(priv);
1148
1149		/* And we don't blink link LEDs for this nic, so
1150		 * just return here */
1151		return;
1152
1153	case EEPROM_NIC_TYPE_3:
1154	case EEPROM_NIC_TYPE_2:
1155	case EEPROM_NIC_TYPE_4:
1156	case EEPROM_NIC_TYPE_0:
1157		break;
1158
1159	default:
1160		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1161			       priv->nic_type);
1162		priv->nic_type = EEPROM_NIC_TYPE_0;
1163		break;
1164	}
1165
1166	if (!(priv->config & CFG_NO_LED)) {
1167		if (priv->status & STATUS_ASSOCIATED)
1168			ipw_led_link_on(priv);
1169		else
1170			ipw_led_link_off(priv);
1171	}
1172}
1173
1174static void ipw_led_shutdown(struct ipw_priv *priv)
1175{
1176	ipw_led_activity_off(priv);
1177	ipw_led_link_off(priv);
1178	ipw_led_band_off(priv);
1179	cancel_delayed_work(&priv->led_link_on);
1180	cancel_delayed_work(&priv->led_link_off);
1181	cancel_delayed_work(&priv->led_act_off);
1182}
1183
1184/*
1185 * The following adds a new attribute to the sysfs representation
1186 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1187 * used for controlling the debug level.
1188 *
1189 * See the level definitions in ipw for details.
1190 */
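
/*
 * Example usage from userspace (a sketch, using the path given above):
 *
 *   cat /sys/bus/pci/drivers/ipw/debug_level
 *   echo 0x00000001 > /sys/bus/pci/drivers/ipw/debug_level
 *
 * store_debug_level() below accepts the value in decimal or hex; the
 * bits correspond to the IPW_DL_* debug masks.
 */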
1191static ssize_t show_debug_level(struct device_driver *d, char *buf)
1192{
1193	return sprintf(buf, "0x%08X\n", ipw_debug_level);
1194}
1195
1196static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1197				 size_t count)
1198{
1199	char *p = (char *)buf;
1200	u32 val;
1201
1202	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1203		p++;
1204		if (p[0] == 'x' || p[0] == 'X')
1205			p++;
1206		val = simple_strtoul(p, &p, 16);
1207	} else
1208		val = simple_strtoul(p, &p, 10);
1209	if (p == buf)
1210		printk(KERN_INFO DRV_NAME
1211		       ": %s is not in hex or decimal form.\n", buf);
1212	else
1213		ipw_debug_level = val;
1214
1215	return strnlen(buf, count);
1216}
1217
1218static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1219		   show_debug_level, store_debug_level);
1220
1221static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1222{
1223	/* length = 1st dword in log */
1224	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1225}
1226
1227static void ipw_capture_event_log(struct ipw_priv *priv,
1228				  u32 log_len, struct ipw_event *log)
1229{
1230	u32 base;
1231
1232	if (log_len) {
1233		base = ipw_read32(priv, IPW_EVENT_LOG);
1234		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1235				  (u8 *) log, sizeof(*log) * log_len);
1236	}
1237}
1238
1239static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1240{
1241	struct ipw_fw_error *error;
1242	u32 log_len = ipw_get_event_log_len(priv);
1243	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1244	u32 elem_len = ipw_read_reg32(priv, base);
1245
1246	error = kmalloc(sizeof(*error) +
1247			sizeof(*error->elem) * elem_len +
1248			sizeof(*error->log) * log_len, GFP_ATOMIC);
1249	if (!error) {
1250		IPW_ERROR("Memory allocation for firmware error log "
1251			  "failed.\n");
1252		return NULL;
1253	}
1254	error->jiffies = jiffies;
1255	error->status = priv->status;
1256	error->config = priv->config;
1257	error->elem_len = elem_len;
1258	error->log_len = log_len;
1259	error->elem = (struct ipw_error_elem *)error->payload;
1260	error->log = (struct ipw_event *)(error->elem + elem_len);
1261
1262	ipw_capture_event_log(priv, log_len, error->log);
1263
1264	if (elem_len)
1265		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1266				  sizeof(*error->elem) * elem_len);
1267
1268	return error;
1269}
1270
1271static ssize_t show_event_log(struct device *d,
1272			      struct device_attribute *attr, char *buf)
1273{
1274	struct ipw_priv *priv = dev_get_drvdata(d);
1275	u32 log_len = ipw_get_event_log_len(priv);
1276	u32 log_size;
1277	struct ipw_event *log;
1278	u32 len = 0, i;
1279
1280	/* not using min() because of its strict type checking */
1281	log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1282			sizeof(*log) * log_len : PAGE_SIZE;
1283	log = kzalloc(log_size, GFP_KERNEL);
1284	if (!log) {
1285		IPW_ERROR("Unable to allocate memory for log\n");
1286		return 0;
1287	}
1288	log_len = log_size / sizeof(*log);
1289	ipw_capture_event_log(priv, log_len, log);
1290
1291	len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1292	for (i = 0; i < log_len; i++)
1293		len += snprintf(buf + len, PAGE_SIZE - len,
1294				"\n%08X%08X%08X",
1295				log[i].time, log[i].event, log[i].data);
1296	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1297	kfree(log);
1298	return len;
1299}
1300
1301static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1302
1303static ssize_t show_error(struct device *d,
1304			  struct device_attribute *attr, char *buf)
1305{
1306	struct ipw_priv *priv = dev_get_drvdata(d);
1307	u32 len = 0, i;
1308	if (!priv->error)
1309		return 0;
1310	len += snprintf(buf + len, PAGE_SIZE - len,
1311			"%08lX%08X%08X%08X",
1312			priv->error->jiffies,
1313			priv->error->status,
1314			priv->error->config, priv->error->elem_len);
1315	for (i = 0; i < priv->error->elem_len; i++)
1316		len += snprintf(buf + len, PAGE_SIZE - len,
1317				"\n%08X%08X%08X%08X%08X%08X%08X",
1318				priv->error->elem[i].time,
1319				priv->error->elem[i].desc,
1320				priv->error->elem[i].blink1,
1321				priv->error->elem[i].blink2,
1322				priv->error->elem[i].link1,
1323				priv->error->elem[i].link2,
1324				priv->error->elem[i].data);
1325
1326	len += snprintf(buf + len, PAGE_SIZE - len,
1327			"\n%08X", priv->error->log_len);
1328	for (i = 0; i < priv->error->log_len; i++)
1329		len += snprintf(buf + len, PAGE_SIZE - len,
1330				"\n%08X%08X%08X",
1331				priv->error->log[i].time,
1332				priv->error->log[i].event,
1333				priv->error->log[i].data);
1334	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1335	return len;
1336}
1337
1338static ssize_t clear_error(struct device *d,
1339			   struct device_attribute *attr,
1340			   const char *buf, size_t count)
1341{
1342	struct ipw_priv *priv = dev_get_drvdata(d);
1343
1344	kfree(priv->error);
1345	priv->error = NULL;
1346	return count;
1347}
1348
1349static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1350
1351static ssize_t show_cmd_log(struct device *d,
1352			    struct device_attribute *attr, char *buf)
1353{
1354	struct ipw_priv *priv = dev_get_drvdata(d);
1355	u32 len = 0, i;
1356	if (!priv->cmdlog)
1357		return 0;
1358	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1359	     (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1360	     i = (i + 1) % priv->cmdlog_len) {
1361		len +=
1362		    snprintf(buf + len, PAGE_SIZE - len,
1363			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1364			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1365			     priv->cmdlog[i].cmd.len);
1366		len +=
1367		    snprintk_buf(buf + len, PAGE_SIZE - len,
1368				 (u8 *) priv->cmdlog[i].cmd.param,
1369				 priv->cmdlog[i].cmd.len);
1370		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1371	}
1372	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1373	return len;
1374}
1375
1376static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1377
1378#ifdef CONFIG_IPW2200_PROMISCUOUS
1379static void ipw_prom_free(struct ipw_priv *priv);
1380static int ipw_prom_alloc(struct ipw_priv *priv);
1381static ssize_t store_rtap_iface(struct device *d,
1382			 struct device_attribute *attr,
1383			 const char *buf, size_t count)
1384{
1385	struct ipw_priv *priv = dev_get_drvdata(d);
1386	int rc = 0;
1387
1388	if (count < 1)
1389		return -EINVAL;
1390
1391	switch (buf[0]) {
1392	case '0':
1393		if (!rtap_iface)
1394			return count;
1395
1396		if (netif_running(priv->prom_net_dev)) {
1397			IPW_WARNING("Interface is up.  Cannot unregister.\n");
1398			return count;
1399		}
1400
1401		ipw_prom_free(priv);
1402		rtap_iface = 0;
1403		break;
1404
1405	case '1':
1406		if (rtap_iface)
1407			return count;
1408
1409		rc = ipw_prom_alloc(priv);
1410		if (!rc)
1411			rtap_iface = 1;
1412		break;
1413
1414	default:
1415		return -EINVAL;
1416	}
1417
1418	if (rc) {
1419		IPW_ERROR("Failed to register promiscuous network "
1420			  "device (error %d).\n", rc);
1421	}
1422
1423	return count;
1424}
1425
1426static ssize_t show_rtap_iface(struct device *d,
1427			struct device_attribute *attr,
1428			char *buf)
1429{
1430	struct ipw_priv *priv = dev_get_drvdata(d);
1431	if (rtap_iface)
1432		return sprintf(buf, "%s", priv->prom_net_dev->name);
1433	else {
1434		buf[0] = '-';
1435		buf[1] = '1';
1436		buf[2] = '\0';
1437		return 3;
1438	}
1439}
1440
1441static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1442		   store_rtap_iface);
1443
1444static ssize_t store_rtap_filter(struct device *d,
1445			 struct device_attribute *attr,
1446			 const char *buf, size_t count)
1447{
1448	struct ipw_priv *priv = dev_get_drvdata(d);
1449
1450	if (!priv->prom_priv) {
1451		IPW_ERROR("Attempting to set filter without "
1452			  "rtap_iface enabled.\n");
1453		return -EPERM;
1454	}
1455
1456	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1457
1458	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1459		       BIT_ARG16(priv->prom_priv->filter));
1460
1461	return count;
1462}
1463
1464static ssize_t show_rtap_filter(struct device *d,
1465			struct device_attribute *attr,
1466			char *buf)
1467{
1468	struct ipw_priv *priv = dev_get_drvdata(d);
1469	return sprintf(buf, "0x%04X",
1470		       priv->prom_priv ? priv->prom_priv->filter : 0);
1471}
1472
1473static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1474		   store_rtap_filter);
1475#endif
1476
1477static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1478			     char *buf)
1479{
1480	struct ipw_priv *priv = dev_get_drvdata(d);
1481	return sprintf(buf, "%d\n", priv->ieee->scan_age);
1482}
1483
1484static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1485			      const char *buf, size_t count)
1486{
1487	struct ipw_priv *priv = dev_get_drvdata(d);
1488	struct net_device *dev = priv->net_dev;
1489	char buffer[] = "00000000";
1490	unsigned long len =
1491	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1492	unsigned long val;
1493	char *p = buffer;
1494
1495	IPW_DEBUG_INFO("enter\n");
1496
1497	strncpy(buffer, buf, len);
1498	buffer[len] = 0;
1499
1500	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1501		p++;
1502		if (p[0] == 'x' || p[0] == 'X')
1503			p++;
1504		val = simple_strtoul(p, &p, 16);
1505	} else
1506		val = simple_strtoul(p, &p, 10);
1507	if (p == buffer) {
1508		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1509	} else {
1510		priv->ieee->scan_age = val;
1511		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1512	}
1513
1514	IPW_DEBUG_INFO("exit\n");
1515	return len;
1516}
1517
1518static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1519
1520static ssize_t show_led(struct device *d, struct device_attribute *attr,
1521			char *buf)
1522{
1523	struct ipw_priv *priv = dev_get_drvdata(d);
1524	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1525}
1526
1527static ssize_t store_led(struct device *d, struct device_attribute *attr,
1528			 const char *buf, size_t count)
1529{
1530	struct ipw_priv *priv = dev_get_drvdata(d);
1531
1532	IPW_DEBUG_INFO("enter\n");
1533
1534	if (count == 0)
1535		return 0;
1536
1537	if (*buf == 0) {
1538		IPW_DEBUG_LED("Disabling LED control.\n");
1539		priv->config |= CFG_NO_LED;
1540		ipw_led_shutdown(priv);
1541	} else {
1542		IPW_DEBUG_LED("Enabling LED control.\n");
1543		priv->config &= ~CFG_NO_LED;
1544		ipw_led_init(priv);
1545	}
1546
1547	IPW_DEBUG_INFO("exit\n");
1548	return count;
1549}
1550
1551static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1552
1553static ssize_t show_status(struct device *d,
1554			   struct device_attribute *attr, char *buf)
1555{
1556	struct ipw_priv *p = dev_get_drvdata(d);
1557	return sprintf(buf, "0x%08x\n", (int)p->status);
1558}
1559
1560static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1561
1562static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1563			char *buf)
1564{
1565	struct ipw_priv *p = dev_get_drvdata(d);
1566	return sprintf(buf, "0x%08x\n", (int)p->config);
1567}
1568
1569static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1570
1571static ssize_t show_nic_type(struct device *d,
1572			     struct device_attribute *attr, char *buf)
1573{
1574	struct ipw_priv *priv = dev_get_drvdata(d);
1575	return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1576}
1577
1578static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1579
1580static ssize_t show_ucode_version(struct device *d,
1581				  struct device_attribute *attr, char *buf)
1582{
1583	u32 len = sizeof(u32), tmp = 0;
1584	struct ipw_priv *p = dev_get_drvdata(d);
1585
1586	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1587		return 0;
1588
1589	return sprintf(buf, "0x%08x\n", tmp);
1590}
1591
1592static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1593
1594static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1595			char *buf)
1596{
1597	u32 len = sizeof(u32), tmp = 0;
1598	struct ipw_priv *p = dev_get_drvdata(d);
1599
1600	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1601		return 0;
1602
1603	return sprintf(buf, "0x%08x\n", tmp);
1604}
1605
1606static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1607
1608/*
1609 * Add a device attribute to view/control the delay between eeprom
1610 * operations.
1611 */
1612static ssize_t show_eeprom_delay(struct device *d,
1613				 struct device_attribute *attr, char *buf)
1614{
1615	struct ipw_priv *p = dev_get_drvdata(d);
1616	int n = p->eeprom_delay;
1617	return sprintf(buf, "%i\n", n);
1618}
1619static ssize_t store_eeprom_delay(struct device *d,
1620				  struct device_attribute *attr,
1621				  const char *buf, size_t count)
1622{
1623	struct ipw_priv *p = dev_get_drvdata(d);
1624	sscanf(buf, "%i", &p->eeprom_delay);
1625	return strnlen(buf, count);
1626}
1627
1628static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1629		   show_eeprom_delay, store_eeprom_delay);
1630
1631static ssize_t show_command_event_reg(struct device *d,
1632				      struct device_attribute *attr, char *buf)
1633{
1634	u32 reg = 0;
1635	struct ipw_priv *p = dev_get_drvdata(d);
1636
1637	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1638	return sprintf(buf, "0x%08x\n", reg);
1639}
1640static ssize_t store_command_event_reg(struct device *d,
1641				       struct device_attribute *attr,
1642				       const char *buf, size_t count)
1643{
1644	u32 reg;
1645	struct ipw_priv *p = dev_get_drvdata(d);
1646
1647	sscanf(buf, "%x", &reg);
1648	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1649	return strnlen(buf, count);
1650}
1651
1652static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1653		   show_command_event_reg, store_command_event_reg);
1654
1655static ssize_t show_mem_gpio_reg(struct device *d,
1656				 struct device_attribute *attr, char *buf)
1657{
1658	u32 reg = 0;
1659	struct ipw_priv *p = dev_get_drvdata(d);
1660
1661	reg = ipw_read_reg32(p, 0x301100);
1662	return sprintf(buf, "0x%08x\n", reg);
1663}
1664static ssize_t store_mem_gpio_reg(struct device *d,
1665				  struct device_attribute *attr,
1666				  const char *buf, size_t count)
1667{
1668	u32 reg;
1669	struct ipw_priv *p = dev_get_drvdata(d);
1670
1671	sscanf(buf, "%x", &reg);
1672	ipw_write_reg32(p, 0x301100, reg);
1673	return strnlen(buf, count);
1674}
1675
1676static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1677		   show_mem_gpio_reg, store_mem_gpio_reg);
1678
1679static ssize_t show_indirect_dword(struct device *d,
1680				   struct device_attribute *attr, char *buf)
1681{
1682	u32 reg = 0;
1683	struct ipw_priv *priv = dev_get_drvdata(d);
1684
1685	if (priv->status & STATUS_INDIRECT_DWORD)
1686		reg = ipw_read_reg32(priv, priv->indirect_dword);
1687	else
1688		reg = 0;
1689
1690	return sprintf(buf, "0x%08x\n", reg);
1691}
1692static ssize_t store_indirect_dword(struct device *d,
1693				    struct device_attribute *attr,
1694				    const char *buf, size_t count)
1695{
1696	struct ipw_priv *priv = dev_get_drvdata(d);
1697
1698	sscanf(buf, "%x", &priv->indirect_dword);
1699	priv->status |= STATUS_INDIRECT_DWORD;
1700	return strnlen(buf, count);
1701}
1702
1703static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1704		   show_indirect_dword, store_indirect_dword);
1705
1706static ssize_t show_indirect_byte(struct device *d,
1707				  struct device_attribute *attr, char *buf)
1708{
1709	u8 reg = 0;
1710	struct ipw_priv *priv = dev_get_drvdata(d);
1711
1712	if (priv->status & STATUS_INDIRECT_BYTE)
1713		reg = ipw_read_reg8(priv, priv->indirect_byte);
1714	else
1715		reg = 0;
1716
1717	return sprintf(buf, "0x%02x\n", reg);
1718}
1719static ssize_t store_indirect_byte(struct device *d,
1720				   struct device_attribute *attr,
1721				   const char *buf, size_t count)
1722{
1723	struct ipw_priv *priv = dev_get_drvdata(d);
1724
1725	sscanf(buf, "%x", &priv->indirect_byte);
1726	priv->status |= STATUS_INDIRECT_BYTE;
1727	return strnlen(buf, count);
1728}
1729
1730static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1731		   show_indirect_byte, store_indirect_byte);
1732
1733static ssize_t show_direct_dword(struct device *d,
1734				 struct device_attribute *attr, char *buf)
1735{
1736	u32 reg = 0;
1737	struct ipw_priv *priv = dev_get_drvdata(d);
1738
1739	if (priv->status & STATUS_DIRECT_DWORD)
1740		reg = ipw_read32(priv, priv->direct_dword);
1741	else
1742		reg = 0;
1743
1744	return sprintf(buf, "0x%08x\n", reg);
1745}
1746static ssize_t store_direct_dword(struct device *d,
1747				  struct device_attribute *attr,
1748				  const char *buf, size_t count)
1749{
1750	struct ipw_priv *priv = dev_get_drvdata(d);
1751
1752	sscanf(buf, "%x", &priv->direct_dword);
1753	priv->status |= STATUS_DIRECT_DWORD;
1754	return strnlen(buf, count);
1755}
1756
1757static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1758		   show_direct_dword, store_direct_dword);
1759
1760static int rf_kill_active(struct ipw_priv *priv)
1761{
1762	if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1763		priv->status |= STATUS_RF_KILL_HW;
1764		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1765	} else {
1766		priv->status &= ~STATUS_RF_KILL_HW;
1767		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1768	}
1769
1770	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1771}
1772
1773static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1774			    char *buf)
1775{
1776	/* 0 - RF kill not enabled
1777	   1 - SW based RF kill active (sysfs)
1778	   2 - HW based RF kill active
1779	   3 - Both HW and SW based RF kill active */
1780	struct ipw_priv *priv = dev_get_drvdata(d);
1781	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1782	    (rf_kill_active(priv) ? 0x2 : 0x0);
1783	return sprintf(buf, "%i\n", val);
1784}
1785
1786static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1787{
1788	if ((disable_radio ? 1 : 0) ==
1789	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1790		return 0;
1791
1792	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
1793			  disable_radio ? "OFF" : "ON");
1794
1795	if (disable_radio) {
1796		priv->status |= STATUS_RF_KILL_SW;
1797
1798		if (priv->workqueue) {
1799			cancel_delayed_work(&priv->request_scan);
1800			cancel_delayed_work(&priv->request_direct_scan);
1801			cancel_delayed_work(&priv->request_passive_scan);
1802			cancel_delayed_work(&priv->scan_event);
1803		}
1804		queue_work(priv->workqueue, &priv->down);
1805	} else {
1806		priv->status &= ~STATUS_RF_KILL_SW;
1807		if (rf_kill_active(priv)) {
1808			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1809					  "disabled by HW switch\n");
1810			/* Make sure the RF_KILL check timer is running */
1811			cancel_delayed_work(&priv->rf_kill);
1812			queue_delayed_work(priv->workqueue, &priv->rf_kill,
1813					   round_jiffies_relative(2 * HZ));
1814		} else
1815			queue_work(priv->workqueue, &priv->up);
1816	}
1817
1818	return 1;
1819}
1820
1821static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1822			     const char *buf, size_t count)
1823{
1824	struct ipw_priv *priv = dev_get_drvdata(d);
1825
1826	ipw_radio_kill_sw(priv, buf[0] == '1');
1827
1828	return count;
1829}
1830
1831static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1832
1833static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1834			       char *buf)
1835{
1836	struct ipw_priv *priv = dev_get_drvdata(d);
1837	int pos = 0, len = 0;
1838	if (priv->config & CFG_SPEED_SCAN) {
1839		while (priv->speed_scan[pos] != 0)
1840			len += sprintf(&buf[len], "%d ",
1841				       priv->speed_scan[pos++]);
1842		return len + sprintf(&buf[len], "\n");
1843	}
1844
1845	return sprintf(buf, "0\n");
1846}
1847
1848static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1849				const char *buf, size_t count)
1850{
1851	struct ipw_priv *priv = dev_get_drvdata(d);
1852	int channel, pos = 0;
1853	const char *p = buf;
1854
1855	/* list of space separated channels to scan, optionally ending with 0 */
1856	while ((channel = simple_strtol(p, NULL, 0))) {
1857		if (pos == MAX_SPEED_SCAN - 1) {
1858			priv->speed_scan[pos] = 0;
1859			break;
1860		}
1861
1862		if (libipw_is_valid_channel(priv->ieee, channel))
1863			priv->speed_scan[pos++] = channel;
1864		else
1865			IPW_WARNING("Skipping invalid channel request: %d\n",
1866				    channel);
1867		p = strchr(p, ' ');
1868		if (!p)
1869			break;
1870		while (*p == ' ' || *p == '\t')
1871			p++;
1872	}
1873
1874	if (pos == 0)
1875		priv->config &= ~CFG_SPEED_SCAN;
1876	else {
1877		priv->speed_scan_pos = 0;
1878		priv->config |= CFG_SPEED_SCAN;
1879	}
1880
1881	return count;
1882}
1883
1884static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1885		   store_speed_scan);
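/*
 * Usage sketch for speed_scan (path illustrative; channels are validated
 * against the current geography via libipw_is_valid_channel()):
 *
 *	# echo "1 6 11" > /sys/bus/pci/devices/<bdf>/speed_scan	(scan only 1, 6, 11)
 *	# echo "0" > /sys/bus/pci/devices/<bdf>/speed_scan	(disable speed scan)
 */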
1886
1887static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1888			      char *buf)
1889{
1890	struct ipw_priv *priv = dev_get_drvdata(d);
1891	return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1892}
1893
1894static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1895			       const char *buf, size_t count)
1896{
1897	struct ipw_priv *priv = dev_get_drvdata(d);
1898	if (buf[0] == '1')
1899		priv->config |= CFG_NET_STATS;
1900	else
1901		priv->config &= ~CFG_NET_STATS;
1902
1903	return count;
1904}
1905
1906static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1907		   show_net_stats, store_net_stats);
1908
1909static ssize_t show_channels(struct device *d,
1910			     struct device_attribute *attr,
1911			     char *buf)
1912{
1913	struct ipw_priv *priv = dev_get_drvdata(d);
1914	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1915	int len = 0, i;
1916
1917	len = sprintf(&buf[len],
1918		      "Displaying %d channels in 2.4GHz band "
1919		      "(802.11bg):\n", geo->bg_channels);
1920
1921	for (i = 0; i < geo->bg_channels; i++) {
1922		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1923			       geo->bg[i].channel,
1924			       geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1925			       " (radar spectrum)" : "",
1926			       ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1927				(geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1928			       ? "" : ", IBSS",
1929			       geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1930			       "passive only" : "active/passive",
1931			       geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1932			       "B" : "B/G");
1933	}
1934
1935	len += sprintf(&buf[len],
1936		       "Displaying %d channels in 5.2GHz band "
1937		       "(802.11a):\n", geo->a_channels);
1938	for (i = 0; i < geo->a_channels; i++) {
1939		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1940			       geo->a[i].channel,
1941			       geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1942			       " (radar spectrum)" : "",
1943			       ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1944				(geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1945			       ? "" : ", IBSS",
1946			       geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1947			       "passive only" : "active/passive");
1948	}
1949
1950	return len;
1951}
1952
1953static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1954
1955static void notify_wx_assoc_event(struct ipw_priv *priv)
1956{
1957	union iwreq_data wrqu;
1958	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1959	if (priv->status & STATUS_ASSOCIATED)
1960		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1961	else
1962		memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1963	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1964}
1965
1966static void ipw_irq_tasklet(struct ipw_priv *priv)
1967{
1968	u32 inta, inta_mask, handled = 0;
1969	unsigned long flags;
1970	int rc = 0;
1971
1972	spin_lock_irqsave(&priv->irq_lock, flags);
1973
1974	inta = ipw_read32(priv, IPW_INTA_RW);
1975	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1976	inta &= (IPW_INTA_MASK_ALL & inta_mask);
1977
1978	/* Add any cached INTA values that need to be handled */
1979	inta |= priv->isr_inta;
1980
1981	spin_unlock_irqrestore(&priv->irq_lock, flags);
1982
1983	spin_lock_irqsave(&priv->lock, flags);
1984
1985	/* handle all the reasons for the interrupt */
1986	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1987		ipw_rx(priv);
1988		handled |= IPW_INTA_BIT_RX_TRANSFER;
1989	}
1990
1991	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1992		IPW_DEBUG_HC("Command completed.\n");
1993		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1994		priv->status &= ~STATUS_HCMD_ACTIVE;
1995		wake_up_interruptible(&priv->wait_command_queue);
1996		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1997	}
1998
1999	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2000		IPW_DEBUG_TX("TX_QUEUE_1\n");
2001		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2002		handled |= IPW_INTA_BIT_TX_QUEUE_1;
2003	}
2004
2005	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2006		IPW_DEBUG_TX("TX_QUEUE_2\n");
2007		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2008		handled |= IPW_INTA_BIT_TX_QUEUE_2;
2009	}
2010
2011	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2012		IPW_DEBUG_TX("TX_QUEUE_3\n");
2013		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2014		handled |= IPW_INTA_BIT_TX_QUEUE_3;
2015	}
2016
2017	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2018		IPW_DEBUG_TX("TX_QUEUE_4\n");
2019		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2020		handled |= IPW_INTA_BIT_TX_QUEUE_4;
2021	}
2022
2023	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2024		IPW_WARNING("STATUS_CHANGE\n");
2025		handled |= IPW_INTA_BIT_STATUS_CHANGE;
2026	}
2027
2028	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2029		IPW_WARNING("BEACON_PERIOD_EXPIRED\n");
2030		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2031	}
2032
2033	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2034		IPW_WARNING("HOST_CMD_DONE\n");
2035		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2036	}
2037
2038	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2039		IPW_WARNING("FW_INITIALIZATION_DONE\n");
2040		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2041	}
2042
2043	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2044		IPW_WARNING("PHY_OFF_DONE\n");
2045		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2046	}
2047
2048	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2049		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2050		priv->status |= STATUS_RF_KILL_HW;
2051		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2052		wake_up_interruptible(&priv->wait_command_queue);
2053		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2054		cancel_delayed_work(&priv->request_scan);
2055		cancel_delayed_work(&priv->request_direct_scan);
2056		cancel_delayed_work(&priv->request_passive_scan);
2057		cancel_delayed_work(&priv->scan_event);
2058		schedule_work(&priv->link_down);
2059		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
2060		handled |= IPW_INTA_BIT_RF_KILL_DONE;
2061	}
2062
2063	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2064		IPW_WARNING("Firmware error detected.  Restarting.\n");
2065		if (priv->error) {
2066			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2067			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2068				struct ipw_fw_error *error =
2069				    ipw_alloc_error_log(priv);
2070				ipw_dump_error_log(priv, error);
2071				kfree(error);
2072			}
2073		} else {
2074			priv->error = ipw_alloc_error_log(priv);
2075			if (priv->error)
2076				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2077			else
2078				IPW_DEBUG_FW("Error allocating sysfs 'error' "
2079					     "log.\n");
2080			if (ipw_debug_level & IPW_DL_FW_ERRORS)
2081				ipw_dump_error_log(priv, priv->error);
2082		}
2083
2084		/* XXX: If hardware encryption is for WPA/WPA2,
2085		 * we have to notify the supplicant. */
2086		if (priv->ieee->sec.encrypt) {
2087			priv->status &= ~STATUS_ASSOCIATED;
2088			notify_wx_assoc_event(priv);
2089		}
2090
2091		/* Keep the restart process from trying to send host
2092		 * commands by clearing the INIT status bit */
2093		priv->status &= ~STATUS_INIT;
2094
2095		/* Cancel currently queued command. */
2096		priv->status &= ~STATUS_HCMD_ACTIVE;
2097		wake_up_interruptible(&priv->wait_command_queue);
2098
2099		queue_work(priv->workqueue, &priv->adapter_restart);
2100		handled |= IPW_INTA_BIT_FATAL_ERROR;
2101	}
2102
2103	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2104		IPW_ERROR("Parity error\n");
2105		handled |= IPW_INTA_BIT_PARITY_ERROR;
2106	}
2107
2108	if (handled != inta) {
2109		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2110	}
2111
2112	spin_unlock_irqrestore(&priv->lock, flags);
2113
2114	/* enable all interrupts */
2115	ipw_enable_interrupts(priv);
2116}
2117
2118#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
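/*
 * IPW_CMD() uses token pasting and stringification, so e.g. IPW_CMD(SSID)
 * in the switch below expands to:
 *
 *	case IPW_CMD_SSID: return "SSID";
 *
 * keeping the printable names in sync with the IPW_CMD_* identifiers.
 */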
2119static char *get_cmd_string(u8 cmd)
2120{
2121	switch (cmd) {
2122		IPW_CMD(HOST_COMPLETE);
2123		IPW_CMD(POWER_DOWN);
2124		IPW_CMD(SYSTEM_CONFIG);
2125		IPW_CMD(MULTICAST_ADDRESS);
2126		IPW_CMD(SSID);
2127		IPW_CMD(ADAPTER_ADDRESS);
2128		IPW_CMD(PORT_TYPE);
2129		IPW_CMD(RTS_THRESHOLD);
2130		IPW_CMD(FRAG_THRESHOLD);
2131		IPW_CMD(POWER_MODE);
2132		IPW_CMD(WEP_KEY);
2133		IPW_CMD(TGI_TX_KEY);
2134		IPW_CMD(SCAN_REQUEST);
2135		IPW_CMD(SCAN_REQUEST_EXT);
2136		IPW_CMD(ASSOCIATE);
2137		IPW_CMD(SUPPORTED_RATES);
2138		IPW_CMD(SCAN_ABORT);
2139		IPW_CMD(TX_FLUSH);
2140		IPW_CMD(QOS_PARAMETERS);
2141		IPW_CMD(DINO_CONFIG);
2142		IPW_CMD(RSN_CAPABILITIES);
2143		IPW_CMD(RX_KEY);
2144		IPW_CMD(CARD_DISABLE);
2145		IPW_CMD(SEED_NUMBER);
2146		IPW_CMD(TX_POWER);
2147		IPW_CMD(COUNTRY_INFO);
2148		IPW_CMD(AIRONET_INFO);
2149		IPW_CMD(AP_TX_POWER);
2150		IPW_CMD(CCKM_INFO);
2151		IPW_CMD(CCX_VER_INFO);
2152		IPW_CMD(SET_CALIBRATION);
2153		IPW_CMD(SENSITIVITY_CALIB);
2154		IPW_CMD(RETRY_LIMIT);
2155		IPW_CMD(IPW_PRE_POWER_DOWN);
2156		IPW_CMD(VAP_BEACON_TEMPLATE);
2157		IPW_CMD(VAP_DTIM_PERIOD);
2158		IPW_CMD(EXT_SUPPORTED_RATES);
2159		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2160		IPW_CMD(VAP_QUIET_INTERVALS);
2161		IPW_CMD(VAP_CHANNEL_SWITCH);
2162		IPW_CMD(VAP_MANDATORY_CHANNELS);
2163		IPW_CMD(VAP_CELL_PWR_LIMIT);
2164		IPW_CMD(VAP_CF_PARAM_SET);
2165		IPW_CMD(VAP_SET_BEACONING_STATE);
2166		IPW_CMD(MEASUREMENT);
2167		IPW_CMD(POWER_CAPABILITY);
2168		IPW_CMD(SUPPORTED_CHANNELS);
2169		IPW_CMD(TPC_REPORT);
2170		IPW_CMD(WME_INFO);
2171		IPW_CMD(PRODUCTION_COMMAND);
2172	default:
2173		return "UNKNOWN";
2174	}
2175}
2176
2177#define HOST_COMPLETE_TIMEOUT HZ
2178
2179static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2180{
2181	int rc = 0;
2182	unsigned long flags;
2183
2184	spin_lock_irqsave(&priv->lock, flags);
2185	if (priv->status & STATUS_HCMD_ACTIVE) {
2186		IPW_ERROR("Failed to send %s: Already sending a command.\n",
2187			  get_cmd_string(cmd->cmd));
2188		spin_unlock_irqrestore(&priv->lock, flags);
2189		return -EAGAIN;
2190	}
2191
2192	priv->status |= STATUS_HCMD_ACTIVE;
2193
2194	if (priv->cmdlog) {
2195		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2196		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2197		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2198		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2199		       cmd->len);
2200		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2201	}
2202
2203	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2204		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2205		     priv->status);
2206
2207#ifndef DEBUG_CMD_WEP_KEY
2208	if (cmd->cmd == IPW_CMD_WEP_KEY)
2209		IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2210	else
2211#endif
2212		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2213
2214	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2215	if (rc) {
2216		priv->status &= ~STATUS_HCMD_ACTIVE;
2217		IPW_ERROR("Failed to send %s: Reason %d\n",
2218			  get_cmd_string(cmd->cmd), rc);
2219		spin_unlock_irqrestore(&priv->lock, flags);
2220		goto exit;
2221	}
2222	spin_unlock_irqrestore(&priv->lock, flags);
2223
2224	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2225					      !(priv->
2226						status & STATUS_HCMD_ACTIVE),
2227					      HOST_COMPLETE_TIMEOUT);
2228	if (rc == 0) {
2229		spin_lock_irqsave(&priv->lock, flags);
2230		if (priv->status & STATUS_HCMD_ACTIVE) {
2231			IPW_ERROR("Failed to send %s: Command timed out.\n",
2232				  get_cmd_string(cmd->cmd));
2233			priv->status &= ~STATUS_HCMD_ACTIVE;
2234			spin_unlock_irqrestore(&priv->lock, flags);
2235			rc = -EIO;
2236			goto exit;
2237		}
2238		spin_unlock_irqrestore(&priv->lock, flags);
2239	} else
2240		rc = 0;
2241
2242	if (priv->status & STATUS_RF_KILL_HW) {
2243		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2244			  get_cmd_string(cmd->cmd));
2245		rc = -EIO;
2246		goto exit;
2247	}
2248
2249      exit:
2250	if (priv->cmdlog) {
2251		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2252		priv->cmdlog_pos %= priv->cmdlog_len;
2253	}
2254	return rc;
2255}
2256
2257static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2258{
2259	struct host_cmd cmd = {
2260		.cmd = command,
2261	};
2262
2263	return __ipw_send_cmd(priv, &cmd);
2264}
2265
2266static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2267			    void *data)
2268{
2269	struct host_cmd cmd = {
2270		.cmd = command,
2271		.len = len,
2272		.param = data,
2273	};
2274
2275	return __ipw_send_cmd(priv, &cmd);
2276}
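/*
 * The ipw_send_*() helpers below all follow the same pattern on top of
 * ipw_send_cmd_simple()/ipw_send_cmd_pdu().  A minimal sketch for a
 * hypothetical fixed-size command (IPW_CMD_EXAMPLE is a placeholder, not a
 * real command id):
 *
 *	static int ipw_send_example(struct ipw_priv *priv, u16 val)
 *	{
 *		__le16 v = cpu_to_le16(val);
 *
 *		if (!priv) {
 *			IPW_ERROR("Invalid args\n");
 *			return -1;
 *		}
 *
 *		return ipw_send_cmd_pdu(priv, IPW_CMD_EXAMPLE, sizeof(v), &v);
 *	}
 */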
2277
2278static int ipw_send_host_complete(struct ipw_priv *priv)
2279{
2280	if (!priv) {
2281		IPW_ERROR("Invalid args\n");
2282		return -1;
2283	}
2284
2285	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2286}
2287
2288static int ipw_send_system_config(struct ipw_priv *priv)
2289{
2290	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2291				sizeof(priv->sys_config),
2292				&priv->sys_config);
2293}
2294
2295static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2296{
2297	if (!priv || !ssid) {
2298		IPW_ERROR("Invalid args\n");
2299		return -1;
2300	}
2301
2302	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2303				ssid);
2304}
2305
2306static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2307{
2308	if (!priv || !mac) {
2309		IPW_ERROR("Invalid args\n");
2310		return -1;
2311	}
2312
2313	IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2314		       priv->net_dev->name, mac);
2315
2316	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2317}
2318
2319/*
2320 * NOTE: This must be executed from our workqueue as it results in udelay()
2321 * being called, which may corrupt the keyboard if executed on the default
2322 * workqueue.
2323 */
2324static void ipw_adapter_restart(void *adapter)
2325{
2326	struct ipw_priv *priv = adapter;
2327
2328	if (priv->status & STATUS_RF_KILL_MASK)
2329		return;
2330
2331	ipw_down(priv);
2332
2333	if (priv->assoc_network &&
2334	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2335		ipw_remove_current_network(priv);
2336
2337	if (ipw_up(priv)) {
2338		IPW_ERROR("Failed to up device\n");
2339		return;
2340	}
2341}
2342
2343static void ipw_bg_adapter_restart(struct work_struct *work)
2344{
2345	struct ipw_priv *priv =
2346		container_of(work, struct ipw_priv, adapter_restart);
2347	mutex_lock(&priv->mutex);
2348	ipw_adapter_restart(priv);
2349	mutex_unlock(&priv->mutex);
2350}
2351
2352static void ipw_abort_scan(struct ipw_priv *priv);
2353
2354#define IPW_SCAN_CHECK_WATCHDOG	(5 * HZ)
2355
2356static void ipw_scan_check(void *data)
2357{
2358	struct ipw_priv *priv = data;
2359
2360	if (priv->status & STATUS_SCAN_ABORTING) {
2361		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2362			       "adapter after (%dms).\n",
2363			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2364		queue_work(priv->workqueue, &priv->adapter_restart);
2365	} else if (priv->status & STATUS_SCANNING) {
2366		IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2367			       "after (%dms).\n",
2368			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2369		ipw_abort_scan(priv);
2370		queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
2371	}
2372}
2373
2374static void ipw_bg_scan_check(struct work_struct *work)
2375{
2376	struct ipw_priv *priv =
2377		container_of(work, struct ipw_priv, scan_check.work);
2378	mutex_lock(&priv->mutex);
2379	ipw_scan_check(priv);
2380	mutex_unlock(&priv->mutex);
2381}
2382
2383static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2384				     struct ipw_scan_request_ext *request)
2385{
2386	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2387				sizeof(*request), request);
2388}
2389
2390static int ipw_send_scan_abort(struct ipw_priv *priv)
2391{
2392	if (!priv) {
2393		IPW_ERROR("Invalid args\n");
2394		return -1;
2395	}
2396
2397	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2398}
2399
2400static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2401{
2402	struct ipw_sensitivity_calib calib = {
2403		.beacon_rssi_raw = cpu_to_le16(sens),
2404	};
2405
2406	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2407				&calib);
2408}
2409
2410static int ipw_send_associate(struct ipw_priv *priv,
2411			      struct ipw_associate *associate)
2412{
2413	if (!priv || !associate) {
2414		IPW_ERROR("Invalid args\n");
2415		return -1;
2416	}
2417
2418	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2419				associate);
2420}
2421
2422static int ipw_send_supported_rates(struct ipw_priv *priv,
2423				    struct ipw_supported_rates *rates)
2424{
2425	if (!priv || !rates) {
2426		IPW_ERROR("Invalid args\n");
2427		return -1;
2428	}
2429
2430	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2431				rates);
2432}
2433
2434static int ipw_set_random_seed(struct ipw_priv *priv)
2435{
2436	u32 val;
2437
2438	if (!priv) {
2439		IPW_ERROR("Invalid args\n");
2440		return -1;
2441	}
2442
2443	get_random_bytes(&val, sizeof(val));
2444
2445	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2446}
2447
2448static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2449{
2450	__le32 v = cpu_to_le32(phy_off);
2451	if (!priv) {
2452		IPW_ERROR("Invalid args\n");
2453		return -1;
2454	}
2455
2456	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2457}
2458
2459static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2460{
2461	if (!priv || !power) {
2462		IPW_ERROR("Invalid args\n");
2463		return -1;
2464	}
2465
2466	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2467}
2468
2469static int ipw_set_tx_power(struct ipw_priv *priv)
2470{
2471	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2472	struct ipw_tx_power tx_power;
2473	s8 max_power;
2474	int i;
2475
2476	memset(&tx_power, 0, sizeof(tx_power));
2477
2478	/* configure device for 'G' band */
2479	tx_power.ieee_mode = IPW_G_MODE;
2480	tx_power.num_channels = geo->bg_channels;
2481	for (i = 0; i < geo->bg_channels; i++) {
2482		max_power = geo->bg[i].max_power;
2483		tx_power.channels_tx_power[i].channel_number =
2484		    geo->bg[i].channel;
2485		tx_power.channels_tx_power[i].tx_power = max_power ?
2486		    min(max_power, priv->tx_power) : priv->tx_power;
2487	}
2488	if (ipw_send_tx_power(priv, &tx_power))
2489		return -EIO;
2490
2491	/* configure device to also handle 'B' band */
2492	tx_power.ieee_mode = IPW_B_MODE;
2493	if (ipw_send_tx_power(priv, &tx_power))
2494		return -EIO;
2495
2496	/* configure device to also handle 'A' band */
2497	if (priv->ieee->abg_true) {
2498		tx_power.ieee_mode = IPW_A_MODE;
2499		tx_power.num_channels = geo->a_channels;
2500		for (i = 0; i < tx_power.num_channels; i++) {
2501			max_power = geo->a[i].max_power;
2502			tx_power.channels_tx_power[i].channel_number =
2503			    geo->a[i].channel;
2504			tx_power.channels_tx_power[i].tx_power = max_power ?
2505			    min(max_power, priv->tx_power) : priv->tx_power;
2506		}
2507		if (ipw_send_tx_power(priv, &tx_power))
2508			return -EIO;
2509	}
2510	return 0;
2511}
2512
2513static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2514{
2515	struct ipw_rts_threshold rts_threshold = {
2516		.rts_threshold = cpu_to_le16(rts),
2517	};
2518
2519	if (!priv) {
2520		IPW_ERROR("Invalid args\n");
2521		return -1;
2522	}
2523
2524	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2525				sizeof(rts_threshold), &rts_threshold);
2526}
2527
2528static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2529{
2530	struct ipw_frag_threshold frag_threshold = {
2531		.frag_threshold = cpu_to_le16(frag),
2532	};
2533
2534	if (!priv) {
2535		IPW_ERROR("Invalid args\n");
2536		return -1;
2537	}
2538
2539	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2540				sizeof(frag_threshold), &frag_threshold);
2541}
2542
2543static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2544{
2545	__le32 param;
2546
2547	if (!priv) {
2548		IPW_ERROR("Invalid args\n");
2549		return -1;
2550	}
2551
2552	/* If on battery, use power index 3; if on AC, use CAM;
2553	 * otherwise use the user supplied level */
2554	switch (mode) {
2555	case IPW_POWER_BATTERY:
2556		param = cpu_to_le32(IPW_POWER_INDEX_3);
2557		break;
2558	case IPW_POWER_AC:
2559		param = cpu_to_le32(IPW_POWER_MODE_CAM);
2560		break;
2561	default:
2562		param = cpu_to_le32(mode);
2563		break;
2564	}
2565
2566	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2567				&param);
2568}
2569
2570static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2571{
2572	struct ipw_retry_limit retry_limit = {
2573		.short_retry_limit = slimit,
2574		.long_retry_limit = llimit
2575	};
2576
2577	if (!priv) {
2578		IPW_ERROR("Invalid args\n");
2579		return -1;
2580	}
2581
2582	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2583				&retry_limit);
2584}
2585
2586/*
2587 * The IPW device contains a Microwire compatible EEPROM that stores
2588 * various data like the MAC address.  Usually the firmware has exclusive
2589 * access to the eeprom, but during device initialization (before the
2590 * device driver has sent the HostComplete command to the firmware) the
2591 * device driver has read access to the EEPROM by way of indirect addressing
2592 * through a couple of memory mapped registers.
2593 *
2594 * The following is a simplified implementation for pulling data out of
2595 * the eeprom, along with some helper functions to find information in
2596 * the per device private data's copy of the eeprom.
2597 *
2598 * NOTE: To better understand how these functions work (i.e. what is a chip
2599 *       select and why do we have to keep driving the eeprom clock?), read
2600 *       just about any data sheet for a Microwire compatible EEPROM.
2601 */
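/*
 * A worked example of the Microwire READ transaction implemented below
 * (terminology per typical Microwire EEPROM data sheets): to read one
 * 16-bit word the driver asserts chip select, clocks out a start bit (1)
 * and the two READ opcode bits, then the 8 address bits MSB first
 * (eeprom_op()), and finally clocks 16 more times while sampling the DO
 * line (eeprom_read_u16()), all by writing bit patterns to the
 * FW_MEM_REG_EEPROM_ACCESS register.
 */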
2602
2603/* write a 32 bit value into the indirect accessor register */
2604static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2605{
2606	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2607
2608	/* the eeprom requires some time to complete the operation */
2609	udelay(p->eeprom_delay);
2610}
2611
2612/* perform a chip select operation */
2613static void eeprom_cs(struct ipw_priv *priv)
2614{
2615	eeprom_write_reg(priv, 0);
2616	eeprom_write_reg(priv, EEPROM_BIT_CS);
2617	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2618	eeprom_write_reg(priv, EEPROM_BIT_CS);
2619}
2620
2621/* release the eeprom chip select */
2622static void eeprom_disable_cs(struct ipw_priv *priv)
2623{
2624	eeprom_write_reg(priv, EEPROM_BIT_CS);
2625	eeprom_write_reg(priv, 0);
2626	eeprom_write_reg(priv, EEPROM_BIT_SK);
2627}
2628
2629/* push a single bit down to the eeprom */
2630static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2631{
2632	int d = (bit ? EEPROM_BIT_DI : 0);
2633	eeprom_write_reg(p, EEPROM_BIT_CS | d);
2634	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2635}
2636
2637/* push an opcode followed by an address down to the eeprom */
2638static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2639{
2640	int i;
2641
2642	eeprom_cs(priv);
2643	eeprom_write_bit(priv, 1);
2644	eeprom_write_bit(priv, op & 2);
2645	eeprom_write_bit(priv, op & 1);
2646	for (i = 7; i >= 0; i--) {
2647		eeprom_write_bit(priv, addr & (1 << i));
2648	}
2649}
2650
2651/* pull 16 bits off the eeprom, one bit at a time */
2652static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2653{
2654	int i;
2655	u16 r = 0;
2656
2657	/* Send READ Opcode */
2658	eeprom_op(priv, EEPROM_CMD_READ, addr);
2659
2660	/* Send dummy bit */
2661	eeprom_write_reg(priv, EEPROM_BIT_CS);
2662
2663	/* Read the 16-bit word off the eeprom one bit at a time */
2664	for (i = 0; i < 16; i++) {
2665		u32 data = 0;
2666		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2667		eeprom_write_reg(priv, EEPROM_BIT_CS);
2668		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2669		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2670	}
2671
2672	/* Send another dummy bit */
2673	eeprom_write_reg(priv, 0);
2674	eeprom_disable_cs(priv);
2675
2676	return r;
2677}
2678
2679/* helper function for pulling the mac address out of the private */
2680/* data's copy of the eeprom data                                 */
2681static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2682{
2683	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2684}
2685
2686/*
2687 * Either the device driver (i.e. the host) or the firmware can
2688 * load eeprom data into the designated region in SRAM.  If neither
2689 * happens then the FW will shutdown with a fatal error.
2690 *
2691 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2692 * region of shared SRAM needs to be non-zero.
2693 */
2694static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2695{
2696	int i;
2697	__le16 *eeprom = (__le16 *) priv->eeprom;
2698
2699	IPW_DEBUG_TRACE(">>\n");
2700
2701	/* read entire contents of eeprom into private buffer */
2702	for (i = 0; i < 128; i++)
2703		eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2704
2705	/*
2706	   If the data looks correct, then copy it to our private
2707	   copy.  Otherwise let the firmware know to perform the operation
2708	   on its own.
2709	 */
2710	if (priv->eeprom[EEPROM_VERSION] != 0) {
2711		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2712
2713		/* write the eeprom data to sram */
2714		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2715			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2716
2717		/* Do not load eeprom data on fatal error or suspend */
2718		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2719	} else {
2720		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2721
2722		/* Load eeprom data on fatal error or suspend */
2723		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2724	}
2725
2726	IPW_DEBUG_TRACE("<<\n");
2727}
2728
2729static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2730{
2731	count >>= 2;
2732	if (!count)
2733		return;
2734	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2735	while (count--)
2736		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2737}
2738
2739static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2740{
2741	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2742			CB_NUMBER_OF_ELEMENTS_SMALL *
2743			sizeof(struct command_block));
2744}
2745
2746static int ipw_fw_dma_enable(struct ipw_priv *priv)
2747{				/* start dma engine but no transfers yet */
2748
2749	IPW_DEBUG_FW(">> :\n");
2750
2751	/* Start the dma */
2752	ipw_fw_dma_reset_command_blocks(priv);
2753
2754	/* Write CB base address */
2755	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2756
2757	IPW_DEBUG_FW("<< :\n");
2758	return 0;
2759}
2760
2761static void ipw_fw_dma_abort(struct ipw_priv *priv)
2762{
2763	u32 control = 0;
2764
2765	IPW_DEBUG_FW(">> :\n");
2766
2767	/* set the Stop and Abort bit */
2768	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2769	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2770	priv->sram_desc.last_cb_index = 0;
2771
2772	IPW_DEBUG_FW("<<\n");
2773}
2774
2775static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2776					  struct command_block *cb)
2777{
2778	u32 address =
2779	    IPW_SHARED_SRAM_DMA_CONTROL +
2780	    (sizeof(struct command_block) * index);
2781	IPW_DEBUG_FW(">> :\n");
2782
2783	ipw_write_indirect(priv, address, (u8 *) cb,
2784			   (int)sizeof(struct command_block));
2785
2786	IPW_DEBUG_FW("<< :\n");
2787	return 0;
2788
2789}
2790
2791static int ipw_fw_dma_kick(struct ipw_priv *priv)
2792{
2793	u32 control = 0;
2794	u32 index = 0;
2795
2796	IPW_DEBUG_FW(">> :\n");
2797
2798	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2799		ipw_fw_dma_write_command_block(priv, index,
2800					       &priv->sram_desc.cb_list[index]);
2801
2802	/* Enable the DMA in the CSR register */
2803	ipw_clear_bit(priv, IPW_RESET_REG,
2804		      IPW_RESET_REG_MASTER_DISABLED |
2805		      IPW_RESET_REG_STOP_MASTER);
2806
2807	/* Set the Start bit. */
2808	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2809	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2810
2811	IPW_DEBUG_FW("<< :\n");
2812	return 0;
2813}
2814
2815static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2816{
2817	u32 address;
2818	u32 register_value = 0;
2819	u32 cb_fields_address = 0;
2820
2821	IPW_DEBUG_FW(">> :\n");
2822	address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2823	IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2824
2825	/* Read the DMA Control register */
2826	register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2827	IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2828
2829	/* Print the CB values */
2830	cb_fields_address = address;
2831	register_value = ipw_read_reg32(priv, cb_fields_address);
2832	IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2833
2834	cb_fields_address += sizeof(u32);
2835	register_value = ipw_read_reg32(priv, cb_fields_address);
2836	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2837
2838	cb_fields_address += sizeof(u32);
2839	register_value = ipw_read_reg32(priv, cb_fields_address);
2840	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2841			  register_value);
2842
2843	cb_fields_address += sizeof(u32);
2844	register_value = ipw_read_reg32(priv, cb_fields_address);
2845	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2846
2847	IPW_DEBUG_FW("<< :\n");
2848}
2849
2850static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2851{
2852	u32 current_cb_address = 0;
2853	u32 current_cb_index = 0;
2854
2855	IPW_DEBUG_FW(">> :\n");
2856	current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2857
2858	current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2859	    sizeof(struct command_block);
2860
2861	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2862			  current_cb_index, current_cb_address);
2863
2864	IPW_DEBUG_FW("<< :\n");
2865	return current_cb_index;
2866
2867}
2868
2869static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2870					u32 src_address,
2871					u32 dest_address,
2872					u32 length,
2873					int interrupt_enabled, int is_last)
2874{
2875
2876	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2877	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2878	    CB_DEST_SIZE_LONG;
2879	struct command_block *cb;
2880	u32 last_cb_element = 0;
2881
2882	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2883			  src_address, dest_address, length);
2884
2885	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2886		return -1;
2887
2888	last_cb_element = priv->sram_desc.last_cb_index;
2889	cb = &priv->sram_desc.cb_list[last_cb_element];
2890	priv->sram_desc.last_cb_index++;
2891
2892	/* Calculate the new CB control word */
2893	if (interrupt_enabled)
2894		control |= CB_INT_ENABLED;
2895
2896	if (is_last)
2897		control |= CB_LAST_VALID;
2898
2899	control |= length;
2900
2901	/* Calculate the CB Element's checksum value */
2902	cb->status = control ^ src_address ^ dest_address;
2903
2904	/* Copy the Source and Destination addresses */
2905	cb->dest_addr = dest_address;
2906	cb->source_addr = src_address;
2907
2908	/* Copy the Control Word last */
2909	cb->control = control;
2910
2911	return 0;
2912}
2913
2914static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2915				 int nr, u32 dest_address, u32 len)
2916{
2917	int ret, i;
2918	u32 size;
2919
2920	IPW_DEBUG_FW(">>\n");
2921	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2922			  nr, dest_address, len);
2923
2924	for (i = 0; i < nr; i++) {
2925		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2926		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2927						   dest_address +
2928						   i * CB_MAX_LENGTH, size,
2929						   0, 0);
2930		if (ret) {
2931			IPW_DEBUG_FW_INFO(": Failed\n");
2932			return -1;
2933		} else
2934			IPW_DEBUG_FW_INFO(": Added new cb\n");
2935	}
2936
2937	IPW_DEBUG_FW("<<\n");
2938	return 0;
2939}
2940
2941static int ipw_fw_dma_wait(struct ipw_priv *priv)
2942{
2943	u32 current_index = 0, previous_index;
2944	u32 watchdog = 0;
2945
2946	IPW_DEBUG_FW(">> :\n");
2947
2948	current_index = ipw_fw_dma_command_block_index(priv);
2949	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2950			  (int)priv->sram_desc.last_cb_index);
2951
2952	while (current_index < priv->sram_desc.last_cb_index) {
2953		udelay(50);
2954		previous_index = current_index;
2955		current_index = ipw_fw_dma_command_block_index(priv);
2956
2957		if (previous_index < current_index) {
2958			watchdog = 0;
2959			continue;
2960		}
2961		if (++watchdog > 400) {
2962			IPW_DEBUG_FW_INFO("Timeout\n");
2963			ipw_fw_dma_dump_command_block(priv);
2964			ipw_fw_dma_abort(priv);
2965			return -1;
2966		}
2967	}
2968
2969	ipw_fw_dma_abort(priv);
2970
2971	/* Disable the DMA in the CSR register */
2972	ipw_set_bit(priv, IPW_RESET_REG,
2973		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2974
2975	IPW_DEBUG_FW("<< dmaWaitSync\n");
2976	return 0;
2977}
2978
2979static void ipw_remove_current_network(struct ipw_priv *priv)
2980{
2981	struct list_head *element, *safe;
2982	struct libipw_network *network = NULL;
2983	unsigned long flags;
2984
2985	spin_lock_irqsave(&priv->ieee->lock, flags);
2986	list_for_each_safe(element, safe, &priv->ieee->network_list) {
2987		network = list_entry(element, struct libipw_network, list);
2988		if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2989			list_del(element);
2990			list_add_tail(&network->list,
2991				      &priv->ieee->network_free_list);
2992		}
2993	}
2994	spin_unlock_irqrestore(&priv->ieee->lock, flags);
2995}
2996
2997/**
2998 * Check that card is still alive.
2999 * Reads debug register from domain0.
3000 * If card is present, pre-defined value should
3001 * be found there.
3002 *
3003 * @param priv
3004 * @return 1 if card is present, 0 otherwise
3005 */
3006static inline int ipw_alive(struct ipw_priv *priv)
3007{
3008	return ipw_read32(priv, 0x90) == 0xd55555d5;
3009}
3010
3011/* timeout in msec, attempted in 10-msec quanta */
3012static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3013			       int timeout)
3014{
3015	int i = 0;
3016
3017	do {
3018		if ((ipw_read32(priv, addr) & mask) == mask)
3019			return i;
3020		mdelay(10);
3021		i += 10;
3022	} while (i < timeout);
3023
3024	return -ETIME;
3025}
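/*
 * Example: ipw_stop_master() below calls
 *	ipw_poll_bit(priv, IPW_RESET_REG, IPW_RESET_REG_MASTER_DISABLED, 100);
 * which re-reads the register every 10 ms and returns the elapsed time in
 * ms (0, 10, ..., 90) once all bits in the mask are set, or -ETIME if the
 * 100 ms budget is exhausted.
 */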
3026
3027/* These functions load the firmware and micro code for the operation of
3028 * the ipw hardware.  They assume the buffer has all the bits for the
3029 * image and that the caller is handling the memory allocation and clean up.
3030 */
3031
3032static int ipw_stop_master(struct ipw_priv *priv)
3033{
3034	int rc;
3035
3036	IPW_DEBUG_TRACE(">>\n");
3037	/* stop master. typical delay - 0 */
3038	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3039
3040	/* timeout is in msec, polled in 10-msec quanta */
3041	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3042			  IPW_RESET_REG_MASTER_DISABLED, 100);
3043	if (rc < 0) {
3044		IPW_ERROR("wait for stop master failed after 100ms\n");
3045		return -1;
3046	}
3047
3048	IPW_DEBUG_INFO("stop master %dms\n", rc);
3049
3050	return rc;
3051}
3052
3053static void ipw_arc_release(struct ipw_priv *priv)
3054{
3055	IPW_DEBUG_TRACE(">>\n");
3056	mdelay(5);
3057
3058	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3059
3060	/* no one knows timing, for safety add some delay */
3061	mdelay(5);
3062}
3063
3064struct fw_chunk {
3065	__le32 address;
3066	__le32 length;
3067};
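/*
 * Layout consumed by ipw_load_firmware() below: each image section is a
 * sequence of chunks, where every chunk is a struct fw_chunk header (the
 * destination SRAM address and the payload length, little endian) followed
 * immediately by 'length' bytes of payload; the parser simply advances
 * 'offset' past the header and then past the payload.
 */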
3068
3069static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3070{
3071	int rc = 0, i, addr;
3072	u8 cr = 0;
3073	__le16 *image;
3074
3075	image = (__le16 *) data;
3076
3077	IPW_DEBUG_TRACE(">>\n");
3078
3079	rc = ipw_stop_master(priv);
3080
3081	if (rc < 0)
3082		return rc;
3083
3084	for (addr = IPW_SHARED_LOWER_BOUND;
3085	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3086		ipw_write32(priv, addr, 0);
3087	}
3088
3089	/* no ucode (yet) */
3090	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3091	/* destroy DMA queues */
3092	/* reset sequence */
3093
3094	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3095	ipw_arc_release(priv);
3096	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3097	mdelay(1);
3098
3099	/* reset PHY */
3100	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3101	mdelay(1);
3102
3103	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3104	mdelay(1);
3105
3106	/* enable ucode store */
3107	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3108	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3109	mdelay(1);
3110
3111	/* write ucode */
3112	/**
3113	 * @bug
3114	 * Do NOT set indirect address register once and then
3115	 * store data to indirect data register in the loop.
3116	 * It seems very reasonable, but in this case DINO does not
3117	 * accept the ucode. It is essential to set the address each time.
3118	 */
3119	/* load new ipw uCode */
3120	for (i = 0; i < len / 2; i++)
3121		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3122				le16_to_cpu(image[i]));
3123
3124	/* enable DINO */
3125	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3126	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3127
3128	/* this is where the igx / win driver deviates from the VAP driver. */
3129
3130	/* wait for alive response */
3131	for (i = 0; i < 100; i++) {
3132		/* poll for incoming data */
3133		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3134		if (cr & DINO_RXFIFO_DATA)
3135			break;
3136		mdelay(1);
3137	}
3138
3139	if (cr & DINO_RXFIFO_DATA) {
3140		/* alive_command_response size is NOT a multiple of 4 */
3141		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3142
3143		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3144			response_buffer[i] =
3145			    cpu_to_le32(ipw_read_reg32(priv,
3146						       IPW_BASEBAND_RX_FIFO_READ));
3147		memcpy(&priv->dino_alive, response_buffer,
3148		       sizeof(priv->dino_alive));
3149		if (priv->dino_alive.alive_command == 1
3150		    && priv->dino_alive.ucode_valid == 1) {
3151			rc = 0;
3152			IPW_DEBUG_INFO
3153			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3154			     "of %02d/%02d/%02d %02d:%02d\n",
3155			     priv->dino_alive.software_revision,
3156			     priv->dino_alive.software_revision,
3157			     priv->dino_alive.device_identifier,
3158			     priv->dino_alive.device_identifier,
3159			     priv->dino_alive.time_stamp[0],
3160			     priv->dino_alive.time_stamp[1],
3161			     priv->dino_alive.time_stamp[2],
3162			     priv->dino_alive.time_stamp[3],
3163			     priv->dino_alive.time_stamp[4]);
3164		} else {
3165			IPW_DEBUG_INFO("Microcode is not alive\n");
3166			rc = -EINVAL;
3167		}
3168	} else {
3169		IPW_DEBUG_INFO("No alive response from DINO\n");
3170		rc = -ETIME;
3171	}
3172
3173	/* disable DINO, otherwise for some reason
3174	   the firmware has problems getting the alive resp. */
3175	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3176
3177	return rc;
3178}
3179
3180static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3181{
3182	int ret = -1;
3183	int offset = 0;
3184	struct fw_chunk *chunk;
3185	int total_nr = 0;
3186	int i;
3187	struct pci_pool *pool;
3188	void **virts;
3189	dma_addr_t *phys;
3190
3191	IPW_DEBUG_TRACE(">> :\n");
3192
3193	virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3194			GFP_KERNEL);
3195	if (!virts)
3196		return -ENOMEM;
3197
3198	phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3199			GFP_KERNEL);
3200	if (!phys) {
3201		kfree(virts);
3202		return -ENOMEM;
3203	}
3204	pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3205	if (!pool) {
3206		IPW_ERROR("pci_pool_create failed\n");
3207		kfree(phys);
3208		kfree(virts);
3209		return -ENOMEM;
3210	}
3211
3212	/* Start the Dma */
3213	ret = ipw_fw_dma_enable(priv);
3214
3215	/* if the DMA is already ready, this would be a bug. */
3216	BUG_ON(priv->sram_desc.last_cb_index > 0);
3217
3218	do {
3219		u32 chunk_len;
3220		u8 *start;
3221		int size;
3222		int nr = 0;
3223
3224		chunk = (struct fw_chunk *)(data + offset);
3225		offset += sizeof(struct fw_chunk);
3226		chunk_len = le32_to_cpu(chunk->length);
3227		start = data + offset;
3228
3229		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3230		for (i = 0; i < nr; i++) {
3231			virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3232							 &phys[total_nr]);
3233			if (!virts[total_nr]) {
3234				ret = -ENOMEM;
3235				goto out;
3236			}
3237			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3238				     CB_MAX_LENGTH);
3239			memcpy(virts[total_nr], start, size);
3240			start += size;
3241			total_nr++;
3242			/* We don't support fw chunk larger than 64*8K */
3243			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3244		}
3245
3246		/* build DMA packet and queue up for sending */
3247		/* dma to chunk->address, the chunk->length bytes from data +
3248		 * offset */
3249		/* Dma loading */
3250		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3251					    nr, le32_to_cpu(chunk->address),
3252					    chunk_len);
3253		if (ret) {
3254			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3255			goto out;
3256		}
3257
3258		offset += chunk_len;
3259	} while (offset < len);
3260
3261	/* Run the DMA and wait for the answer */
3262	ret = ipw_fw_dma_kick(priv);
3263	if (ret) {
3264		IPW_ERROR("dmaKick Failed\n");
3265		goto out;
3266	}
3267
3268	ret = ipw_fw_dma_wait(priv);
3269	if (ret) {
3270		IPW_ERROR("dmaWaitSync Failed\n");
3271		goto out;
3272	}
3273 out:
3274	for (i = 0; i < total_nr; i++)
3275		pci_pool_free(pool, virts[i], phys[i]);
3276
3277	pci_pool_destroy(pool);
3278	kfree(phys);
3279	kfree(virts);
3280
3281	return ret;
3282}
3283
3284/* stop nic */
3285static int ipw_stop_nic(struct ipw_priv *priv)
3286{
3287	int rc = 0;
3288
3289	/* stop */
3290	ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3291
3292	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3293			  IPW_RESET_REG_MASTER_DISABLED, 500);
3294	if (rc < 0) {
3295		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3296		return rc;
3297	}
3298
3299	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3300
3301	return rc;
3302}
3303
3304static void ipw_start_nic(struct ipw_priv *priv)
3305{
3306	IPW_DEBUG_TRACE(">>\n");
3307
3308	/* prvHwStartNic  release ARC */
3309	ipw_clear_bit(priv, IPW_RESET_REG,
3310		      IPW_RESET_REG_MASTER_DISABLED |
3311		      IPW_RESET_REG_STOP_MASTER |
3312		      CBD_RESET_REG_PRINCETON_RESET);
3313
3314	/* enable power management */
3315	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3316		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3317
3318	IPW_DEBUG_TRACE("<<\n");
3319}
3320
3321static int ipw_init_nic(struct ipw_priv *priv)
3322{
3323	int rc;
3324
3325	IPW_DEBUG_TRACE(">>\n");
3326	/* reset */
3327	/*prvHwInitNic */
3328	/* set "initialization complete" bit to move adapter to D0 state */
3329	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3330
3331	/* low-level PLL activation */
3332	ipw_write32(priv, IPW_READ_INT_REGISTER,
3333		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3334
3335	/* wait for clock stabilization */
3336	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3337			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3338	if (rc < 0)
3339		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3340
3341	/* assert SW reset */
3342	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3343
3344	udelay(10);
3345
3346	/* set "initialization complete" bit to move adapter to D0 state */
3347	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3348
3349	IPW_DEBUG_TRACE("<<\n");
3350	return 0;
3351}
3352
3353/* Call this function from process context; it will sleep in request_firmware.
3354 * Probe is an ok place to call this from.
3355 */
3356static int ipw_reset_nic(struct ipw_priv *priv)
3357{
3358	int rc = 0;
3359	unsigned long flags;
3360
3361	IPW_DEBUG_TRACE(">>\n");
3362
3363	rc = ipw_init_nic(priv);
3364
3365	spin_lock_irqsave(&priv->lock, flags);
3366	/* Clear the 'host command active' bit... */
3367	priv->status &= ~STATUS_HCMD_ACTIVE;
3368	wake_up_interruptible(&priv->wait_command_queue);
3369	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3370	wake_up_interruptible(&priv->wait_state);
3371	spin_unlock_irqrestore(&priv->lock, flags);
3372
3373	IPW_DEBUG_TRACE("<<\n");
3374	return rc;
3375}
3376
3377
3378struct ipw_fw {
3379	__le32 ver;
3380	__le32 boot_size;
3381	__le32 ucode_size;
3382	__le32 fw_size;
3383	u8 data[0];
3384};
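/*
 * On-disk image layout as parsed by ipw_get_fw()/ipw_load(): the fixed
 * header above is followed by the three images back to back, i.e.
 *
 *	data[0 .. boot_size)                       boot image
 *	data[boot_size .. boot_size + ucode_size)  microcode (DINO)
 *	data[... .. ... + fw_size)                 runtime firmware
 */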
3385
3386static int ipw_get_fw(struct ipw_priv *priv,
3387		      const struct firmware **raw, const char *name)
3388{
3389	struct ipw_fw *fw;
3390	int rc;
3391
3392	/* ask firmware_class module to get the boot firmware off disk */
3393	rc = request_firmware(raw, name, &priv->pci_dev->dev);
3394	if (rc < 0) {
3395		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3396		return rc;
3397	}
3398
3399	if ((*raw)->size < sizeof(*fw)) {
3400		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3401		return -EINVAL;
3402	}
3403
3404	fw = (void *)(*raw)->data;
3405
3406	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3407	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3408		IPW_ERROR("%s is too small or corrupt (%zd)\n",
3409			  name, (*raw)->size);
3410		return -EINVAL;
3411	}
3412
3413	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3414		       name,
3415		       le32_to_cpu(fw->ver) >> 16,
3416		       le32_to_cpu(fw->ver) & 0xff,
3417		       (*raw)->size - sizeof(*fw));
3418	return 0;
3419}
3420
3421#define IPW_RX_BUF_SIZE (3000)
3422
3423static void ipw_rx_queue_reset(struct ipw_priv *priv,
3424				      struct ipw_rx_queue *rxq)
3425{
3426	unsigned long flags;
3427	int i;
3428
3429	spin_lock_irqsave(&rxq->lock, flags);
3430
3431	INIT_LIST_HEAD(&rxq->rx_free);
3432	INIT_LIST_HEAD(&rxq->rx_used);
3433
3434	/* Fill the rx_used queue with _all_ of the Rx buffers */
3435	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3436		/* In the reset function, these buffers may have been allocated
3437		 * to an SKB, so we need to unmap and free potential storage */
3438		if (rxq->pool[i].skb != NULL) {
3439			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3440					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3441			dev_kfree_skb(rxq->pool[i].skb);
3442			rxq->pool[i].skb = NULL;
3443		}
3444		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3445	}
3446
3447	/* Set us so that we have processed and used all buffers, but have
3448	 * not restocked the Rx queue with fresh buffers */
3449	rxq->read = rxq->write = 0;
3450	rxq->free_count = 0;
3451	spin_unlock_irqrestore(&rxq->lock, flags);
3452}
3453
3454#ifdef CONFIG_PM
3455static int fw_loaded = 0;
3456static const struct firmware *raw = NULL;
3457
3458static void free_firmware(void)
3459{
3460	if (fw_loaded) {
3461		release_firmware(raw);
3462		raw = NULL;
3463		fw_loaded = 0;
3464	}
3465}
3466#else
3467#define free_firmware() do {} while (0)
3468#endif
3469
3470static int ipw_load(struct ipw_priv *priv)
3471{
3472#ifndef CONFIG_PM
3473	const struct firmware *raw = NULL;
3474#endif
3475	struct ipw_fw *fw;
3476	u8 *boot_img, *ucode_img, *fw_img;
3477	u8 *name = NULL;
3478	int rc = 0, retries = 3;
3479
3480	switch (priv->ieee->iw_mode) {
3481	case IW_MODE_ADHOC:
3482		name = "ipw2200-ibss.fw";
3483		break;
3484#ifdef CONFIG_IPW2200_MONITOR
3485	case IW_MODE_MONITOR:
3486		name = "ipw2200-sniffer.fw";
3487		break;
3488#endif
3489	case IW_MODE_INFRA:
3490		name = "ipw2200-bss.fw";
3491		break;
3492	}
3493
3494	if (!name) {
3495		rc = -EINVAL;
3496		goto error;
3497	}
3498
3499#ifdef CONFIG_PM
3500	if (!fw_loaded) {
3501#endif
3502		rc = ipw_get_fw(priv, &raw, name);
3503		if (rc < 0)
3504			goto error;
3505#ifdef CONFIG_PM
3506	}
3507#endif
3508
3509	fw = (void *)raw->data;
3510	boot_img = &fw->data[0];
3511	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3512	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3513			   le32_to_cpu(fw->ucode_size)];
3514
3515	if (rc < 0)
3516		goto error;
3517
3518	if (!priv->rxq)
3519		priv->rxq = ipw_rx_queue_alloc(priv);
3520	else
3521		ipw_rx_queue_reset(priv, priv->rxq);
3522	if (!priv->rxq) {
3523		IPW_ERROR("Unable to initialize Rx queue\n");
3524		goto error;
3525	}
3526
3527      retry:
3528	/* Ensure interrupts are disabled */
3529	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3530	priv->status &= ~STATUS_INT_ENABLED;
3531
3532	/* ack pending interrupts */
3533	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3534
3535	ipw_stop_nic(priv);
3536
3537	rc = ipw_reset_nic(priv);
3538	if (rc < 0) {
3539		IPW_ERROR("Unable to reset NIC\n");
3540		goto error;
3541	}
3542
3543	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3544			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3545
3546	/* DMA the initial boot firmware into the device */
3547	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3548	if (rc < 0) {
3549		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3550		goto error;
3551	}
3552
3553	/* kick start the device */
3554	ipw_start_nic(priv);
3555
3556	/* wait for the device to finish its initial startup sequence */
3557	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3558			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3559	if (rc < 0) {
3560		IPW_ERROR("device failed to boot initial fw image\n");
3561		goto error;
3562	}
3563	IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3564
3565	/* ack fw init done interrupt */
3566	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3567
3568	/* DMA the ucode into the device */
3569	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3570	if (rc < 0) {
3571		IPW_ERROR("Unable to load ucode: %d\n", rc);
3572		goto error;
3573	}
3574
3575	/* stop nic */
3576	ipw_stop_nic(priv);
3577
3578	/* DMA bss firmware into the device */
3579	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3580	if (rc < 0) {
3581		IPW_ERROR("Unable to load firmware: %d\n", rc);
3582		goto error;
3583	}
3584#ifdef CONFIG_PM
3585	fw_loaded = 1;
3586#endif
3587
3588	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3589
3590	rc = ipw_queue_reset(priv);
3591	if (rc < 0) {
3592		IPW_ERROR("Unable to initialize queues\n");
3593		goto error;
3594	}
3595
3596	/* Ensure interrupts are disabled */
3597	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3598	/* ack pending interrupts */
3599	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3600
3601	/* kick start the device */
3602	ipw_start_nic(priv);
3603
3604	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3605		if (retries > 0) {
3606			IPW_WARNING("Parity error.  Retrying init.\n");
3607			retries--;
3608			goto retry;
3609		}
3610
3611		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3612		rc = -EIO;
3613		goto error;
3614	}
3615
3616	/* wait for the device */
3617	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3618			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3619	if (rc < 0) {
3620		IPW_ERROR("device failed to start within 500ms\n");
3621		goto error;
3622	}
3623	IPW_DEBUG_INFO("device response after %dms\n", rc);
3624
3625	/* ack fw init done interrupt */
3626	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3627
3628	/* read eeprom data and initialize the eeprom region of sram */
3629	priv->eeprom_delay = 1;
3630	ipw_eeprom_init_sram(priv);
3631
3632	/* enable interrupts */
3633	ipw_enable_interrupts(priv);
3634
3635	/* Ensure our queue has valid packets */
3636	ipw_rx_queue_replenish(priv);
3637
3638	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3639
3640	/* ack pending interrupts */
3641	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3642
3643#ifndef CONFIG_PM
3644	release_firmware(raw);
3645#endif
3646	return 0;
3647
3648      error:
3649	if (priv->rxq) {
3650		ipw_rx_queue_free(priv, priv->rxq);
3651		priv->rxq = NULL;
3652	}
3653	ipw_tx_queue_free(priv);
3654	if (raw)
3655		release_firmware(raw);
3656#ifdef CONFIG_PM
3657	fw_loaded = 0;
3658	raw = NULL;
3659#endif
3660
3661	return rc;
3662}
3663
3664/**
3665 * DMA services
3666 *
3667 * Theory of operation
3668 *
3669 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3670 * Two empty entries are always kept in the buffer to protect from overflow.
3671 *
3672 * For the Tx queues there are low mark and high mark limits.  If, after
3673 * queuing a packet for Tx, the free space drops below the low mark, the Tx
3674 * queue is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the
3675 * free space rises above the high mark, the Tx queue is resumed.
3676 *
3677 * The IPW operates with six queues, one receive queue in the device's
3678 * sram, one transmit queue for sending commands to the device firmware,
3679 * and four transmit queues for data.
3680 *
3681 * The four transmit queues allow for performing quality of service (qos)
3682 * transmissions as per the 802.11 protocol.  Currently Linux does not
3683 * provide a mechanism to the user for utilizing prioritized queues, so
3684 * we only utilize the first data transmit queue (queue1).
3685 */
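/*
 * Worked example of the free-space computation used by the queue-space
 * helpers below (numbers are illustrative): with n_bd = 64, last_used = 10
 * and first_empty = 60, s = 10 - 60 = -50, wrapped to -50 + 64 = 14, minus
 * the 2 reserved entries leaves 12 usable slots.
 */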
3686
3687/**
3688 * The driver allocates Rx buffers of size IPW_RX_BUF_SIZE (defined above).
3689 */
3690
3691/**
3692 * ipw_rx_queue_space - Return number of free slots available in queue.
3693 */
3694static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3695{
3696	int s = q->read - q->write;
3697	if (s <= 0)
3698		s += RX_QUEUE_SIZE;
3699	/* keep some buffer to not confuse full and empty queue */
3700	s -= 2;
3701	if (s < 0)
3702		s = 0;
3703	return s;
3704}
3705
3706static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3707{
3708	int s = q->last_used - q->first_empty;
3709	if (s <= 0)
3710		s += q->n_bd;
3711	s -= 2;			/* keep some reserve to not confuse empty and full situations */
3712	if (s < 0)
3713		s = 0;
3714	return s;
3715}
3716
3717static inline int ipw_queue_inc_wrap(int index, int n_bd)
3718{
3719	return (++index == n_bd) ? 0 : index;
3720}
3721
3722/**
3723 * Initialize common DMA queue structure
3724 *
3725 * @param q                queue to init
3726 * @param count            Number of BD's to allocate. Should be power of 2
3727 * @param read_register    Address for 'read' register
3728 *                         (not offset within BAR, full address)
3729 * @param write_register   Address for 'write' register
3730 *                         (not offset within BAR, full address)
3731 * @param base_register    Address for 'base' register
3732 *                         (not offset within BAR, full address)
3733 * @param size             Address for 'size' register
3734 *                         (not offset within BAR, full address)
3735 */
3736static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3737			   int count, u32 read, u32 write, u32 base, u32 size)
3738{
3739	q->n_bd = count;
3740
3741	q->low_mark = q->n_bd / 4;
3742	if (q->low_mark < 4)
3743		q->low_mark = 4;
3744
3745	q->high_mark = q->n_bd / 8;
3746	if (q->high_mark < 2)
3747		q->high_mark = 2;
3748
3749	q->first_empty = q->last_used = 0;
3750	q->reg_r = read;
3751	q->reg_w = write;
3752
3753	ipw_write32(priv, base, q->dma_addr);
3754	ipw_write32(priv, size, count);
3755	ipw_write32(priv, read, 0);
3756	ipw_write32(priv, write, 0);
3757
3758	_ipw_read32(priv, 0x90);
3759}
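/*
 * With the marks computed above, a queue of count = 64 BDs gets
 * low_mark = 16 and high_mark = 8; the clamps only matter for queues
 * smaller than 16 BDs, where low_mark is forced to 4 and high_mark to 2.
 */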
3760
3761static int ipw_queue_tx_init(struct ipw_priv *priv,
3762			     struct clx2_tx_queue *q,
3763			     int count, u32 read, u32 write, u32 base, u32 size)
3764{
3765	struct pci_dev *dev = priv->pci_dev;
3766
3767	q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3768	if (!q->txb) {
3769		IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3770		return -ENOMEM;
3771	}
3772
3773	q->bd =
3774	    pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3775	if (!q->bd) {
3776		IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3777			  sizeof(q->bd[0]) * count);
3778		kfree(q->txb);
3779		q->txb = NULL;
3780		return -ENOMEM;
3781	}
3782
3783	ipw_queue_init(priv, &q->q, count, read, write, base, size);
3784	return 0;
3785}
3786
3787/**
3788 * Free one TFD, the one at index [txq->q.last_used].
3789 * Does NOT advance any indexes.
3790 *
3791 * @param priv
3792 * @param txq
3793 */
3794static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3795				  struct clx2_tx_queue *txq)
3796{
3797	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3798	struct pci_dev *dev = priv->pci_dev;
3799	int i;
3800
3801	/* classify bd */
3802	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3803		/* nothing to cleanup after for host commands */
3804		return;
3805
3806	/* sanity check */
3807	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3808		IPW_ERROR("Too many chunks: %i\n",
3809			  le32_to_cpu(bd->u.data.num_chunks));
3810		/** @todo issue fatal error, this is quite a serious situation */
3811		return;
3812	}
3813
3814	/* unmap chunks if any */
3815	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3816		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3817				 le16_to_cpu(bd->u.data.chunk_len[i]),
3818				 PCI_DMA_TODEVICE);
3819		if (txq->txb[txq->q.last_used]) {
3820			libipw_txb_free(txq->txb[txq->q.last_used]);
3821			txq->txb[txq->q.last_used] = NULL;
3822		}
3823	}
3824}
3825
3826/**
3827 * Deallocate DMA queue.
3828 *
3829 * Empty the queue by removing and destroying all BDs.
3830 * Free all buffers.
3831 *
3832 * @param priv
3833 * @param txq
3834 */
3835static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3836{
3837	struct clx2_queue *q = &txq->q;
3838	struct pci_dev *dev = priv->pci_dev;
3839
3840	if (q->n_bd == 0)
3841		return;
3842
3843	/* first, empty all BD's */
3844	for (; q->first_empty != q->last_used;
3845	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3846		ipw_queue_tx_free_tfd(priv, txq);
3847	}
3848
3849	/* free buffers belonging to queue itself */
3850	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3851			    q->dma_addr);
3852	kfree(txq->txb);
3853
3854	/* 0 fill whole structure */
3855	memset(txq, 0, sizeof(*txq));
3856}
3857
3858/**
3859 * Destroy all DMA queues and structures
3860 *
3861 * @param priv
3862 */
3863static void ipw_tx_queue_free(struct ipw_priv *priv)
3864{
3865	/* Tx CMD queue */
3866	ipw_queue_tx_free(priv, &priv->txq_cmd);
3867
3868	/* Tx queues */
3869	ipw_queue_tx_free(priv, &priv->txq[0]);
3870	ipw_queue_tx_free(priv, &priv->txq[1]);
3871	ipw_queue_tx_free(priv, &priv->txq[2]);
3872	ipw_queue_tx_free(priv, &priv->txq[3]);
3873}
3874
3875static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3876{
3877	/* First 3 bytes are manufacturer */
3878	bssid[0] = priv->mac_addr[0];
3879	bssid[1] = priv->mac_addr[1];
3880	bssid[2] = priv->mac_addr[2];
3881
3882	/* Last bytes are random */
3883	get_random_bytes(&bssid[3], ETH_ALEN - 3);
3884
3885	bssid[0] &= 0xfe;	/* clear multicast bit */
3886	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
3887}
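
/*
 * Example (hypothetical adapter MAC): a MAC address of 00:0e:35:12:34:56
 * yields an IBSS BSSID of the form 02:0e:35:xx:xx:xx -- the manufacturer
 * prefix is kept, the last three bytes are random, and the first byte has
 * the locally-administered bit set and the multicast bit cleared.
 */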
3888
3889static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3890{
3891	struct ipw_station_entry entry;
3892	int i;
3893
3894	for (i = 0; i < priv->num_stations; i++) {
3895		if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3896			/* Another node is active in the network */
3897			priv->missed_adhoc_beacons = 0;
3898			if (!(priv->config & CFG_STATIC_CHANNEL))
3899				/* when other nodes drop out, we drop out */
3900				priv->config &= ~CFG_ADHOC_PERSIST;
3901
3902			return i;
3903		}
3904	}
3905
3906	if (i == MAX_STATIONS)
3907		return IPW_INVALID_STATION;
3908
3909	IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3910
3911	entry.reserved = 0;
3912	entry.support_mode = 0;
3913	memcpy(entry.mac_addr, bssid, ETH_ALEN);
3914	memcpy(priv->stations[i], bssid, ETH_ALEN);
3915	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3916			 &entry, sizeof(entry));
3917	priv->num_stations++;
3918
3919	return i;
3920}
3921
3922static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3923{
3924	int i;
3925
3926	for (i = 0; i < priv->num_stations; i++)
3927		if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3928			return i;
3929
3930	return IPW_INVALID_STATION;
3931}
3932
3933static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3934{
3935	int err;
3936
3937	if (priv->status & STATUS_ASSOCIATING) {
3938		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3939		queue_work(priv->workqueue, &priv->disassociate);
3940		return;
3941	}
3942
3943	if (!(priv->status & STATUS_ASSOCIATED)) {
3944		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3945		return;
3946	}
3947
3948	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3949			"on channel %d.\n",
3950			priv->assoc_request.bssid,
3951			priv->assoc_request.channel);
3952
3953	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3954	priv->status |= STATUS_DISASSOCIATING;
3955
3956	if (quiet)
3957		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3958	else
3959		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3960
3961	err = ipw_send_associate(priv, &priv->assoc_request);
3962	if (err) {
3963		IPW_DEBUG_HC("Attempt to send [dis]associate command "
3964			     "failed.\n");
3965		return;
3966	}
3967
3968}
3969
3970static int ipw_disassociate(void *data)
3971{
3972	struct ipw_priv *priv = data;
3973	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3974		return 0;
3975	ipw_send_disassociate(data, 0);
3976	netif_carrier_off(priv->net_dev);
3977	return 1;
3978}
3979
3980static void ipw_bg_disassociate(struct work_struct *work)
3981{
3982	struct ipw_priv *priv =
3983		container_of(work, struct ipw_priv, disassociate);
3984	mutex_lock(&priv->mutex);
3985	ipw_disassociate(priv);
3986	mutex_unlock(&priv->mutex);
3987}
3988
3989static void ipw_system_config(struct work_struct *work)
3990{
3991	struct ipw_priv *priv =
3992		container_of(work, struct ipw_priv, system_config);
3993
3994#ifdef CONFIG_IPW2200_PROMISCUOUS
3995	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3996		priv->sys_config.accept_all_data_frames = 1;
3997		priv->sys_config.accept_non_directed_frames = 1;
3998		priv->sys_config.accept_all_mgmt_bcpr = 1;
3999		priv->sys_config.accept_all_mgmt_frames = 1;
4000	}
4001#endif
4002
4003	ipw_send_system_config(priv);
4004}
4005
4006struct ipw_status_code {
4007	u16 status;
4008	const char *reason;
4009};
4010
4011static const struct ipw_status_code ipw_status_codes[] = {
4012	{0x00, "Successful"},
4013	{0x01, "Unspecified failure"},
4014	{0x0A, "Cannot support all requested capabilities in the "
4015	 "Capability information field"},
4016	{0x0B, "Reassociation denied due to inability to confirm that "
4017	 "association exists"},
4018	{0x0C, "Association denied due to reason outside the scope of this "
4019	 "standard"},
4020	{0x0D,
4021	 "Responding station does not support the specified authentication "
4022	 "algorithm"},
4023	{0x0E,
4024	 "Received an Authentication frame with authentication sequence "
4025	 "transaction sequence number out of expected sequence"},
4026	{0x0F, "Authentication rejected because of challenge failure"},
4027	{0x10, "Authentication rejected due to timeout waiting for next "
4028	 "frame in sequence"},
4029	{0x11, "Association denied because AP is unable to handle additional "
4030	 "associated stations"},
4031	{0x12,
4032	 "Association denied due to requesting station not supporting all "
4033	 "of the datarates in the BSSBasicServiceSet Parameter"},
4034	{0x13,
4035	 "Association denied due to requesting station not supporting "
4036	 "short preamble operation"},
4037	{0x14,
4038	 "Association denied due to requesting station not supporting "
4039	 "PBCC encoding"},
4040	{0x15,
4041	 "Association denied due to requesting station not supporting "
4042	 "channel agility"},
4043	{0x19,
4044	 "Association denied due to requesting station not supporting "
4045	 "short slot operation"},
4046	{0x1A,
4047	 "Association denied due to requesting station not supporting "
4048	 "DSSS-OFDM operation"},
4049	{0x28, "Invalid Information Element"},
4050	{0x29, "Group Cipher is not valid"},
4051	{0x2A, "Pairwise Cipher is not valid"},
4052	{0x2B, "AKMP is not valid"},
4053	{0x2C, "Unsupported RSN IE version"},
4054	{0x2D, "Invalid RSN IE Capabilities"},
4055	{0x2E, "Cipher suite is rejected per security policy"},
4056};
4057
4058static const char *ipw_get_status_code(u16 status)
4059{
4060	int i;
4061	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4062		if (ipw_status_codes[i].status == (status & 0xff))
4063			return ipw_status_codes[i].reason;
4064	return "Unknown status value.";
4065}
4066
4067static void inline average_init(struct average *avg)
4068{
4069	memset(avg, 0, sizeof(*avg));
4070}
4071
4072#define DEPTH_RSSI 8
4073#define DEPTH_NOISE 16
4074static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4075{
4076	return ((depth - 1) * prev_avg + val) / depth;
4077}
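
/*
 * Worked example (hypothetical readings): with depth = DEPTH_RSSI (8), a
 * previous average of -60 and a new sample of -52, the result is
 * ((8 - 1) * -60 + -52) / 8 = -472 / 8 = -59, i.e. each sample pulls the
 * average roughly 1/depth of the way toward the new value.
 */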
4078
4079static void average_add(struct average *avg, s16 val)
4080{
4081	avg->sum -= avg->entries[avg->pos];
4082	avg->sum += val;
4083	avg->entries[avg->pos++] = val;
4084	if (unlikely(avg->pos == AVG_ENTRIES)) {
4085		avg->init = 1;
4086		avg->pos = 0;
4087	}
4088}
4089
4090static s16 average_value(struct average *avg)
4091{
4092	if (!unlikely(avg->init)) {
4093		if (avg->pos)
4094			return avg->sum / avg->pos;
4095		return 0;
4096	}
4097
4098	return avg->sum / AVG_ENTRIES;
4099}
4100
4101static void ipw_reset_stats(struct ipw_priv *priv)
4102{
4103	u32 len = sizeof(u32);
4104
4105	priv->quality = 0;
4106
4107	average_init(&priv->average_missed_beacons);
4108	priv->exp_avg_rssi = -60;
4109	priv->exp_avg_noise = -85 + 0x100;
4110
4111	priv->last_rate = 0;
4112	priv->last_missed_beacons = 0;
4113	priv->last_rx_packets = 0;
4114	priv->last_tx_packets = 0;
4115	priv->last_tx_failures = 0;
4116
4117	/* Firmware managed, reset only when NIC is restarted, so we have to
4118	 * normalize on the current value */
4119	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4120			&priv->last_rx_err, &len);
4121	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4122			&priv->last_tx_failures, &len);
4123
4124	/* Driver managed, reset with each association */
4125	priv->missed_adhoc_beacons = 0;
4126	priv->missed_beacons = 0;
4127	priv->tx_packets = 0;
4128	priv->rx_packets = 0;
4129
4130}
4131
4132static u32 ipw_get_max_rate(struct ipw_priv *priv)
4133{
4134	u32 i = 0x80000000;
4135	u32 mask = priv->rates_mask;
4136	/* If currently associated in B mode, restrict the maximum
4137	 * rate match to B rates */
4138	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4139		mask &= LIBIPW_CCK_RATES_MASK;
4140
4141	/* TODO: Verify that the rate is supported by the current rates
4142	 * list. */
4143
4144	while (i && !(mask & i))
4145		i >>= 1;
4146	switch (i) {
4147	case LIBIPW_CCK_RATE_1MB_MASK:
4148		return 1000000;
4149	case LIBIPW_CCK_RATE_2MB_MASK:
4150		return 2000000;
4151	case LIBIPW_CCK_RATE_5MB_MASK:
4152		return 5500000;
4153	case LIBIPW_OFDM_RATE_6MB_MASK:
4154		return 6000000;
4155	case LIBIPW_OFDM_RATE_9MB_MASK:
4156		return 9000000;
4157	case LIBIPW_CCK_RATE_11MB_MASK:
4158		return 11000000;
4159	case LIBIPW_OFDM_RATE_12MB_MASK:
4160		return 12000000;
4161	case LIBIPW_OFDM_RATE_18MB_MASK:
4162		return 18000000;
4163	case LIBIPW_OFDM_RATE_24MB_MASK:
4164		return 24000000;
4165	case LIBIPW_OFDM_RATE_36MB_MASK:
4166		return 36000000;
4167	case LIBIPW_OFDM_RATE_48MB_MASK:
4168		return 48000000;
4169	case LIBIPW_OFDM_RATE_54MB_MASK:
4170		return 54000000;
4171	}
4172
4173	if (priv->ieee->mode == IEEE_B)
4174		return 11000000;
4175	else
4176		return 54000000;
4177}
4178
4179static u32 ipw_get_current_rate(struct ipw_priv *priv)
4180{
4181	u32 rate, len = sizeof(rate);
4182	int err;
4183
4184	if (!(priv->status & STATUS_ASSOCIATED))
4185		return 0;
4186
4187	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4188		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4189				      &len);
4190		if (err) {
4191			IPW_DEBUG_INFO("failed querying ordinals.\n");
4192			return 0;
4193		}
4194	} else
4195		return ipw_get_max_rate(priv);
4196
4197	switch (rate) {
4198	case IPW_TX_RATE_1MB:
4199		return 1000000;
4200	case IPW_TX_RATE_2MB:
4201		return 2000000;
4202	case IPW_TX_RATE_5MB:
4203		return 5500000;
4204	case IPW_TX_RATE_6MB:
4205		return 6000000;
4206	case IPW_TX_RATE_9MB:
4207		return 9000000;
4208	case IPW_TX_RATE_11MB:
4209		return 11000000;
4210	case IPW_TX_RATE_12MB:
4211		return 12000000;
4212	case IPW_TX_RATE_18MB:
4213		return 18000000;
4214	case IPW_TX_RATE_24MB:
4215		return 24000000;
4216	case IPW_TX_RATE_36MB:
4217		return 36000000;
4218	case IPW_TX_RATE_48MB:
4219		return 48000000;
4220	case IPW_TX_RATE_54MB:
4221		return 54000000;
4222	}
4223
4224	return 0;
4225}
4226
4227#define IPW_STATS_INTERVAL (2 * HZ)
4228static void ipw_gather_stats(struct ipw_priv *priv)
4229{
4230	u32 rx_err, rx_err_delta, rx_packets_delta;
4231	u32 tx_failures, tx_failures_delta, tx_packets_delta;
4232	u32 missed_beacons_percent, missed_beacons_delta;
4233	u32 quality = 0;
4234	u32 len = sizeof(u32);
4235	s16 rssi;
4236	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4237	    rate_quality;
4238	u32 max_rate;
4239
4240	if (!(priv->status & STATUS_ASSOCIATED)) {
4241		priv->quality = 0;
4242		return;
4243	}
4244
4245	/* Update the statistics */
4246	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4247			&priv->missed_beacons, &len);
4248	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4249	priv->last_missed_beacons = priv->missed_beacons;
4250	if (priv->assoc_request.beacon_interval) {
4251		missed_beacons_percent = missed_beacons_delta *
4252		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4253		    (IPW_STATS_INTERVAL * 10);
4254	} else {
4255		missed_beacons_percent = 0;
4256	}
4257	average_add(&priv->average_missed_beacons, missed_beacons_percent);
4258
4259	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4260	rx_err_delta = rx_err - priv->last_rx_err;
4261	priv->last_rx_err = rx_err;
4262
4263	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4264	tx_failures_delta = tx_failures - priv->last_tx_failures;
4265	priv->last_tx_failures = tx_failures;
4266
4267	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4268	priv->last_rx_packets = priv->rx_packets;
4269
4270	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4271	priv->last_tx_packets = priv->tx_packets;
4272
4273	/* Calculate quality based on the following:
4274	 *
4275	 * Missed beacon: 100% = 0, 0% = 70% missed
4276	 * Rate: 60% = 1Mbs, 100% = Max
4277	 * Rx and Tx errors represent a straight % of total Rx/Tx
4278	 * RSSI: 100% = > -50,  0% = < -80
4279	 * Rx errors: 100% = 0, 0% = 50% missed
4280	 *
4281	 * The lowest computed quality is used.
4282	 *
4283	 */
4284#define BEACON_THRESHOLD 5
4285	beacon_quality = 100 - missed_beacons_percent;
4286	if (beacon_quality < BEACON_THRESHOLD)
4287		beacon_quality = 0;
4288	else
4289		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4290		    (100 - BEACON_THRESHOLD);
4291	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4292			beacon_quality, missed_beacons_percent);
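
	/* Worked example (hypothetical numbers): if 10% of beacons were
	 * missed, beacon_quality starts at 90 and is rescaled to
	 * (90 - 5) * 100 / (100 - 5), which works out to 89% with integer
	 * division; 95% or more missed beacons clamps the value to 0. */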
4293
4294	priv->last_rate = ipw_get_current_rate(priv);
4295	max_rate = ipw_get_max_rate(priv);
4296	rate_quality = priv->last_rate * 40 / max_rate + 60;
4297	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4298			rate_quality, priv->last_rate / 1000000);
4299
4300	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4301		rx_quality = 100 - (rx_err_delta * 100) /
4302		    (rx_packets_delta + rx_err_delta);
4303	else
4304		rx_quality = 100;
4305	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
4306			rx_quality, rx_err_delta, rx_packets_delta);
4307
4308	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4309		tx_quality = 100 - (tx_failures_delta * 100) /
4310		    (tx_packets_delta + tx_failures_delta);
4311	else
4312		tx_quality = 100;
4313	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
4314			tx_quality, tx_failures_delta, tx_packets_delta);
4315
4316	rssi = priv->exp_avg_rssi;
4317	signal_quality =
4318	    (100 *
4319	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4320	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4321	     (priv->ieee->perfect_rssi - rssi) *
4322	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4323	      62 * (priv->ieee->perfect_rssi - rssi))) /
4324	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4325	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4326	if (signal_quality > 100)
4327		signal_quality = 100;
4328	else if (signal_quality < 1)
4329		signal_quality = 0;
4330
4331	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4332			signal_quality, rssi);
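
	/* Worked example for the quadratic mapping above, assuming the
	 * typical bounds perfect_rssi = -20 and worst_rssi = -85: for
	 * rssi = -60 the span is 65 and perfect_rssi - rssi = 40, so
	 * signal_quality = (100 * 65 * 65 - 40 * (15 * 65 + 62 * 40)) / (65 * 65)
	 *                = 284300 / 4225, which truncates to 67%. */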
4333
4334	quality = min(rx_quality, signal_quality);
4335	quality = min(tx_quality, quality);
4336	quality = min(rate_quality, quality);
4337	quality = min(beacon_quality, quality);
4338	if (quality == beacon_quality)
4339		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4340				quality);
4341	if (quality == rate_quality)
4342		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4343				quality);
4344	if (quality == tx_quality)
4345		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4346				quality);
4347	if (quality == rx_quality)
4348		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4349				quality);
4350	if (quality == signal_quality)
4351		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4352				quality);
4353
4354	priv->quality = quality;
4355
4356	queue_delayed_work(priv->workqueue, &priv->gather_stats,
4357			   IPW_STATS_INTERVAL);
4358}
4359
4360static void ipw_bg_gather_stats(struct work_struct *work)
4361{
4362	struct ipw_priv *priv =
4363		container_of(work, struct ipw_priv, gather_stats.work);
4364	mutex_lock(&priv->mutex);
4365	ipw_gather_stats(priv);
4366	mutex_unlock(&priv->mutex);
4367}
4368
4369/* Missed beacon behavior:
4370 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4371 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4372 * Above disassociate threshold, give up and stop scanning.
4373 * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
4374static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4375					    int missed_count)
4376{
4377	priv->notif_missed_beacons = missed_count;
4378
4379	if (missed_count > priv->disassociate_threshold &&
4380	    priv->status & STATUS_ASSOCIATED) {
4381		/* If associated and we've hit the missed
4382		 * beacon threshold, disassociate, turn
4383		 * off roaming, and abort any active scans */
4384		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4385			  IPW_DL_STATE | IPW_DL_ASSOC,
4386			  "Missed beacon: %d - disassociate\n", missed_count);
4387		priv->status &= ~STATUS_ROAMING;
4388		if (priv->status & STATUS_SCANNING) {
4389			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4390				  IPW_DL_STATE,
4391				  "Aborting scan with missed beacon.\n");
4392			queue_work(priv->workqueue, &priv->abort_scan);
4393		}
4394
4395		queue_work(priv->workqueue, &priv->disassociate);
4396		return;
4397	}
4398
4399	if (priv->status & STATUS_ROAMING) {
4400		/* If we are currently roaming, then just
4401		 * print a debug statement... */
4402		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4403			  "Missed beacon: %d - roam in progress\n",
4404			  missed_count);
4405		return;
4406	}
4407
4408	if (roaming &&
4409	    (missed_count > priv->roaming_threshold &&
4410	     missed_count <= priv->disassociate_threshold)) {
4411		/* If we are not already roaming, set the ROAM
4412		 * bit in the status and kick off a scan.
4413		 * This can happen several times before we reach
4414		 * disassociate_threshold. */
4415		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4416			  "Missed beacon: %d - initiate "
4417			  "roaming\n", missed_count);
4418		if (!(priv->status & STATUS_ROAMING)) {
4419			priv->status |= STATUS_ROAMING;
4420			if (!(priv->status & STATUS_SCANNING))
4421				queue_delayed_work(priv->workqueue,
4422						   &priv->request_scan, 0);
4423		}
4424		return;
4425	}
4426
4427	if (priv->status & STATUS_SCANNING &&
4428	    missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4429		/* Stop scan to keep fw from getting
4430		 * stuck (only if we aren't roaming --
4431		 * otherwise we'll never scan more than 2 or 3
4432		 * channels..) */
4433		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4434			  "Aborting scan with missed beacon.\n");
4435		queue_work(priv->workqueue, &priv->abort_scan);
4436	}
4437
4438	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4439}
4440
4441static void ipw_scan_event(struct work_struct *work)
4442{
4443	union iwreq_data wrqu;
4444
4445	struct ipw_priv *priv =
4446		container_of(work, struct ipw_priv, scan_event.work);
4447
4448	wrqu.data.length = 0;
4449	wrqu.data.flags = 0;
4450	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4451}
4452
4453static void handle_scan_event(struct ipw_priv *priv)
4454{
4455	/* Only userspace-requested scan completion events go out immediately */
4456	if (!priv->user_requested_scan) {
4457		if (!delayed_work_pending(&priv->scan_event))
4458			queue_delayed_work(priv->workqueue, &priv->scan_event,
4459					 round_jiffies_relative(msecs_to_jiffies(4000)));
4460	} else {
4461		union iwreq_data wrqu;
4462
4463		priv->user_requested_scan = 0;
4464		cancel_delayed_work(&priv->scan_event);
4465
4466		wrqu.data.length = 0;
4467		wrqu.data.flags = 0;
4468		wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4469	}
4470}
4471
4472/**
4473 * Handle host notification packet.
4474 * Called from interrupt routine
4475 */
4476static void ipw_rx_notification(struct ipw_priv *priv,
4477				       struct ipw_rx_notification *notif)
4478{
4479	DECLARE_SSID_BUF(ssid);
4480	u16 size = le16_to_cpu(notif->size);
4481
4482	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4483
4484	switch (notif->subtype) {
4485	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4486			struct notif_association *assoc = &notif->u.assoc;
4487
4488			switch (assoc->state) {
4489			case CMAS_ASSOCIATED:{
4490					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4491						  IPW_DL_ASSOC,
4492						  "associated: '%s' %pM\n",
4493						  print_ssid(ssid, priv->essid,
4494							     priv->essid_len),
4495						  priv->bssid);
4496
4497					switch (priv->ieee->iw_mode) {
4498					case IW_MODE_INFRA:
4499						memcpy(priv->ieee->bssid,
4500						       priv->bssid, ETH_ALEN);
4501						break;
4502
4503					case IW_MODE_ADHOC:
4504						memcpy(priv->ieee->bssid,
4505						       priv->bssid, ETH_ALEN);
4506
4507						/* clear out the station table */
4508						priv->num_stations = 0;
4509
4510						IPW_DEBUG_ASSOC
4511						    ("queueing adhoc check\n");
4512						queue_delayed_work(priv->
4513								   workqueue,
4514								   &priv->
4515								   adhoc_check,
4516								   le16_to_cpu(priv->
4517								   assoc_request.
4518								   beacon_interval));
4519						break;
4520					}
4521
4522					priv->status &= ~STATUS_ASSOCIATING;
4523					priv->status |= STATUS_ASSOCIATED;
4524					queue_work(priv->workqueue,
4525						   &priv->system_config);
4526
4527#ifdef CONFIG_IPW2200_QOS
4528#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4529			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4530					if ((priv->status & STATUS_AUTH) &&
4531					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
4532					     == IEEE80211_STYPE_ASSOC_RESP)) {
4533						if ((sizeof
4534						     (struct
4535						      libipw_assoc_response)
4536						     <= size)
4537						    && (size <= 2314)) {
4538							struct
4539							libipw_rx_stats
4540							    stats = {
4541								.len = size - 1,
4542							};
4543
4544							IPW_DEBUG_QOS
4545							    ("QoS Associate "
4546							     "size %d\n", size);
4547							libipw_rx_mgt(priv->
4548									 ieee,
4549									 (struct
4550									  libipw_hdr_4addr
4551									  *)
4552									 &notif->u.raw, &stats);
4553						}
4554					}
4555#endif
4556
4557					schedule_work(&priv->link_up);
4558
4559					break;
4560				}
4561
4562			case CMAS_AUTHENTICATED:{
4563					if (priv->
4564					    status & (STATUS_ASSOCIATED |
4565						      STATUS_AUTH)) {
4566						struct notif_authenticate *auth
4567						    = &notif->u.auth;
4568						IPW_DEBUG(IPW_DL_NOTIF |
4569							  IPW_DL_STATE |
4570							  IPW_DL_ASSOC,
4571							  "deauthenticated: '%s' "
4572							  "%pM"
4573							  ": (0x%04X) - %s\n",
4574							  print_ssid(ssid,
4575								     priv->
4576								     essid,
4577								     priv->
4578								     essid_len),
4579							  priv->bssid,
4580							  le16_to_cpu(auth->status),
4581							  ipw_get_status_code
4582							  (le16_to_cpu
4583							   (auth->status)));
4584
4585						priv->status &=
4586						    ~(STATUS_ASSOCIATING |
4587						      STATUS_AUTH |
4588						      STATUS_ASSOCIATED);
4589
4590						schedule_work(&priv->link_down);
4591						break;
4592					}
4593
4594					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4595						  IPW_DL_ASSOC,
4596						  "authenticated: '%s' %pM\n",
4597						  print_ssid(ssid, priv->essid,
4598							     priv->essid_len),
4599						  priv->bssid);
4600					break;
4601				}
4602
4603			case CMAS_INIT:{
4604					if (priv->status & STATUS_AUTH) {
4605						struct
4606						    libipw_assoc_response
4607						*resp;
4608						resp =
4609						    (struct
4610						     libipw_assoc_response
4611						     *)&notif->u.raw;
4612						IPW_DEBUG(IPW_DL_NOTIF |
4613							  IPW_DL_STATE |
4614							  IPW_DL_ASSOC,
4615							  "association failed (0x%04X): %s\n",
4616							  le16_to_cpu(resp->status),
4617							  ipw_get_status_code
4618							  (le16_to_cpu
4619							   (resp->status)));
4620					}
4621
4622					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4623						  IPW_DL_ASSOC,
4624						  "disassociated: '%s' %pM\n",
4625						  print_ssid(ssid, priv->essid,
4626							     priv->essid_len),
4627						  priv->bssid);
4628
4629					priv->status &=
4630					    ~(STATUS_DISASSOCIATING |
4631					      STATUS_ASSOCIATING |
4632					      STATUS_ASSOCIATED | STATUS_AUTH);
4633					if (priv->assoc_network
4634					    && (priv->assoc_network->
4635						capability &
4636						WLAN_CAPABILITY_IBSS))
4637						ipw_remove_current_network
4638						    (priv);
4639
4640					schedule_work(&priv->link_down);
4641
4642					break;
4643				}
4644
4645			case CMAS_RX_ASSOC_RESP:
4646				break;
4647
4648			default:
4649				IPW_ERROR("assoc: unknown (%d)\n",
4650					  assoc->state);
4651				break;
4652			}
4653
4654			break;
4655		}
4656
4657	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4658			struct notif_authenticate *auth = &notif->u.auth;
4659			switch (auth->state) {
4660			case CMAS_AUTHENTICATED:
4661				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4662					  "authenticated: '%s' %pM\n",
4663					  print_ssid(ssid, priv->essid,
4664						     priv->essid_len),
4665					  priv->bssid);
4666				priv->status |= STATUS_AUTH;
4667				break;
4668
4669			case CMAS_INIT:
4670				if (priv->status & STATUS_AUTH) {
4671					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4672						  IPW_DL_ASSOC,
4673						  "authentication failed (0x%04X): %s\n",
4674						  le16_to_cpu(auth->status),
4675						  ipw_get_status_code(le16_to_cpu
4676								      (auth->
4677								       status)));
4678				}
4679				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4680					  IPW_DL_ASSOC,
4681					  "deauthenticated: '%s' %pM\n",
4682					  print_ssid(ssid, priv->essid,
4683						     priv->essid_len),
4684					  priv->bssid);
4685
4686				priv->status &= ~(STATUS_ASSOCIATING |
4687						  STATUS_AUTH |
4688						  STATUS_ASSOCIATED);
4689
4690				schedule_work(&priv->link_down);
4691				break;
4692
4693			case CMAS_TX_AUTH_SEQ_1:
4694				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4695					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4696				break;
4697			case CMAS_RX_AUTH_SEQ_2:
4698				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4699					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4700				break;
4701			case CMAS_AUTH_SEQ_1_PASS:
4702				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4703					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4704				break;
4705			case CMAS_AUTH_SEQ_1_FAIL:
4706				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4707					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4708				break;
4709			case CMAS_TX_AUTH_SEQ_3:
4710				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4711					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4712				break;
4713			case CMAS_RX_AUTH_SEQ_4:
4714				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4715					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4716				break;
4717			case CMAS_AUTH_SEQ_2_PASS:
4718				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4719					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4720				break;
4721			case CMAS_AUTH_SEQ_2_FAIL:
4722				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4723					  IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4724				break;
4725			case CMAS_TX_ASSOC:
4726				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4727					  IPW_DL_ASSOC, "TX_ASSOC\n");
4728				break;
4729			case CMAS_RX_ASSOC_RESP:
4730				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4731					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4732
4733				break;
4734			case CMAS_ASSOCIATED:
4735				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4736					  IPW_DL_ASSOC, "ASSOCIATED\n");
4737				break;
4738			default:
4739				IPW_DEBUG_NOTIF("auth: failure - %d\n",
4740						auth->state);
4741				break;
4742			}
4743			break;
4744		}
4745
4746	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4747			struct notif_channel_result *x =
4748			    &notif->u.channel_result;
4749
4750			if (size == sizeof(*x)) {
4751				IPW_DEBUG_SCAN("Scan result for channel %d\n",
4752					       x->channel_num);
4753			} else {
4754				IPW_DEBUG_SCAN("Scan result of wrong size %d "
4755					       "(should be %zd)\n",
4756					       size, sizeof(*x));
4757			}
4758			break;
4759		}
4760
4761	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4762			struct notif_scan_complete *x = &notif->u.scan_complete;
4763			if (size == sizeof(*x)) {
4764				IPW_DEBUG_SCAN
4765				    ("Scan completed: type %d, %d channels, "
4766				     "%d status\n", x->scan_type,
4767				     x->num_channels, x->status);
4768			} else {
4769				IPW_ERROR("Scan completed of wrong size %d "
4770					  "(should be %zd)\n",
4771					  size, sizeof(*x));
4772			}
4773
4774			priv->status &=
4775			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4776
4777			wake_up_interruptible(&priv->wait_state);
4778			cancel_delayed_work(&priv->scan_check);
4779
4780			if (priv->status & STATUS_EXIT_PENDING)
4781				break;
4782
4783			priv->ieee->scans++;
4784
4785#ifdef CONFIG_IPW2200_MONITOR
4786			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4787				priv->status |= STATUS_SCAN_FORCED;
4788				queue_delayed_work(priv->workqueue,
4789						   &priv->request_scan, 0);
4790				break;
4791			}
4792			priv->status &= ~STATUS_SCAN_FORCED;
4793#endif				/* CONFIG_IPW2200_MONITOR */
4794
4795			/* Do queued direct scans first */
4796			if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
4797				queue_delayed_work(priv->workqueue,
4798						   &priv->request_direct_scan, 0);
4799			}
4800
4801			if (!(priv->status & (STATUS_ASSOCIATED |
4802					      STATUS_ASSOCIATING |
4803					      STATUS_ROAMING |
4804					      STATUS_DISASSOCIATING)))
4805				queue_work(priv->workqueue, &priv->associate);
4806			else if (priv->status & STATUS_ROAMING) {
4807				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4808					/* If a scan completed and we are in roam mode, then
4809					 * the scan that completed was the one requested as a
4810					 * result of entering roam... so, schedule the
4811					 * roam work */
4812					queue_work(priv->workqueue,
4813						   &priv->roam);
4814				else
4815					/* Don't schedule if we aborted the scan */
4816					priv->status &= ~STATUS_ROAMING;
4817			} else if (priv->status & STATUS_SCAN_PENDING)
4818				queue_delayed_work(priv->workqueue,
4819						   &priv->request_scan, 0);
4820			else if (priv->config & CFG_BACKGROUND_SCAN
4821				 && priv->status & STATUS_ASSOCIATED)
4822				queue_delayed_work(priv->workqueue,
4823						   &priv->request_scan,
4824						   round_jiffies_relative(HZ));
4825
4826			/* Send an empty event to user space.
4827			 * We don't send the received data on the event because
4828			 * it would require us to do complex transcoding, and
4829			 * we want to minimise the work done in the irq handler
4830			 * Use a request to extract the data.
4831			 * Also, we generate this event for any scan, regardless
4832			 * of how the scan was initiated. User space can just
4833			 * sync on periodic scan to get fresh data...
4834			 * Jean II */
4835			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4836				handle_scan_event(priv);
4837			break;
4838		}
4839
4840	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4841			struct notif_frag_length *x = &notif->u.frag_len;
4842
4843			if (size == sizeof(*x))
4844				IPW_ERROR("Frag length: %d\n",
4845					  le16_to_cpu(x->frag_length));
4846			else
4847				IPW_ERROR("Frag length of wrong size %d "
4848					  "(should be %zd)\n",
4849					  size, sizeof(*x));
4850			break;
4851		}
4852
4853	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4854			struct notif_link_deterioration *x =
4855			    &notif->u.link_deterioration;
4856
4857			if (size == sizeof(*x)) {
4858				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4859					"link deterioration: type %d, cnt %d\n",
4860					x->silence_notification_type,
4861					x->silence_count);
4862				memcpy(&priv->last_link_deterioration, x,
4863				       sizeof(*x));
4864			} else {
4865				IPW_ERROR("Link Deterioration of wrong size %d "
4866					  "(should be %zd)\n",
4867					  size, sizeof(*x));
4868			}
4869			break;
4870		}
4871
4872	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4873			IPW_ERROR("Dino config\n");
4874			if (priv->hcmd
4875			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4876				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4877
4878			break;
4879		}
4880
4881	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4882			struct notif_beacon_state *x = &notif->u.beacon_state;
4883			if (size != sizeof(*x)) {
4884				IPW_ERROR
4885				    ("Beacon state of wrong size %d (should "
4886				     "be %zd)\n", size, sizeof(*x));
4887				break;
4888			}
4889
4890			if (le32_to_cpu(x->state) ==
4891			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4892				ipw_handle_missed_beacon(priv,
4893							 le32_to_cpu(x->
4894								     number));
4895
4896			break;
4897		}
4898
4899	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4900			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4901			if (size == sizeof(*x)) {
4902				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4903					  "0x%02x station %d\n",
4904					  x->key_state, x->security_type,
4905					  x->station_index);
4906				break;
4907			}
4908
4909			IPW_ERROR
4910			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
4911			     size, sizeof(*x));
4912			break;
4913		}
4914
4915	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4916			struct notif_calibration *x = &notif->u.calibration;
4917
4918			if (size == sizeof(*x)) {
4919				memcpy(&priv->calib, x, sizeof(*x));
4920				IPW_DEBUG_INFO("TODO: Calibration\n");
4921				break;
4922			}
4923
4924			IPW_ERROR
4925			    ("Calibration of wrong size %d (should be %zd)\n",
4926			     size, sizeof(*x));
4927			break;
4928		}
4929
4930	case HOST_NOTIFICATION_NOISE_STATS:{
4931			if (size == sizeof(u32)) {
4932				priv->exp_avg_noise =
4933				    exponential_average(priv->exp_avg_noise,
4934				    (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4935				    DEPTH_NOISE);
4936				break;
4937			}
4938
4939			IPW_ERROR
4940			    ("Noise stat is wrong size %d (should be %zd)\n",
4941			     size, sizeof(u32));
4942			break;
4943		}
4944
4945	default:
4946		IPW_DEBUG_NOTIF("Unknown notification: "
4947				"subtype=%d,flags=0x%2x,size=%d\n",
4948				notif->subtype, notif->flags, size);
4949	}
4950}
4951
4952/**
4953 * Destroys all DMA structures and initialises them again
4954 *
4955 * @param priv
4956 * @return error code
4957 */
4958static int ipw_queue_reset(struct ipw_priv *priv)
4959{
4960	int rc = 0;
4961	/** @todo customize queue sizes */
4962	int nTx = 64, nTxCmd = 8;
4963	ipw_tx_queue_free(priv);
4964	/* Tx CMD queue */
4965	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4966			       IPW_TX_CMD_QUEUE_READ_INDEX,
4967			       IPW_TX_CMD_QUEUE_WRITE_INDEX,
4968			       IPW_TX_CMD_QUEUE_BD_BASE,
4969			       IPW_TX_CMD_QUEUE_BD_SIZE);
4970	if (rc) {
4971		IPW_ERROR("Tx Cmd queue init failed\n");
4972		goto error;
4973	}
4974	/* Tx queue(s) */
4975	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4976			       IPW_TX_QUEUE_0_READ_INDEX,
4977			       IPW_TX_QUEUE_0_WRITE_INDEX,
4978			       IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4979	if (rc) {
4980		IPW_ERROR("Tx 0 queue init failed\n");
4981		goto error;
4982	}
4983	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4984			       IPW_TX_QUEUE_1_READ_INDEX,
4985			       IPW_TX_QUEUE_1_WRITE_INDEX,
4986			       IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4987	if (rc) {
4988		IPW_ERROR("Tx 1 queue init failed\n");
4989		goto error;
4990	}
4991	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4992			       IPW_TX_QUEUE_2_READ_INDEX,
4993			       IPW_TX_QUEUE_2_WRITE_INDEX,
4994			       IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4995	if (rc) {
4996		IPW_ERROR("Tx 2 queue init failed\n");
4997		goto error;
4998	}
4999	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
5000			       IPW_TX_QUEUE_3_READ_INDEX,
5001			       IPW_TX_QUEUE_3_WRITE_INDEX,
5002			       IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
5003	if (rc) {
5004		IPW_ERROR("Tx 3 queue init failed\n");
5005		goto error;
5006	}
5007	/* statistics */
5008	priv->rx_bufs_min = 0;
5009	priv->rx_pend_max = 0;
5010	return rc;
5011
5012      error:
5013	ipw_tx_queue_free(priv);
5014	return rc;
5015}
5016
5017/**
5018 * Reclaim Tx queue entries that are no longer used by the NIC.
5019 *
5020 * When the FW advances the 'R' index, all entries between the old and
5021 * new 'R' index need to be reclaimed.  As a result, some free space
5022 * opens up.  If there is enough free space (> low mark), wake the Tx queue.
5023 *
5024 * @note Need to protect against garbage in the 'R' index
5025 * @param priv
5026 * @param txq
5027 * @param qindex
5028 * @return Number of used entries remaining in the queue
5029 */
5030static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5031				struct clx2_tx_queue *txq, int qindex)
5032{
5033	u32 hw_tail;
5034	int used;
5035	struct clx2_queue *q = &txq->q;
5036
5037	hw_tail = ipw_read32(priv, q->reg_r);
5038	if (hw_tail >= q->n_bd) {
5039		IPW_ERROR
5040		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5041		     hw_tail, q->n_bd);
5042		goto done;
5043	}
5044	for (; q->last_used != hw_tail;
5045	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5046		ipw_queue_tx_free_tfd(priv, txq);
5047		priv->tx_packets++;
5048	}
5049      done:
5050	if ((ipw_tx_queue_space(q) > q->low_mark) &&
5051	    (qindex >= 0))
5052		netif_wake_queue(priv->net_dev);
5053	used = q->first_empty - q->last_used;
5054	if (used < 0)
5055		used += q->n_bd;
5056
5057	return used;
5058}
5059
5060static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5061			     int len, int sync)
5062{
5063	struct clx2_tx_queue *txq = &priv->txq_cmd;
5064	struct clx2_queue *q = &txq->q;
5065	struct tfd_frame *tfd;
5066
5067	if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5068		IPW_ERROR("No space for Tx\n");
5069		return -EBUSY;
5070	}
5071
5072	tfd = &txq->bd[q->first_empty];
5073	txq->txb[q->first_empty] = NULL;
5074
5075	memset(tfd, 0, sizeof(*tfd));
5076	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5077	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5078	priv->hcmd_seq++;
5079	tfd->u.cmd.index = hcmd;
5080	tfd->u.cmd.length = len;
5081	memcpy(tfd->u.cmd.payload, buf, len);
5082	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5083	ipw_write32(priv, q->reg_w, q->first_empty);
5084	_ipw_read32(priv, 0x90);
5085
5086	return 0;
5087}
5088
5089/*
5090 * Rx theory of operation
5091 *
5092 * The host allocates 32 DMA target addresses and passes the host address
5093 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5094 * 0 to 31
5095 *
5096 * Rx Queue Indexes
5097 * The host/firmware share two index registers for managing the Rx buffers.
5098 *
5099 * The READ index maps to the first position that the firmware may be writing
5100 * to -- the driver can read up to (but not including) this position and get
5101 * good data.
5102 * The READ index is managed by the firmware once the card is enabled.
5103 *
5104 * The WRITE index maps to the last position the driver has read from -- the
5105 * position preceding WRITE is the last slot the firmware can place a packet.
5106 *
5107 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5108 * WRITE = READ.
5109 *
5110 * During initialization the host sets up the READ queue position to the first
5111 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5112 *
5113 * When the firmware places a packet in a buffer it will advance the READ index
5114 * and fire the RX interrupt.  The driver can then query the READ index and
5115 * process as many packets as possible, moving the WRITE index forward as it
5116 * resets the Rx queue buffers with new memory.
5117 *
5118 * The management in the driver is as follows:
5119 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
5120 *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5121 *   to replenish the ipw->rxq->rx_free.
5122 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5123 *   ipw->rxq is replenished and the READ INDEX is updated (updating the
5124 *   'processed' and 'read' driver indexes as well)
5125 * + A received packet is processed and handed to the kernel network stack,
5126 *   detached from the ipw->rxq.  The driver 'processed' index is updated.
5127 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5128 *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5129 *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
5130 *   were enough free buffers and RX_STALLED is set it is cleared.
5131 *
5132 *
5133 * Driver sequence:
5134 *
5135 * ipw_rx_queue_alloc()       Allocates rx_free
5136 * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
5137 *                            ipw_rx_queue_restock
5138 * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
5139 *                            queue, updates firmware pointers, and updates
5140 *                            the WRITE index.  If insufficient rx_free buffers
5141 *                            are available, schedules ipw_rx_queue_replenish
5142 *
5143 * -- enable interrupts --
5144 * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
5145 *                            READ INDEX, detaching the SKB from the pool.
5146 *                            Moves the packet buffer from queue to rx_used.
5147 *                            Calls ipw_rx_queue_restock to refill any empty
5148 *                            slots.
5149 * ...
5150 *
5151 */
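
/*
 * For example (hypothetical index values): with READ = 10, a WRITE of 9
 * corresponds to the "empty" case described above (no received data for
 * the driver), while a WRITE of 10 corresponds to the "full" case; all
 * other values mean the queue is partially filled.
 */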
5152
5153/*
5154 * If there are slots in the RX queue that need to be restocked,
5155 * and we have free pre-allocated buffers, fill the ranks as much
5156 * as we can pulling from rx_free.
5157 *
5158 * This moves the 'write' index forward to catch up with 'processed', and
5159 * also updates the memory address in the firmware to reference the new
5160 * target buffer.
5161 */
5162static void ipw_rx_queue_restock(struct ipw_priv *priv)
5163{
5164	struct ipw_rx_queue *rxq = priv->rxq;
5165	struct list_head *element;
5166	struct ipw_rx_mem_buffer *rxb;
5167	unsigned long flags;
5168	int write;
5169
5170	spin_lock_irqsave(&rxq->lock, flags);
5171	write = rxq->write;
5172	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5173		element = rxq->rx_free.next;
5174		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5175		list_del(element);
5176
5177		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5178			    rxb->dma_addr);
5179		rxq->queue[rxq->write] = rxb;
5180		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5181		rxq->free_count--;
5182	}
5183	spin_unlock_irqrestore(&rxq->lock, flags);
5184
5185	/* If the pre-allocated buffer pool is dropping low, schedule to
5186	 * refill it */
5187	if (rxq->free_count <= RX_LOW_WATERMARK)
5188		queue_work(priv->workqueue, &priv->rx_replenish);
5189
5190	/* If we've added more space for the firmware to place data, tell it */
5191	if (write != rxq->write)
5192		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5193}
5194
5195/*
5196 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5197 * Also restock the Rx queue via ipw_rx_queue_restock.
5198 *
5199 * This is called as a scheduled work item (except during initialization).
5200 */
5201static void ipw_rx_queue_replenish(void *data)
5202{
5203	struct ipw_priv *priv = data;
5204	struct ipw_rx_queue *rxq = priv->rxq;
5205	struct list_head *element;
5206	struct ipw_rx_mem_buffer *rxb;
5207	unsigned long flags;
5208
5209	spin_lock_irqsave(&rxq->lock, flags);
5210	while (!list_empty(&rxq->rx_used)) {
5211		element = rxq->rx_used.next;
5212		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5213		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5214		if (!rxb->skb) {
5215			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5216			       priv->net_dev->name);
5217			/* We don't reschedule replenish work here -- we will
5218			 * call the restock method and if it still needs
5219			 * more buffers it will schedule replenish */
5220			break;
5221		}
5222		list_del(element);
5223
5224		rxb->dma_addr =
5225		    pci_map_single(priv->pci_dev, rxb->skb->data,
5226				   IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5227
5228		list_add_tail(&rxb->list, &rxq->rx_free);
5229		rxq->free_count++;
5230	}
5231	spin_unlock_irqrestore(&rxq->lock, flags);
5232
5233	ipw_rx_queue_restock(priv);
5234}
5235
5236static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5237{
5238	struct ipw_priv *priv =
5239		container_of(work, struct ipw_priv, rx_replenish);
5240	mutex_lock(&priv->mutex);
5241	ipw_rx_queue_replenish(priv);
5242	mutex_unlock(&priv->mutex);
5243}
5244
5245/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5246 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
5247 * This free routine walks the list of POOL entries and, if the SKB is
5248 * non-NULL, unmaps and frees it.
5249 */
5250static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5251{
5252	int i;
5253
5254	if (!rxq)
5255		return;
5256
5257	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5258		if (rxq->pool[i].skb != NULL) {
5259			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5260					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5261			dev_kfree_skb(rxq->pool[i].skb);
5262		}
5263	}
5264
5265	kfree(rxq);
5266}
5267
5268static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5269{
5270	struct ipw_rx_queue *rxq;
5271	int i;
5272
5273	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5274	if (unlikely(!rxq)) {
5275		IPW_ERROR("memory allocation failed\n");
5276		return NULL;
5277	}
5278	spin_lock_init(&rxq->lock);
5279	INIT_LIST_HEAD(&rxq->rx_free);
5280	INIT_LIST_HEAD(&rxq->rx_used);
5281
5282	/* Fill the rx_used queue with _all_ of the Rx buffers */
5283	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5284		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5285
5286	/* Set us so that we have processed and used all buffers, but have
5287	 * not restocked the Rx queue with fresh buffers */
5288	rxq->read = rxq->write = 0;
5289	rxq->free_count = 0;
5290
5291	return rxq;
5292}
5293
5294static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5295{
5296	rate &= ~LIBIPW_BASIC_RATE_MASK;
5297	if (ieee_mode == IEEE_A) {
5298		switch (rate) {
5299		case LIBIPW_OFDM_RATE_6MB:
5300			return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5301			    1 : 0;
5302		case LIBIPW_OFDM_RATE_9MB:
5303			return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5304			    1 : 0;
5305		case LIBIPW_OFDM_RATE_12MB:
5306			return priv->
5307			    rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5308		case LIBIPW_OFDM_RATE_18MB:
5309			return priv->
5310			    rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5311		case LIBIPW_OFDM_RATE_24MB:
5312			return priv->
5313			    rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5314		case LIBIPW_OFDM_RATE_36MB:
5315			return priv->
5316			    rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5317		case LIBIPW_OFDM_RATE_48MB:
5318			return priv->
5319			    rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5320		case LIBIPW_OFDM_RATE_54MB:
5321			return priv->
5322			    rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5323		default:
5324			return 0;
5325		}
5326	}
5327
5328	/* B and G mixed */
5329	switch (rate) {
5330	case LIBIPW_CCK_RATE_1MB:
5331		return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5332	case LIBIPW_CCK_RATE_2MB:
5333		return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5334	case LIBIPW_CCK_RATE_5MB:
5335		return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5336	case LIBIPW_CCK_RATE_11MB:
5337		return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5338	}
5339
5340	/* If we are limited to B modulations, bail at this point */
5341	if (ieee_mode == IEEE_B)
5342		return 0;
5343
5344	/* G */
5345	switch (rate) {
5346	case LIBIPW_OFDM_RATE_6MB:
5347		return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5348	case LIBIPW_OFDM_RATE_9MB:
5349		return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5350	case LIBIPW_OFDM_RATE_12MB:
5351		return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5352	case LIBIPW_OFDM_RATE_18MB:
5353		return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5354	case LIBIPW_OFDM_RATE_24MB:
5355		return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5356	case LIBIPW_OFDM_RATE_36MB:
5357		return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5358	case LIBIPW_OFDM_RATE_48MB:
5359		return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5360	case LIBIPW_OFDM_RATE_54MB:
5361		return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5362	}
5363
5364	return 0;
5365}
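
/*
 * Example (hypothetical rate byte, using the standard 500 kbps-unit
 * supported-rates encoding): an entry of 0x96 -- 11 Mbps flagged as a
 * basic rate -- has LIBIPW_BASIC_RATE_MASK stripped above, leaving
 * LIBIPW_CCK_RATE_11MB (0x16), which is then checked against
 * priv->rates_mask.
 */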
5366
5367static int ipw_compatible_rates(struct ipw_priv *priv,
5368				const struct libipw_network *network,
5369				struct ipw_supported_rates *rates)
5370{
5371	int num_rates, i;
5372
5373	memset(rates, 0, sizeof(*rates));
5374	num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5375	rates->num_rates = 0;
5376	for (i = 0; i < num_rates; i++) {
5377		if (!ipw_is_rate_in_mask(priv, network->mode,
5378					 network->rates[i])) {
5379
5380			if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5381				IPW_DEBUG_SCAN("Adding masked mandatory "
5382					       "rate %02X\n",
5383					       network->rates[i]);
5384				rates->supported_rates[rates->num_rates++] =
5385				    network->rates[i];
5386				continue;
5387			}
5388
5389			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5390				       network->rates[i], priv->rates_mask);
5391			continue;
5392		}
5393
5394		rates->supported_rates[rates->num_rates++] = network->rates[i];
5395	}
5396
5397	num_rates = min(network->rates_ex_len,
5398			(u8) (IPW_MAX_RATES - num_rates));
5399	for (i = 0; i < num_rates; i++) {
5400		if (!ipw_is_rate_in_mask(priv, network->mode,
5401					 network->rates_ex[i])) {
5402			if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5403				IPW_DEBUG_SCAN("Adding masked mandatory "
5404					       "rate %02X\n",
5405					       network->rates_ex[i]);
5406				rates->supported_rates[rates->num_rates++] =
5407				    network->rates_ex[i];
5408				continue;
5409			}
5410
5411			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5412				       network->rates_ex[i], priv->rates_mask);
5413			continue;
5414		}
5415
5416		rates->supported_rates[rates->num_rates++] =
5417		    network->rates_ex[i];
5418	}
5419
5420	return 1;
5421}
5422
5423static void ipw_copy_rates(struct ipw_supported_rates *dest,
5424				  const struct ipw_supported_rates *src)
5425{
5426	u8 i;
5427	for (i = 0; i < src->num_rates; i++)
5428		dest->supported_rates[i] = src->supported_rates[i];
5429	dest->num_rates = src->num_rates;
5430}
5431
5432/* TODO: Look at sniffed packets in the air to determine if the basic rate
5433 * mask should ever be used -- right now all callers to add the scan rates are
5434 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5435static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5436				   u8 modulation, u32 rate_mask)
5437{
5438	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5439	    LIBIPW_BASIC_RATE_MASK : 0;
5440
5441	if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5442		rates->supported_rates[rates->num_rates++] =
5443		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5444
5445	if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5446		rates->supported_rates[rates->num_rates++] =
5447		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5448
5449	if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5450		rates->supported_rates[rates->num_rates++] = basic_mask |
5451		    LIBIPW_CCK_RATE_5MB;
5452
5453	if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5454		rates->supported_rates[rates->num_rates++] = basic_mask |
5455		    LIBIPW_CCK_RATE_11MB;
5456}
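
/*
 * Example (assuming the standard 500 kbps-unit rate encoding): with every
 * CCK rate enabled and CCK modulation, the helper above appends 0x82,
 * 0x84, 0x0b and 0x16 -- 1 and 2 Mbps are always flagged as basic rates,
 * while 5.5 and 11 Mbps are not, since basic_mask is 0 for CCK modulation.
 */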
5457
5458static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5459				    u8 modulation, u32 rate_mask)
5460{
5461	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5462	    LIBIPW_BASIC_RATE_MASK : 0;
5463
5464	if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5465		rates->supported_rates[rates->num_rates++] = basic_mask |
5466		    LIBIPW_OFDM_RATE_6MB;
5467
5468	if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5469		rates->supported_rates[rates->num_rates++] =
5470		    LIBIPW_OFDM_RATE_9MB;
5471
5472	if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5473		rates->supported_rates[rates->num_rates++] = basic_mask |
5474		    LIBIPW_OFDM_RATE_12MB;
5475
5476	if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5477		rates->supported_rates[rates->num_rates++] =
5478		    LIBIPW_OFDM_RATE_18MB;
5479
5480	if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5481		rates->supported_rates[rates->num_rates++] = basic_mask |
5482		    LIBIPW_OFDM_RATE_24MB;
5483
5484	if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5485		rates->supported_rates[rates->num_rates++] =
5486		    LIBIPW_OFDM_RATE_36MB;
5487
5488	if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5489		rates->supported_rates[rates->num_rates++] =
5490		    LIBIPW_OFDM_RATE_48MB;
5491
5492	if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5493		rates->supported_rates[rates->num_rates++] =
5494		    LIBIPW_OFDM_RATE_54MB;
5495}
5496
5497struct ipw_network_match {
5498	struct libipw_network *network;
5499	struct ipw_supported_rates rates;
5500};
5501
5502static int ipw_find_adhoc_network(struct ipw_priv *priv,
5503				  struct ipw_network_match *match,
5504				  struct libipw_network *network,
5505				  int roaming)
5506{
5507	struct ipw_supported_rates rates;
5508	DECLARE_SSID_BUF(ssid);
5509
5510	/* Verify that this network's capability is compatible with the
5511	 * current mode (AdHoc or Infrastructure) */
5512	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5513	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5514		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5515				"capability mismatch.\n",
5516				print_ssid(ssid, network->ssid,
5517					   network->ssid_len),
5518				network->bssid);
5519		return 0;
5520	}
5521
5522	if (unlikely(roaming)) {
5523		/* If we are roaming, then check that this is a valid
5524		 * network to try to roam to */
5525		if ((network->ssid_len != match->network->ssid_len) ||
5526		    memcmp(network->ssid, match->network->ssid,
5527			   network->ssid_len)) {
5528			IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5529					"because of non-network ESSID.\n",
5530					print_ssid(ssid, network->ssid,
5531						   network->ssid_len),
5532					network->bssid);
5533			return 0;
5534		}
5535	} else {
5536		/* If an ESSID has been configured then compare the broadcast
5537		 * ESSID to ours */
5538		if ((priv->config & CFG_STATIC_ESSID) &&
5539		    ((network->ssid_len != priv->essid_len) ||
5540		     memcmp(network->ssid, priv->essid,
5541			    min(network->ssid_len, priv->essid_len)))) {
5542			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5543
5544			strncpy(escaped,
5545				print_ssid(ssid, network->ssid,
5546					   network->ssid_len),
5547				sizeof(escaped));
5548			IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5549					"because of ESSID mismatch: '%s'.\n",
5550					escaped, network->bssid,
5551					print_ssid(ssid, priv->essid,
5552						   priv->essid_len));
5553			return 0;
5554		}
5555	}
5556
	/* If this network is newer (lower TSF) than the current match,
	 * don't bother testing everything else. */
5559
5560	if (network->time_stamp[0] < match->network->time_stamp[0]) {
		IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5562				"current network.\n",
5563				print_ssid(ssid, match->network->ssid,
5564					   match->network->ssid_len));
5565		return 0;
5566	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
		IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5568				"current network.\n",
5569				print_ssid(ssid, match->network->ssid,
5570					   match->network->ssid_len));
5571		return 0;
5572	}
5573
5574	/* Now go through and see if the requested network is valid... */
5575	if (priv->ieee->scan_age != 0 &&
5576	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5577		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5578				"because of age: %ums.\n",
5579				print_ssid(ssid, network->ssid,
5580					   network->ssid_len),
5581				network->bssid,
5582				jiffies_to_msecs(jiffies -
5583						 network->last_scanned));
5584		return 0;
5585	}
5586
5587	if ((priv->config & CFG_STATIC_CHANNEL) &&
5588	    (network->channel != priv->channel)) {
5589		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5590				"because of channel mismatch: %d != %d.\n",
5591				print_ssid(ssid, network->ssid,
5592					   network->ssid_len),
5593				network->bssid,
5594				network->channel, priv->channel);
5595		return 0;
5596	}
5597
	/* Verify privacy compatibility */
5599	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5600	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5601		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5602				"because of privacy mismatch: %s != %s.\n",
5603				print_ssid(ssid, network->ssid,
5604					   network->ssid_len),
5605				network->bssid,
				priv->capability & CAP_PRIVACY_ON ?
				"on" : "off",
				network->capability & WLAN_CAPABILITY_PRIVACY ?
				"on" : "off");
5611		return 0;
5612	}
5613
5614	if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5615		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5616				"because of the same BSSID match: %pM"
5617				".\n", print_ssid(ssid, network->ssid,
5618						  network->ssid_len),
5619				network->bssid,
5620				priv->bssid);
5621		return 0;
5622	}
5623
5624	/* Filter out any incompatible freq / mode combinations */
5625	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5626		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5627				"because of invalid frequency/mode "
5628				"combination.\n",
5629				print_ssid(ssid, network->ssid,
5630					   network->ssid_len),
5631				network->bssid);
5632		return 0;
5633	}
5634
5635	/* Ensure that the rates supported by the driver are compatible with
5636	 * this AP, including verification of basic rates (mandatory) */
5637	if (!ipw_compatible_rates(priv, network, &rates)) {
5638		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5639				"because configured rate mask excludes "
5640				"AP mandatory rate.\n",
5641				print_ssid(ssid, network->ssid,
5642					   network->ssid_len),
5643				network->bssid);
5644		return 0;
5645	}
5646
5647	if (rates.num_rates == 0) {
5648		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5649				"because of no compatible rates.\n",
5650				print_ssid(ssid, network->ssid,
5651					   network->ssid_len),
5652				network->bssid);
5653		return 0;
5654	}
5655
	/* TODO: Perform any further minimal comparative tests.  We do not
5657	 * want to put too much policy logic here; intelligent scan selection
5658	 * should occur within a generic IEEE 802.11 user space tool.  */
5659
5660	/* Set up 'new' AP to this network */
5661	ipw_copy_rates(&match->rates, &rates);
5662	match->network = network;
5663	IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5664			print_ssid(ssid, network->ssid, network->ssid_len),
5665			network->bssid);
5666
5667	return 1;
5668}
5669
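/* Work handler: while associated in ad-hoc mode, look for a better IBSS
 * with our ESSID and, if one is found, drop the current network and
 * disassociate so that we can merge into it. */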
5670static void ipw_merge_adhoc_network(struct work_struct *work)
5671{
5672	DECLARE_SSID_BUF(ssid);
5673	struct ipw_priv *priv =
5674		container_of(work, struct ipw_priv, merge_networks);
5675	struct libipw_network *network = NULL;
5676	struct ipw_network_match match = {
5677		.network = priv->assoc_network
5678	};
5679
5680	if ((priv->status & STATUS_ASSOCIATED) &&
5681	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5682		/* First pass through ROAM process -- look for a better
5683		 * network */
5684		unsigned long flags;
5685
5686		spin_lock_irqsave(&priv->ieee->lock, flags);
5687		list_for_each_entry(network, &priv->ieee->network_list, list) {
5688			if (network != priv->assoc_network)
5689				ipw_find_adhoc_network(priv, &match, network,
5690						       1);
5691		}
5692		spin_unlock_irqrestore(&priv->ieee->lock, flags);
5693
5694		if (match.network == priv->assoc_network) {
5695			IPW_DEBUG_MERGE("No better ADHOC in this network to "
5696					"merge to.\n");
5697			return;
5698		}
5699
5700		mutex_lock(&priv->mutex);
5701		if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5702			IPW_DEBUG_MERGE("remove network %s\n",
5703					print_ssid(ssid, priv->essid,
5704						   priv->essid_len));
5705			ipw_remove_current_network(priv);
5706		}
5707
5708		ipw_disassociate(priv);
5709		priv->assoc_network = match.network;
5710		mutex_unlock(&priv->mutex);
5711		return;
5712	}
5713}
5714
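/* Check whether 'network' is a better association candidate than the
 * current 'match'.  Returns 1 and updates 'match' if so, 0 if the network
 * is filtered out or has a weaker signal than the current match. */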
5715static int ipw_best_network(struct ipw_priv *priv,
5716			    struct ipw_network_match *match,
5717			    struct libipw_network *network, int roaming)
5718{
5719	struct ipw_supported_rates rates;
5720	DECLARE_SSID_BUF(ssid);
5721
5722	/* Verify that this network's capability is compatible with the
5723	 * current mode (AdHoc or Infrastructure) */
5724	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5725	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
5726	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5727	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5728		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5729				"capability mismatch.\n",
5730				print_ssid(ssid, network->ssid,
5731					   network->ssid_len),
5732				network->bssid);
5733		return 0;
5734	}
5735
5736	if (unlikely(roaming)) {
		/* If we are roaming, check whether this is a valid
		 * network to try to roam to */
5739		if ((network->ssid_len != match->network->ssid_len) ||
5740		    memcmp(network->ssid, match->network->ssid,
5741			   network->ssid_len)) {
5742			IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5743					"because of non-network ESSID.\n",
5744					print_ssid(ssid, network->ssid,
5745						   network->ssid_len),
5746					network->bssid);
5747			return 0;
5748		}
5749	} else {
5750		/* If an ESSID has been configured then compare the broadcast
5751		 * ESSID to ours */
5752		if ((priv->config & CFG_STATIC_ESSID) &&
5753		    ((network->ssid_len != priv->essid_len) ||
5754		     memcmp(network->ssid, priv->essid,
5755			    min(network->ssid_len, priv->essid_len)))) {
5756			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5757			strncpy(escaped,
5758				print_ssid(ssid, network->ssid,
5759					   network->ssid_len),
5760				sizeof(escaped));
5761			IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5762					"because of ESSID mismatch: '%s'.\n",
5763					escaped, network->bssid,
5764					print_ssid(ssid, priv->essid,
5765						   priv->essid_len));
5766			return 0;
5767		}
5768	}
5769
	/* If the currently matched network has a stronger signal than this
	 * one, don't bother testing everything else. */
5772	if (match->network && match->network->stats.rssi > network->stats.rssi) {
5773		char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5774		strncpy(escaped,
5775			print_ssid(ssid, network->ssid, network->ssid_len),
5776			sizeof(escaped));
5777		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5778				"'%s (%pM)' has a stronger signal.\n",
5779				escaped, network->bssid,
5780				print_ssid(ssid, match->network->ssid,
5781					   match->network->ssid_len),
5782				match->network->bssid);
5783		return 0;
5784	}
5785
5786	/* If this network has already had an association attempt within the
5787	 * last 3 seconds, do not try and associate again... */
5788	if (network->last_associate &&
5789	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5790		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5791				"because of storming (%ums since last "
5792				"assoc attempt).\n",
5793				print_ssid(ssid, network->ssid,
5794					   network->ssid_len),
5795				network->bssid,
5796				jiffies_to_msecs(jiffies -
5797						 network->last_associate));
5798		return 0;
5799	}
5800
5801	/* Now go through and see if the requested network is valid... */
5802	if (priv->ieee->scan_age != 0 &&
5803	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5804		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5805				"because of age: %ums.\n",
5806				print_ssid(ssid, network->ssid,
5807					   network->ssid_len),
5808				network->bssid,
5809				jiffies_to_msecs(jiffies -
5810						 network->last_scanned));
5811		return 0;
5812	}
5813
5814	if ((priv->config & CFG_STATIC_CHANNEL) &&
5815	    (network->channel != priv->channel)) {
5816		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5817				"because of channel mismatch: %d != %d.\n",
5818				print_ssid(ssid, network->ssid,
5819					   network->ssid_len),
5820				network->bssid,
5821				network->channel, priv->channel);
5822		return 0;
5823	}
5824
	/* Verify privacy compatibility */
5826	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5827	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5828		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5829				"because of privacy mismatch: %s != %s.\n",
5830				print_ssid(ssid, network->ssid,
5831					   network->ssid_len),
5832				network->bssid,
5833				priv->capability & CAP_PRIVACY_ON ? "on" :
5834				"off",
5835				network->capability &
5836				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5837		return 0;
5838	}
5839
5840	if ((priv->config & CFG_STATIC_BSSID) &&
5841	    memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5842		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5843				"because of BSSID mismatch: %pM.\n",
5844				print_ssid(ssid, network->ssid,
5845					   network->ssid_len),
5846				network->bssid, priv->bssid);
5847		return 0;
5848	}
5849
5850	/* Filter out any incompatible freq / mode combinations */
5851	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5852		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5853				"because of invalid frequency/mode "
5854				"combination.\n",
5855				print_ssid(ssid, network->ssid,
5856					   network->ssid_len),
5857				network->bssid);
5858		return 0;
5859	}
5860
5861	/* Filter out invalid channel in current GEO */
5862	if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5863		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5864				"because of invalid channel in current GEO\n",
5865				print_ssid(ssid, network->ssid,
5866					   network->ssid_len),
5867				network->bssid);
5868		return 0;
5869	}
5870
5871	/* Ensure that the rates supported by the driver are compatible with
5872	 * this AP, including verification of basic rates (mandatory) */
5873	if (!ipw_compatible_rates(priv, network, &rates)) {
5874		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5875				"because configured rate mask excludes "
5876				"AP mandatory rate.\n",
5877				print_ssid(ssid, network->ssid,
5878					   network->ssid_len),
5879				network->bssid);
5880		return 0;
5881	}
5882
5883	if (rates.num_rates == 0) {
5884		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5885				"because of no compatible rates.\n",
5886				print_ssid(ssid, network->ssid,
5887					   network->ssid_len),
5888				network->bssid);
5889		return 0;
5890	}
5891
	/* TODO: Perform any further minimal comparative tests.  We do not
5893	 * want to put too much policy logic here; intelligent scan selection
5894	 * should occur within a generic IEEE 802.11 user space tool.  */
5895
5896	/* Set up 'new' AP to this network */
5897	ipw_copy_rates(&match->rates, &rates);
5898	match->network = network;
5899
5900	IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5901			print_ssid(ssid, network->ssid, network->ssid_len),
5902			network->bssid);
5903
5904	return 1;
5905}
5906
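/* Initialize 'network' as a new ad-hoc network to be created by us:
 * pick a band/mode and a valid channel, generate a BSSID and fill in
 * default IBSS parameters. */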
5907static void ipw_adhoc_create(struct ipw_priv *priv,
5908			     struct libipw_network *network)
5909{
5910	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5911	int i;
5912
	/*
	 * For the purposes of scanning, we can set our wireless mode
	 * to trigger scans across combinations of bands, but when it
	 * comes to creating a new ad-hoc network, we have to tell the
	 * FW exactly which band to use.
	 *
	 * We also have the possibility of an invalid channel for the
	 * chosen band.  Attempting to create a new ad-hoc network
	 * with a channel that is invalid for the wireless mode will
	 * trigger a FW fatal error.
	 */
5925	switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5926	case LIBIPW_52GHZ_BAND:
5927		network->mode = IEEE_A;
5928		i = libipw_channel_to_index(priv->ieee, priv->channel);
5929		BUG_ON(i == -1);
5930		if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5931			IPW_WARNING("Overriding invalid channel\n");
5932			priv->channel = geo->a[0].channel;
5933		}
5934		break;
5935
5936	case LIBIPW_24GHZ_BAND:
5937		if (priv->ieee->mode & IEEE_G)
5938			network->mode = IEEE_G;
5939		else
5940			network->mode = IEEE_B;
5941		i = libipw_channel_to_index(priv->ieee, priv->channel);
5942		BUG_ON(i == -1);
5943		if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5944			IPW_WARNING("Overriding invalid channel\n");
5945			priv->channel = geo->bg[0].channel;
5946		}
5947		break;
5948
5949	default:
5950		IPW_WARNING("Overriding invalid channel\n");
5951		if (priv->ieee->mode & IEEE_A) {
5952			network->mode = IEEE_A;
5953			priv->channel = geo->a[0].channel;
5954		} else if (priv->ieee->mode & IEEE_G) {
5955			network->mode = IEEE_G;
5956			priv->channel = geo->bg[0].channel;
5957		} else {
5958			network->mode = IEEE_B;
5959			priv->channel = geo->bg[0].channel;
5960		}
5961		break;
5962	}
5963
5964	network->channel = priv->channel;
5965	priv->config |= CFG_ADHOC_PERSIST;
5966	ipw_create_bssid(priv, network->bssid);
5967	network->ssid_len = priv->essid_len;
5968	memcpy(network->ssid, priv->essid, priv->essid_len);
5969	memset(&network->stats, 0, sizeof(network->stats));
5970	network->capability = WLAN_CAPABILITY_IBSS;
5971	if (!(priv->config & CFG_PREAMBLE_LONG))
5972		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5973	if (priv->capability & CAP_PRIVACY_ON)
5974		network->capability |= WLAN_CAPABILITY_PRIVACY;
5975	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5976	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5977	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5978	memcpy(network->rates_ex,
5979	       &priv->rates.supported_rates[network->rates_len],
5980	       network->rates_ex_len);
5981	network->last_scanned = 0;
5982	network->flags = 0;
5983	network->last_associate = 0;
5984	network->time_stamp[0] = 0;
5985	network->time_stamp[1] = 0;
5986	network->beacon_interval = 100;	/* Default */
5987	network->listen_interval = 10;	/* Default */
5988	network->atim_window = 0;	/* Default */
5989	network->wpa_ie_len = 0;
5990	network->rsn_ie_len = 0;
5991}
5992
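/* Send the transmit key at 'index' to the firmware via IPW_CMD_TGI_TX_KEY
 * so that TKIP/CCMP encryption can be done in hardware. */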
5993static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5994{
5995	struct ipw_tgi_tx_key key;
5996
5997	if (!(priv->ieee->sec.flags & (1 << index)))
5998		return;
5999
6000	key.key_id = index;
6001	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
6002	key.security_type = type;
6003	key.station_index = 0;	/* always 0 for BSS */
6004	key.flags = 0;
6005	/* 0 for new key; previous value of counter (after fatal error) */
6006	key.tx_counter[0] = cpu_to_le32(0);
6007	key.tx_counter[1] = cpu_to_le32(0);
6008
6009	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
6010}
6011
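/* Download the configured static keys of the given security 'type' to the
 * firmware, one IPW_CMD_WEP_KEY command per populated key slot. */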
6012static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
6013{
6014	struct ipw_wep_key key;
6015	int i;
6016
6017	key.cmd_id = DINO_CMD_WEP_KEY;
6018	key.seq_num = 0;
6019
	/* Note: AES keys cannot be set multiple times.
	 * Only set them the first time. */
6022	for (i = 0; i < 4; i++) {
6023		key.key_index = i | type;
6024		if (!(priv->ieee->sec.flags & (1 << i))) {
6025			key.key_size = 0;
6026			continue;
6027		}
6028
6029		key.key_size = priv->ieee->sec.key_sizes[i];
6030		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
6031
6032		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
6033	}
6034}
6035
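/* Select, for the given security level, whether unicast frames are
 * decrypted by the firmware or by the host.  No effect when host
 * encryption is in use. */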
6036static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
6037{
6038	if (priv->ieee->host_encrypt)
6039		return;
6040
6041	switch (level) {
6042	case SEC_LEVEL_3:
6043		priv->sys_config.disable_unicast_decryption = 0;
6044		priv->ieee->host_decrypt = 0;
6045		break;
6046	case SEC_LEVEL_2:
6047		priv->sys_config.disable_unicast_decryption = 1;
6048		priv->ieee->host_decrypt = 1;
6049		break;
6050	case SEC_LEVEL_1:
6051		priv->sys_config.disable_unicast_decryption = 0;
6052		priv->ieee->host_decrypt = 0;
6053		break;
6054	case SEC_LEVEL_0:
6055		priv->sys_config.disable_unicast_decryption = 1;
6056		break;
6057	default:
6058		break;
6059	}
6060}
6061
6062static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
6063{
6064	if (priv->ieee->host_encrypt)
6065		return;
6066
6067	switch (level) {
6068	case SEC_LEVEL_3:
6069		priv->sys_config.disable_multicast_decryption = 0;
6070		break;
6071	case SEC_LEVEL_2:
6072		priv->sys_config.disable_multicast_decryption = 1;
6073		break;
6074	case SEC_LEVEL_1:
6075		priv->sys_config.disable_multicast_decryption = 0;
6076		break;
6077	case SEC_LEVEL_0:
6078		priv->sys_config.disable_multicast_decryption = 1;
6079		break;
6080	default:
6081		break;
6082	}
6083}
6084
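/* Push the keys for the currently configured security level down to the
 * firmware (CCMP, TKIP or WEP). */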
6085static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6086{
6087	switch (priv->ieee->sec.level) {
6088	case SEC_LEVEL_3:
6089		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6090			ipw_send_tgi_tx_key(priv,
6091					    DCT_FLAG_EXT_SECURITY_CCM,
6092					    priv->ieee->sec.active_key);
6093
6094		if (!priv->ieee->host_mc_decrypt)
6095			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6096		break;
6097	case SEC_LEVEL_2:
6098		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6099			ipw_send_tgi_tx_key(priv,
6100					    DCT_FLAG_EXT_SECURITY_TKIP,
6101					    priv->ieee->sec.active_key);
6102		break;
6103	case SEC_LEVEL_1:
6104		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6105		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6106		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6107		break;
6108	case SEC_LEVEL_0:
6109	default:
6110		break;
6111	}
6112}
6113
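/* Periodic ad-hoc watchdog: disassociate once too many IBSS beacons have
 * been missed (unless CFG_ADHOC_PERSIST is set), otherwise re-arm for the
 * next beacon interval. */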
6114static void ipw_adhoc_check(void *data)
6115{
6116	struct ipw_priv *priv = data;
6117
6118	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6119	    !(priv->config & CFG_ADHOC_PERSIST)) {
6120		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6121			  IPW_DL_STATE | IPW_DL_ASSOC,
6122			  "Missed beacon: %d - disassociate\n",
6123			  priv->missed_adhoc_beacons);
6124		ipw_remove_current_network(priv);
6125		ipw_disassociate(priv);
6126		return;
6127	}
6128
6129	queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6130			   le16_to_cpu(priv->assoc_request.beacon_interval));
6131}
6132
6133static void ipw_bg_adhoc_check(struct work_struct *work)
6134{
6135	struct ipw_priv *priv =
6136		container_of(work, struct ipw_priv, adhoc_check.work);
6137	mutex_lock(&priv->mutex);
6138	ipw_adhoc_check(priv);
6139	mutex_unlock(&priv->mutex);
6140}
6141
6142static void ipw_debug_config(struct ipw_priv *priv)
6143{
6144	DECLARE_SSID_BUF(ssid);
6145	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6146		       "[CFG 0x%08X]\n", priv->config);
6147	if (priv->config & CFG_STATIC_CHANNEL)
6148		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6149	else
6150		IPW_DEBUG_INFO("Channel unlocked.\n");
6151	if (priv->config & CFG_STATIC_ESSID)
6152		IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6153			       print_ssid(ssid, priv->essid, priv->essid_len));
6154	else
6155		IPW_DEBUG_INFO("ESSID unlocked.\n");
6156	if (priv->config & CFG_STATIC_BSSID)
6157		IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6158	else
6159		IPW_DEBUG_INFO("BSSID unlocked.\n");
6160	if (priv->capability & CAP_PRIVACY_ON)
6161		IPW_DEBUG_INFO("PRIVACY on\n");
6162	else
6163		IPW_DEBUG_INFO("PRIVACY off\n");
6164	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6165}
6166
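/* Translate the user-configured fixed rate mask into the firmware's
 * per-band format and write it to the fixed rate override location. */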
6167static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6168{
6169	/* TODO: Verify that this works... */
6170	struct ipw_fixed_rate fr;
6171	u32 reg;
6172	u16 mask = 0;
6173	u16 new_tx_rates = priv->rates_mask;
6174
6175	/* Identify 'current FW band' and match it with the fixed
6176	 * Tx rates */
6177
6178	switch (priv->ieee->freq_band) {
6179	case LIBIPW_52GHZ_BAND:	/* A only */
6180		/* IEEE_A */
6181		if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6182			/* Invalid fixed rate mask */
6183			IPW_DEBUG_WX
6184			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6185			new_tx_rates = 0;
6186			break;
6187		}
6188
6189		new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6190		break;
6191
	default:		/* 2.4 GHz or Mixed */
6193		/* IEEE_B */
6194		if (mode == IEEE_B) {
6195			if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6196				/* Invalid fixed rate mask */
6197				IPW_DEBUG_WX
6198				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6199				new_tx_rates = 0;
6200			}
6201			break;
6202		}
6203
6204		/* IEEE_G */
6205		if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6206				    LIBIPW_OFDM_RATES_MASK)) {
6207			/* Invalid fixed rate mask */
6208			IPW_DEBUG_WX
6209			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6210			new_tx_rates = 0;
6211			break;
6212		}
6213
6214		if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6215			mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6216			new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6217		}
6218
6219		if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6220			mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6221			new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6222		}
6223
6224		if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6225			mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6226			new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6227		}
6228
6229		new_tx_rates |= mask;
6230		break;
6231	}
6232
6233	fr.tx_rates = cpu_to_le16(new_tx_rates);
6234
6235	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6236	ipw_write_reg32(priv, reg, *(u32 *) & fr);
6237}
6238
6239static void ipw_abort_scan(struct ipw_priv *priv)
6240{
6241	int err;
6242
6243	if (priv->status & STATUS_SCAN_ABORTING) {
6244		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6245		return;
6246	}
6247	priv->status |= STATUS_SCAN_ABORTING;
6248
6249	err = ipw_send_scan_abort(priv);
6250	if (err)
6251		IPW_DEBUG_HC("Request to abort scan failed.\n");
6252}
6253
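/* Fill the channel list of 'scan' with the channels of the current
 * geography, skipping the associated channel and forcing passive dwell on
 * passive-only channels.  The first entry of each band block encodes the
 * band and the number of channels that follow. */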
6254static void ipw_add_scan_channels(struct ipw_priv *priv,
6255				  struct ipw_scan_request_ext *scan,
6256				  int scan_type)
6257{
6258	int channel_index = 0;
6259	const struct libipw_geo *geo;
6260	int i;
6261
6262	geo = libipw_get_geo(priv->ieee);
6263
6264	if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6265		int start = channel_index;
6266		for (i = 0; i < geo->a_channels; i++) {
6267			if ((priv->status & STATUS_ASSOCIATED) &&
6268			    geo->a[i].channel == priv->channel)
6269				continue;
6270			channel_index++;
6271			scan->channels_list[channel_index] = geo->a[i].channel;
6272			ipw_set_scan_type(scan, channel_index,
6273					  geo->a[i].
6274					  flags & LIBIPW_CH_PASSIVE_ONLY ?
6275					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6276					  scan_type);
6277		}
6278
6279		if (start != channel_index) {
6280			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6281			    (channel_index - start);
6282			channel_index++;
6283		}
6284	}
6285
6286	if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6287		int start = channel_index;
6288		if (priv->config & CFG_SPEED_SCAN) {
6289			int index;
6290			u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6291				/* nop out the list */
6292				[0] = 0
6293			};
6294
6295			u8 channel;
6296			while (channel_index < IPW_SCAN_CHANNELS - 1) {
6297				channel =
6298				    priv->speed_scan[priv->speed_scan_pos];
6299				if (channel == 0) {
6300					priv->speed_scan_pos = 0;
6301					channel = priv->speed_scan[0];
6302				}
6303				if ((priv->status & STATUS_ASSOCIATED) &&
6304				    channel == priv->channel) {
6305					priv->speed_scan_pos++;
6306					continue;
6307				}
6308
6309				/* If this channel has already been
6310				 * added in scan, break from loop
6311				 * and this will be the first channel
6312				 * in the next scan.
6313				 */
6314				if (channels[channel - 1] != 0)
6315					break;
6316
6317				channels[channel - 1] = 1;
6318				priv->speed_scan_pos++;
6319				channel_index++;
6320				scan->channels_list[channel_index] = channel;
6321				index =
6322				    libipw_channel_to_index(priv->ieee, channel);
6323				ipw_set_scan_type(scan, channel_index,
6324						  geo->bg[index].
6325						  flags &
6326						  LIBIPW_CH_PASSIVE_ONLY ?
6327						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6328						  : scan_type);
6329			}
6330		} else {
6331			for (i = 0; i < geo->bg_channels; i++) {
6332				if ((priv->status & STATUS_ASSOCIATED) &&
6333				    geo->bg[i].channel == priv->channel)
6334					continue;
6335				channel_index++;
6336				scan->channels_list[channel_index] =
6337				    geo->bg[i].channel;
6338				ipw_set_scan_type(scan, channel_index,
6339						  geo->bg[i].
6340						  flags &
6341						  LIBIPW_CH_PASSIVE_ONLY ?
6342						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6343						  : scan_type);
6344			}
6345		}
6346
6347		if (start != channel_index) {
6348			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6349			    (channel_index - start);
6350		}
6351	}
6352}
6353
6354static int ipw_passive_dwell_time(struct ipw_priv *priv)
6355{
6356	/* staying on passive channels longer than the DTIM interval during a
6357	 * scan, while associated, causes the firmware to cancel the scan
6358	 * without notification. Hence, don't stay on passive channels longer
6359	 * than the beacon interval.
6360	 */
6361	if (priv->status & STATUS_ASSOCIATED
6362	    && priv->assoc_network->beacon_interval > 10)
6363		return priv->assoc_network->beacon_interval - 10;
6364	else
6365		return 120;
6366}
6367
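/* Build and send a scan request of the given 'type'.  With 'direct' set,
 * the scan is directed at priv->direct_scan_ssid.  If a scan is already in
 * progress, an abort is pending or RF kill is active, the request is
 * queued via the STATUS_*_SCAN_PENDING flags instead. */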
6368static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6369{
6370	struct ipw_scan_request_ext scan;
6371	int err = 0, scan_type;
6372
6373	if (!(priv->status & STATUS_INIT) ||
6374	    (priv->status & STATUS_EXIT_PENDING))
6375		return 0;
6376
6377	mutex_lock(&priv->mutex);
6378
6379	if (direct && (priv->direct_scan_ssid_len == 0)) {
6380		IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6381		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6382		goto done;
6383	}
6384
6385	if (priv->status & STATUS_SCANNING) {
6386		IPW_DEBUG_HC("Concurrent scan requested.  Queuing.\n");
6387		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6388					STATUS_SCAN_PENDING;
6389		goto done;
6390	}
6391
6392	if (!(priv->status & STATUS_SCAN_FORCED) &&
6393	    priv->status & STATUS_SCAN_ABORTING) {
6394		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
6395		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6396					STATUS_SCAN_PENDING;
6397		goto done;
6398	}
6399
6400	if (priv->status & STATUS_RF_KILL_MASK) {
6401		IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6402		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6403					STATUS_SCAN_PENDING;
6404		goto done;
6405	}
6406
6407	memset(&scan, 0, sizeof(scan));
6408	scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6409
6410	if (type == IW_SCAN_TYPE_PASSIVE) {
6411		IPW_DEBUG_WX("use passive scanning\n");
6412		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6413		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6414			cpu_to_le16(ipw_passive_dwell_time(priv));
6415		ipw_add_scan_channels(priv, &scan, scan_type);
6416		goto send_request;
6417	}
6418
6419	/* Use active scan by default. */
6420	if (priv->config & CFG_SPEED_SCAN)
6421		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6422			cpu_to_le16(30);
6423	else
6424		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6425			cpu_to_le16(20);
6426
6427	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6428		cpu_to_le16(20);
6429
6430	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6431		cpu_to_le16(ipw_passive_dwell_time(priv));
6432	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6433
6434#ifdef CONFIG_IPW2200_MONITOR
6435	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6436		u8 channel;
6437		u8 band = 0;
6438
6439		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6440		case LIBIPW_52GHZ_BAND:
6441			band = (u8) (IPW_A_MODE << 6) | 1;
6442			channel = priv->channel;
6443			break;
6444
6445		case LIBIPW_24GHZ_BAND:
6446			band = (u8) (IPW_B_MODE << 6) | 1;
6447			channel = priv->channel;
6448			break;
6449
6450		default:
6451			band = (u8) (IPW_B_MODE << 6) | 1;
6452			channel = 9;
6453			break;
6454		}
6455
6456		scan.channels_list[0] = band;
6457		scan.channels_list[1] = channel;
6458		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6459
6460		/* NOTE:  The card will sit on this channel for this time
6461		 * period.  Scan aborts are timing sensitive and frequently
6462		 * result in firmware restarts.  As such, it is best to
6463		 * set a small dwell_time here and just keep re-issuing
6464		 * scans.  Otherwise fast channel hopping will not actually
6465		 * hop channels.
6466		 *
6467		 * TODO: Move SPEED SCAN support to all modes and bands */
6468		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6469			cpu_to_le16(2000);
6470	} else {
6471#endif				/* CONFIG_IPW2200_MONITOR */
6472		/* Honor direct scans first, otherwise if we are roaming make
6473		 * this a direct scan for the current network.  Finally,
6474		 * ensure that every other scan is a fast channel hop scan */
6475		if (direct) {
6476			err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6477			                    priv->direct_scan_ssid_len);
6478			if (err) {
				IPW_DEBUG_HC("Attempt to send SSID command "
					     "failed.\n");
6481				goto done;
6482			}
6483
6484			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6485		} else if ((priv->status & STATUS_ROAMING)
6486			   || (!(priv->status & STATUS_ASSOCIATED)
6487			       && (priv->config & CFG_STATIC_ESSID)
6488			       && (le32_to_cpu(scan.full_scan_index) % 2))) {
6489			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6490			if (err) {
6491				IPW_DEBUG_HC("Attempt to send SSID command "
6492					     "failed.\n");
6493				goto done;
6494			}
6495
6496			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6497		} else
6498			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6499
6500		ipw_add_scan_channels(priv, &scan, scan_type);
6501#ifdef CONFIG_IPW2200_MONITOR
6502	}
6503#endif
6504
6505send_request:
6506	err = ipw_send_scan_request_ext(priv, &scan);
6507	if (err) {
6508		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6509		goto done;
6510	}
6511
6512	priv->status |= STATUS_SCANNING;
6513	if (direct) {
6514		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6515		priv->direct_scan_ssid_len = 0;
6516	} else
6517		priv->status &= ~STATUS_SCAN_PENDING;
6518
6519	queue_delayed_work(priv->workqueue, &priv->scan_check,
6520			   IPW_SCAN_CHECK_WATCHDOG);
6521done:
6522	mutex_unlock(&priv->mutex);
6523	return err;
6524}
6525
6526static void ipw_request_passive_scan(struct work_struct *work)
6527{
6528	struct ipw_priv *priv =
6529		container_of(work, struct ipw_priv, request_passive_scan.work);
6530	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6531}
6532
6533static void ipw_request_scan(struct work_struct *work)
6534{
6535	struct ipw_priv *priv =
6536		container_of(work, struct ipw_priv, request_scan.work);
6537	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6538}
6539
6540static void ipw_request_direct_scan(struct work_struct *work)
6541{
6542	struct ipw_priv *priv =
6543		container_of(work, struct ipw_priv, request_direct_scan.work);
6544	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6545}
6546
6547static void ipw_bg_abort_scan(struct work_struct *work)
6548{
6549	struct ipw_priv *priv =
6550		container_of(work, struct ipw_priv, abort_scan);
6551	mutex_lock(&priv->mutex);
6552	ipw_abort_scan(priv);
6553	mutex_unlock(&priv->mutex);
6554}
6555
6556static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6557{
6558	/* This is called when wpa_supplicant loads and closes the driver
6559	 * interface. */
6560	priv->ieee->wpa_enabled = value;
6561	return 0;
6562}
6563
6564static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6565{
6566	struct libipw_device *ieee = priv->ieee;
6567	struct libipw_security sec = {
6568		.flags = SEC_AUTH_MODE,
6569	};
6570	int ret = 0;
6571
6572	if (value & IW_AUTH_ALG_SHARED_KEY) {
6573		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6574		ieee->open_wep = 0;
6575	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6576		sec.auth_mode = WLAN_AUTH_OPEN;
6577		ieee->open_wep = 1;
6578	} else if (value & IW_AUTH_ALG_LEAP) {
6579		sec.auth_mode = WLAN_AUTH_LEAP;
6580		ieee->open_wep = 1;
6581	} else
6582		return -EINVAL;
6583
6584	if (ieee->set_security)
6585		ieee->set_security(ieee->dev, &sec);
6586	else
6587		ret = -EOPNOTSUPP;
6588
6589	return ret;
6590}
6591
6592static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6593				int wpa_ie_len)
6594{
6595	/* make sure WPA is enabled */
6596	ipw_wpa_enable(priv, 1);
6597}
6598
6599static int ipw_set_rsn_capa(struct ipw_priv *priv,
6600			    char *capabilities, int length)
6601{
6602	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6603
6604	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6605				capabilities);
6606}
6607
6608/*
6609 * WE-18 support
6610 */
6611
6612/* SIOCSIWGENIE */
6613static int ipw_wx_set_genie(struct net_device *dev,
6614			    struct iw_request_info *info,
6615			    union iwreq_data *wrqu, char *extra)
6616{
6617	struct ipw_priv *priv = libipw_priv(dev);
6618	struct libipw_device *ieee = priv->ieee;
6619	u8 *buf;
6620	int err = 0;
6621
6622	if (wrqu->data.length > MAX_WPA_IE_LEN ||
6623	    (wrqu->data.length && extra == NULL))
6624		return -EINVAL;
6625
6626	if (wrqu->data.length) {
6627		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6628		if (buf == NULL) {
6629			err = -ENOMEM;
6630			goto out;
6631		}
6632
6633		kfree(ieee->wpa_ie);
6634		ieee->wpa_ie = buf;
6635		ieee->wpa_ie_len = wrqu->data.length;
6636	} else {
6637		kfree(ieee->wpa_ie);
6638		ieee->wpa_ie = NULL;
6639		ieee->wpa_ie_len = 0;
6640	}
6641
6642	ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6643      out:
6644	return err;
6645}
6646
6647/* SIOCGIWGENIE */
6648static int ipw_wx_get_genie(struct net_device *dev,
6649			    struct iw_request_info *info,
6650			    union iwreq_data *wrqu, char *extra)
6651{
6652	struct ipw_priv *priv = libipw_priv(dev);
6653	struct libipw_device *ieee = priv->ieee;
6654	int err = 0;
6655
6656	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6657		wrqu->data.length = 0;
6658		goto out;
6659	}
6660
6661	if (wrqu->data.length < ieee->wpa_ie_len) {
6662		err = -E2BIG;
6663		goto out;
6664	}
6665
6666	wrqu->data.length = ieee->wpa_ie_len;
6667	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6668
6669      out:
6670	return err;
6671}
6672
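/* Map a WEXT cipher selector to the driver's SEC_LEVEL_* value;
 * returns -1 for unknown ciphers. */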
6673static int wext_cipher2level(int cipher)
6674{
6675	switch (cipher) {
6676	case IW_AUTH_CIPHER_NONE:
6677		return SEC_LEVEL_0;
6678	case IW_AUTH_CIPHER_WEP40:
6679	case IW_AUTH_CIPHER_WEP104:
6680		return SEC_LEVEL_1;
6681	case IW_AUTH_CIPHER_TKIP:
6682		return SEC_LEVEL_2;
6683	case IW_AUTH_CIPHER_CCMP:
6684		return SEC_LEVEL_3;
6685	default:
6686		return -1;
6687	}
6688}
6689
6690/* SIOCSIWAUTH */
6691static int ipw_wx_set_auth(struct net_device *dev,
6692			   struct iw_request_info *info,
6693			   union iwreq_data *wrqu, char *extra)
6694{
6695	struct ipw_priv *priv = libipw_priv(dev);
6696	struct libipw_device *ieee = priv->ieee;
6697	struct iw_param *param = &wrqu->param;
6698	struct lib80211_crypt_data *crypt;
6699	unsigned long flags;
6700	int ret = 0;
6701
6702	switch (param->flags & IW_AUTH_INDEX) {
6703	case IW_AUTH_WPA_VERSION:
6704		break;
6705	case IW_AUTH_CIPHER_PAIRWISE:
6706		ipw_set_hw_decrypt_unicast(priv,
6707					   wext_cipher2level(param->value));
6708		break;
6709	case IW_AUTH_CIPHER_GROUP:
6710		ipw_set_hw_decrypt_multicast(priv,
6711					     wext_cipher2level(param->value));
6712		break;
6713	case IW_AUTH_KEY_MGMT:
6714		/*
6715		 * ipw2200 does not use these parameters
6716		 */
6717		break;
6718
6719	case IW_AUTH_TKIP_COUNTERMEASURES:
6720		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6721		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6722			break;
6723
6724		flags = crypt->ops->get_flags(crypt->priv);
6725
6726		if (param->value)
6727			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6728		else
6729			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6730
6731		crypt->ops->set_flags(flags, crypt->priv);
6732
6733		break;
6734
6735	case IW_AUTH_DROP_UNENCRYPTED:{
			/* HACK:
			 *
			 * wpa_supplicant calls set_wpa_enabled when the driver
			 * is loaded and unloaded, regardless of whether WPA is
			 * being used.  No other calls are made before an
			 * association is expected that could tell us whether
			 * encryption will be used.  If encryption is not being
			 * used, drop_unencrypted is set to false, else true --
			 * we can use this to determine whether the
			 * CAP_PRIVACY_ON bit should be set.
			 */
6747			struct libipw_security sec = {
6748				.flags = SEC_ENABLED,
6749				.enabled = param->value,
6750			};
6751			priv->ieee->drop_unencrypted = param->value;
6752			/* We only change SEC_LEVEL for open mode. Others
6753			 * are set by ipw_wpa_set_encryption.
6754			 */
6755			if (!param->value) {
6756				sec.flags |= SEC_LEVEL;
6757				sec.level = SEC_LEVEL_0;
6758			} else {
6759				sec.flags |= SEC_LEVEL;
6760				sec.level = SEC_LEVEL_1;
6761			}
6762			if (priv->ieee->set_security)
6763				priv->ieee->set_security(priv->ieee->dev, &sec);
6764			break;
6765		}
6766
6767	case IW_AUTH_80211_AUTH_ALG:
6768		ret = ipw_wpa_set_auth_algs(priv, param->value);
6769		break;
6770
6771	case IW_AUTH_WPA_ENABLED:
6772		ret = ipw_wpa_enable(priv, param->value);
6773		ipw_disassociate(priv);
6774		break;
6775
6776	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6777		ieee->ieee802_1x = param->value;
6778		break;
6779
6780	case IW_AUTH_PRIVACY_INVOKED:
6781		ieee->privacy_invoked = param->value;
6782		break;
6783
6784	default:
6785		return -EOPNOTSUPP;
6786	}
6787	return ret;
6788}
6789
6790/* SIOCGIWAUTH */
6791static int ipw_wx_get_auth(struct net_device *dev,
6792			   struct iw_request_info *info,
6793			   union iwreq_data *wrqu, char *extra)
6794{
6795	struct ipw_priv *priv = libipw_priv(dev);
6796	struct libipw_device *ieee = priv->ieee;
6797	struct lib80211_crypt_data *crypt;
6798	struct iw_param *param = &wrqu->param;
6799	int ret = 0;
6800
6801	switch (param->flags & IW_AUTH_INDEX) {
6802	case IW_AUTH_WPA_VERSION:
6803	case IW_AUTH_CIPHER_PAIRWISE:
6804	case IW_AUTH_CIPHER_GROUP:
6805	case IW_AUTH_KEY_MGMT:
6806		/*
6807		 * wpa_supplicant will control these internally
6808		 */
6809		ret = -EOPNOTSUPP;
6810		break;
6811
6812	case IW_AUTH_TKIP_COUNTERMEASURES:
6813		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6814		if (!crypt || !crypt->ops->get_flags)
6815			break;
6816
6817		param->value = (crypt->ops->get_flags(crypt->priv) &
6818				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6819
6820		break;
6821
6822	case IW_AUTH_DROP_UNENCRYPTED:
6823		param->value = ieee->drop_unencrypted;
6824		break;
6825
6826	case IW_AUTH_80211_AUTH_ALG:
6827		param->value = ieee->sec.auth_mode;
6828		break;
6829
6830	case IW_AUTH_WPA_ENABLED:
6831		param->value = ieee->wpa_enabled;
6832		break;
6833
6834	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6835		param->value = ieee->ieee802_1x;
6836		break;
6837
6838	case IW_AUTH_ROAMING_CONTROL:
6839	case IW_AUTH_PRIVACY_INVOKED:
6840		param->value = ieee->privacy_invoked;
6841		break;
6842
6843	default:
6844		return -EOPNOTSUPP;
6845	}
6846	return 0;
6847}
6848
6849/* SIOCSIWENCODEEXT */
6850static int ipw_wx_set_encodeext(struct net_device *dev,
6851				struct iw_request_info *info,
6852				union iwreq_data *wrqu, char *extra)
6853{
6854	struct ipw_priv *priv = libipw_priv(dev);
6855	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6856
6857	if (hwcrypto) {
6858		if (ext->alg == IW_ENCODE_ALG_TKIP) {
6859			/* IPW HW can't build TKIP MIC,
6860			   host decryption still needed */
6861			if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6862				priv->ieee->host_mc_decrypt = 1;
6863			else {
6864				priv->ieee->host_encrypt = 0;
6865				priv->ieee->host_encrypt_msdu = 1;
6866				priv->ieee->host_decrypt = 1;
6867			}
6868		} else {
6869			priv->ieee->host_encrypt = 0;
6870			priv->ieee->host_encrypt_msdu = 0;
6871			priv->ieee->host_decrypt = 0;
6872			priv->ieee->host_mc_decrypt = 0;
6873		}
6874	}
6875
6876	return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6877}
6878
6879/* SIOCGIWENCODEEXT */
6880static int ipw_wx_get_encodeext(struct net_device *dev,
6881				struct iw_request_info *info,
6882				union iwreq_data *wrqu, char *extra)
6883{
6884	struct ipw_priv *priv = libipw_priv(dev);
6885	return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6886}
6887
6888/* SIOCSIWMLME */
6889static int ipw_wx_set_mlme(struct net_device *dev,
6890			   struct iw_request_info *info,
6891			   union iwreq_data *wrqu, char *extra)
6892{
6893	struct ipw_priv *priv = libipw_priv(dev);
6894	struct iw_mlme *mlme = (struct iw_mlme *)extra;
6895	__le16 reason;
6896
6897	reason = cpu_to_le16(mlme->reason_code);
6898
6899	switch (mlme->cmd) {
6900	case IW_MLME_DEAUTH:
6901		/* silently ignore */
6902		break;
6903
6904	case IW_MLME_DISASSOC:
6905		ipw_disassociate(priv);
6906		break;
6907
6908	default:
6909		return -EOPNOTSUPP;
6910	}
6911	return 0;
6912}
6913
6914#ifdef CONFIG_IPW2200_QOS
6915
6916/* QoS */
6917/*
6918* get the modulation type of the current network or
* the card's current mode
6920*/
6921static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6922{
6923	u8 mode = 0;
6924
6925	if (priv->status & STATUS_ASSOCIATED) {
6926		unsigned long flags;
6927
6928		spin_lock_irqsave(&priv->ieee->lock, flags);
6929		mode = priv->assoc_network->mode;
6930		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6931	} else {
6932		mode = priv->ieee->mode;
6933	}
6934	IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6935	return mode;
6936}
6937
6938/*
* Handle beacon and probe response management frames
6940*/
6941static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6942					 int active_network,
6943					 struct libipw_network *network)
6944{
6945	u32 size = sizeof(struct libipw_qos_parameters);
6946
6947	if (network->capability & WLAN_CAPABILITY_IBSS)
6948		network->qos_data.active = network->qos_data.supported;
6949
6950	if (network->flags & NETWORK_HAS_QOS_MASK) {
6951		if (active_network &&
6952		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6953			network->qos_data.active = network->qos_data.supported;
6954
6955		if ((network->qos_data.active == 1) && (active_network == 1) &&
6956		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6957		    (network->qos_data.old_param_count !=
6958		     network->qos_data.param_count)) {
6959			network->qos_data.old_param_count =
6960			    network->qos_data.param_count;
6961			schedule_work(&priv->qos_activate);
6962			IPW_DEBUG_QOS("QoS parameters change call "
6963				      "qos_activate\n");
6964		}
6965	} else {
6966		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6967			memcpy(&network->qos_data.parameters,
6968			       &def_parameters_CCK, size);
6969		else
6970			memcpy(&network->qos_data.parameters,
6971			       &def_parameters_OFDM, size);
6972
6973		if ((network->qos_data.active == 1) && (active_network == 1)) {
6974			IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6975			schedule_work(&priv->qos_activate);
6976		}
6977
6978		network->qos_data.active = 0;
6979		network->qos_data.supported = 0;
6980	}
6981	if ((priv->status & STATUS_ASSOCIATED) &&
6982	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6983		if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6984			if (network->capability & WLAN_CAPABILITY_IBSS)
6985				if ((network->ssid_len ==
6986				     priv->assoc_network->ssid_len) &&
6987				    !memcmp(network->ssid,
6988					    priv->assoc_network->ssid,
6989					    network->ssid_len)) {
6990					queue_work(priv->workqueue,
6991						   &priv->merge_networks);
6992				}
6993	}
6994
6995	return 0;
6996}
6997
6998/*
* This function sets up the firmware to support QoS by sending the
* IPW_CMD_QOS_PARAMETERS command
7001*/
7002static int ipw_qos_activate(struct ipw_priv *priv,
7003			    struct libipw_qos_data *qos_network_data)
7004{
7005	int err;
7006	struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
7007	struct libipw_qos_parameters *active_one = NULL;
7008	u32 size = sizeof(struct libipw_qos_parameters);
7009	u32 burst_duration;
7010	int i;
7011	u8 type;
7012
7013	type = ipw_qos_current_mode(priv);
7014
7015	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
7016	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
7017	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
7018	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
7019
7020	if (qos_network_data == NULL) {
7021		if (type == IEEE_B) {
7022			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
7023			active_one = &def_parameters_CCK;
7024		} else
7025			active_one = &def_parameters_OFDM;
7026
7027		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7028		burst_duration = ipw_qos_get_burst_duration(priv);
7029		for (i = 0; i < QOS_QUEUE_NUM; i++)
7030			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
7031			    cpu_to_le16(burst_duration);
7032	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7033		if (type == IEEE_B) {
			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
7035				      type);
7036			if (priv->qos_data.qos_enable == 0)
7037				active_one = &def_parameters_CCK;
7038			else
7039				active_one = priv->qos_data.def_qos_parm_CCK;
7040		} else {
7041			if (priv->qos_data.qos_enable == 0)
7042				active_one = &def_parameters_OFDM;
7043			else
7044				active_one = priv->qos_data.def_qos_parm_OFDM;
7045		}
7046		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7047	} else {
7048		unsigned long flags;
7049		int active;
7050
7051		spin_lock_irqsave(&priv->ieee->lock, flags);
7052		active_one = &(qos_network_data->parameters);
7053		qos_network_data->old_param_count =
7054		    qos_network_data->param_count;
7055		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
7056		active = qos_network_data->supported;
7057		spin_unlock_irqrestore(&priv->ieee->lock, flags);
7058
7059		if (active == 0) {
7060			burst_duration = ipw_qos_get_burst_duration(priv);
7061			for (i = 0; i < QOS_QUEUE_NUM; i++)
7062				qos_parameters[QOS_PARAM_SET_ACTIVE].
7063				    tx_op_limit[i] = cpu_to_le16(burst_duration);
7064		}
7065	}
7066
7067	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
7068	err = ipw_send_qos_params_command(priv,
7069					  (struct libipw_qos_parameters *)
7070					  &(qos_parameters[0]));
7071	if (err)
7072		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
7073
7074	return err;
7075}
7076
7077/*
7078* send IPW_CMD_WME_INFO to the firmware
7079*/
7080static int ipw_qos_set_info_element(struct ipw_priv *priv)
7081{
7082	int ret = 0;
7083	struct libipw_qos_information_element qos_info;
7084
7085	if (priv == NULL)
7086		return -1;
7087
7088	qos_info.elementID = QOS_ELEMENT_ID;
7089	qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7090
7091	qos_info.version = QOS_VERSION_1;
7092	qos_info.ac_info = 0;
7093
7094	memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7095	qos_info.qui_type = QOS_OUI_TYPE;
7096	qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7097
7098	ret = ipw_send_qos_info_command(priv, &qos_info);
7099	if (ret != 0) {
7100		IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7101	}
7102	return ret;
7103}
7104
7105/*
7106* Set the QoS parameter with the association request structure
7107*/
7108static int ipw_qos_association(struct ipw_priv *priv,
7109			       struct libipw_network *network)
7110{
7111	int err = 0;
7112	struct libipw_qos_data *qos_data = NULL;
7113	struct libipw_qos_data ibss_data = {
7114		.supported = 1,
7115		.active = 1,
7116	};
7117
7118	switch (priv->ieee->iw_mode) {
7119	case IW_MODE_ADHOC:
7120		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7121
7122		qos_data = &ibss_data;
7123		break;
7124
7125	case IW_MODE_INFRA:
7126		qos_data = &network->qos_data;
7127		break;
7128
7129	default:
7130		BUG();
7131		break;
7132	}
7133
7134	err = ipw_qos_activate(priv, qos_data);
7135	if (err) {
7136		priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7137		return err;
7138	}
7139
7140	if (priv->qos_data.qos_enable && qos_data->supported) {
7141		IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7142		priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7143		return ipw_qos_set_info_element(priv);
7144	}
7145
7146	return 0;
7147}
7148
7149/*
* Handle the association response.  If the QoS setting we get from the
* network differs from the associated setting, adjust the QoS
* setting
7153*/
7154static int ipw_qos_association_resp(struct ipw_priv *priv,
7155				    struct libipw_network *network)
7156{
7157	int ret = 0;
7158	unsigned long flags;
7159	u32 size = sizeof(struct libipw_qos_parameters);
7160	int set_qos_param = 0;
7161
7162	if ((priv == NULL) || (network == NULL) ||
7163	    (priv->assoc_network == NULL))
7164		return ret;
7165
7166	if (!(priv->status & STATUS_ASSOCIATED))
7167		return ret;
7168
7169	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7170		return ret;
7171
7172	spin_lock_irqsave(&priv->ieee->lock, flags);
7173	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7174		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7175		       sizeof(struct libipw_qos_data));
7176		priv->assoc_network->qos_data.active = 1;
7177		if ((network->qos_data.old_param_count !=
7178		     network->qos_data.param_count)) {
7179			set_qos_param = 1;
7180			network->qos_data.old_param_count =
7181			    network->qos_data.param_count;
7182		}
7183
7184	} else {
7185		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7186			memcpy(&priv->assoc_network->qos_data.parameters,
7187			       &def_parameters_CCK, size);
7188		else
7189			memcpy(&priv->assoc_network->qos_data.parameters,
7190			       &def_parameters_OFDM, size);
7191		priv->assoc_network->qos_data.active = 0;
7192		priv->assoc_network->qos_data.supported = 0;
7193		set_qos_param = 1;
7194	}
7195
7196	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7197
7198	if (set_qos_param == 1)
7199		schedule_work(&priv->qos_activate);
7200
7201	return ret;
7202}
7203
7204static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7205{
7206	u32 ret = 0;
7207
7208	if ((priv == NULL))
7209		return 0;
7210
7211	if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7212		ret = priv->qos_data.burst_duration_CCK;
7213	else
7214		ret = priv->qos_data.burst_duration_OFDM;
7215
7216	return ret;
7217}
7218
7219/*
* Initialize the global QoS settings
7221*/
7222static void ipw_qos_init(struct ipw_priv *priv, int enable,
7223			 int burst_enable, u32 burst_duration_CCK,
7224			 u32 burst_duration_OFDM)
7225{
7226	priv->qos_data.qos_enable = enable;
7227
7228	if (priv->qos_data.qos_enable) {
7229		priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7230		priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7231		IPW_DEBUG_QOS("QoS is enabled\n");
7232	} else {
7233		priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7234		priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7235		IPW_DEBUG_QOS("QoS is not enabled\n");
7236	}
7237
7238	priv->qos_data.burst_enable = burst_enable;
7239
7240	if (burst_enable) {
7241		priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7242		priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7243	} else {
7244		priv->qos_data.burst_duration_CCK = 0;
7245		priv->qos_data.burst_duration_OFDM = 0;
7246	}
7247}
7248
7249/*
7250* map the packet priority to the right TX Queue
7251*/
7252static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7253{
7254	if (priority > 7 || !priv->qos_data.qos_enable)
7255		priority = 0;
7256
7257	return from_priority_to_tx_queue[priority] - 1;
7258}
7259
7260static int ipw_is_qos_active(struct net_device *dev,
7261			     struct sk_buff *skb)
7262{
7263	struct ipw_priv *priv = libipw_priv(dev);
7264	struct libipw_qos_data *qos_data = NULL;
7265	int active, supported;
7266	u8 *daddr = skb->data + ETH_ALEN;
7267	int unicast = !is_multicast_ether_addr(daddr);
7268
7269	if (!(priv->status & STATUS_ASSOCIATED))
7270		return 0;
7271
7272	qos_data = &priv->assoc_network->qos_data;
7273
7274	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7275		if (unicast == 0)
7276			qos_data->active = 0;
7277		else
7278			qos_data->active = qos_data->supported;
7279	}
7280	active = qos_data->active;
7281	supported = qos_data->supported;
7282	IPW_DEBUG_QOS("QoS  %d network is QoS active %d  supported %d  "
7283		      "unicast %d\n",
7284		      priv->qos_data.qos_enable, active, supported, unicast);
7285	if (active && priv->qos_data.qos_enable)
7286		return 1;
7287
7288	return 0;
7289
7290}
7291/*
7292* add QoS parameter to the TX command
7293*/
7294static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7295					u16 priority,
7296					struct tfd_data *tfd)
7297{
7298	int tx_queue_id = 0;
7299
7300
7301	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7302	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7303
7304	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7305		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7306		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7307	}
7308	return 0;
7309}
7310
7311/*
* Background work handler to run the QoS activate functionality
7313*/
7314static void ipw_bg_qos_activate(struct work_struct *work)
7315{
7316	struct ipw_priv *priv =
7317		container_of(work, struct ipw_priv, qos_activate);
7318
7319	mutex_lock(&priv->mutex);
7320
7321	if (priv->status & STATUS_ASSOCIATED)
7322		ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7323
7324	mutex_unlock(&priv->mutex);
7325}
7326
7327static int ipw_handle_probe_response(struct net_device *dev,
7328				     struct libipw_probe_response *resp,
7329				     struct libipw_network *network)
7330{
7331	struct ipw_priv *priv = libipw_priv(dev);
7332	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7333			      (network == priv->assoc_network));
7334
7335	ipw_qos_handle_probe_response(priv, active_network, network);
7336
7337	return 0;
7338}
7339
7340static int ipw_handle_beacon(struct net_device *dev,
7341			     struct libipw_beacon *resp,
7342			     struct libipw_network *network)
7343{
7344	struct ipw_priv *priv = libipw_priv(dev);
7345	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7346			      (network == priv->assoc_network));
7347
7348	ipw_qos_handle_probe_response(priv, active_network, network);
7349
7350	return 0;
7351}
7352
7353static int ipw_handle_assoc_response(struct net_device *dev,
7354				     struct libipw_assoc_response *resp,
7355				     struct libipw_network *network)
7356{
7357	struct ipw_priv *priv = libipw_priv(dev);
7358	ipw_qos_association_resp(priv, network);
7359	return 0;
7360}
7361
7362static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7363				       *qos_param)
7364{
7365	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7366				sizeof(*qos_param) * 3, qos_param);
7367}
7368
7369static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7370				     *qos_param)
7371{
7372	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7373				qos_param);
7374}
7375
7376#endif				/* CONFIG_IPW2200_QOS */
7377
7378static int ipw_associate_network(struct ipw_priv *priv,
7379				 struct libipw_network *network,
7380				 struct ipw_supported_rates *rates, int roaming)
7381{
7382	int err;
7383	DECLARE_SSID_BUF(ssid);
7384
7385	if (priv->config & CFG_FIXED_RATE)
7386		ipw_set_fixed_rate(priv, network->mode);
7387
7388	if (!(priv->config & CFG_STATIC_ESSID)) {
7389		priv->essid_len = min(network->ssid_len,
7390				      (u8) IW_ESSID_MAX_SIZE);
7391		memcpy(priv->essid, network->ssid, priv->essid_len);
7392	}
7393
7394	network->last_associate = jiffies;
7395
7396	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7397	priv->assoc_request.channel = network->channel;
7398	priv->assoc_request.auth_key = 0;
7399
7400	if ((priv->capability & CAP_PRIVACY_ON) &&
7401	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7402		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7403		priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7404
7405		if (priv->ieee->sec.level == SEC_LEVEL_1)
7406			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7407
7408	} else if ((priv->capability & CAP_PRIVACY_ON) &&
7409		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7410		priv->assoc_request.auth_type = AUTH_LEAP;
7411	else
7412		priv->assoc_request.auth_type = AUTH_OPEN;
7413
7414	if (priv->ieee->wpa_ie_len) {
7415		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
7416		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7417				 priv->ieee->wpa_ie_len);
7418	}
7419
7420	/*
7421	 * It is valid for our ieee device to support multiple modes, but
7422	 * when it comes to associating to a given network we have to choose
7423	 * just one mode.
7424	 */
7425	if (network->mode & priv->ieee->mode & IEEE_A)
7426		priv->assoc_request.ieee_mode = IPW_A_MODE;
7427	else if (network->mode & priv->ieee->mode & IEEE_G)
7428		priv->assoc_request.ieee_mode = IPW_G_MODE;
7429	else if (network->mode & priv->ieee->mode & IEEE_B)
7430		priv->assoc_request.ieee_mode = IPW_B_MODE;
7431
7432	priv->assoc_request.capability = cpu_to_le16(network->capability);
7433	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7434	    && !(priv->config & CFG_PREAMBLE_LONG)) {
7435		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7436	} else {
7437		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7438
7439		/* Clear the short preamble if we won't be supporting it */
7440		priv->assoc_request.capability &=
7441		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7442	}
7443
7444	/* Clear capability bits that aren't used in Ad Hoc */
7445	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7446		priv->assoc_request.capability &=
7447		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7448
7449	IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7450			"802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7451			roaming ? "Rea" : "A",
7452			print_ssid(ssid, priv->essid, priv->essid_len),
7453			network->channel,
7454			ipw_modes[priv->assoc_request.ieee_mode],
7455			rates->num_rates,
7456			(priv->assoc_request.preamble_length ==
7457			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7458			network->capability &
7459			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7460			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7461			priv->capability & CAP_PRIVACY_ON ?
7462			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
7463			 "(open)") : "",
7464			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7465			priv->capability & CAP_PRIVACY_ON ?
7466			'1' + priv->ieee->sec.active_key : '.',
7467			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7468
7469	priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
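	/* An all-zero TSF in ad-hoc mode indicates a network we are creating
	 * ourselves rather than one we are joining, so ask the firmware to
	 * start a new IBSS. */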
7470	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7471	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7472		priv->assoc_request.assoc_type = HC_IBSS_START;
7473		priv->assoc_request.assoc_tsf_msw = 0;
7474		priv->assoc_request.assoc_tsf_lsw = 0;
7475	} else {
7476		if (unlikely(roaming))
7477			priv->assoc_request.assoc_type = HC_REASSOCIATE;
7478		else
7479			priv->assoc_request.assoc_type = HC_ASSOCIATE;
7480		priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7481		priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7482	}
7483
7484	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7485
7486	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7487		memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7488		priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7489	} else {
7490		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7491		priv->assoc_request.atim_window = 0;
7492	}
7493
7494	priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7495
7496	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7497	if (err) {
7498		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7499		return err;
7500	}
7501
7502	rates->ieee_mode = priv->assoc_request.ieee_mode;
7503	rates->purpose = IPW_RATE_CONNECT;
7504	ipw_send_supported_rates(priv, rates);
7505
7506	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7507		priv->sys_config.dot11g_auto_detection = 1;
7508	else
7509		priv->sys_config.dot11g_auto_detection = 0;
7510
7511	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7512		priv->sys_config.answer_broadcast_ssid_probe = 1;
7513	else
7514		priv->sys_config.answer_broadcast_ssid_probe = 0;
7515
7516	err = ipw_send_system_config(priv);
7517	if (err) {
7518		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7519		return err;
7520	}
7521
7522	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7523	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7524	if (err) {
7525		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7526		return err;
7527	}
7528
7529	/*
7530	 * If preemption is enabled, it is possible for the association
7531	 * to complete before we return from ipw_send_associate.  Therefore
7532	 * we have to be sure to update our private data first.
7533	 */
7534	priv->channel = network->channel;
7535	memcpy(priv->bssid, network->bssid, ETH_ALEN);
7536	priv->status |= STATUS_ASSOCIATING;
7537	priv->status &= ~STATUS_SECURITY_UPDATED;
7538
7539	priv->assoc_network = network;
7540
7541#ifdef CONFIG_IPW2200_QOS
7542	ipw_qos_association(priv, network);
7543#endif
7544
7545	err = ipw_send_associate(priv, &priv->assoc_request);
7546	if (err) {
7547		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7548		return err;
7549	}
7550
7551	IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
7552		  print_ssid(ssid, priv->essid, priv->essid_len),
7553		  priv->bssid);
7554
7555	return 0;
7556}
7557
7558static void ipw_roam(void *data)
7559{
7560	struct ipw_priv *priv = data;
7561	struct libipw_network *network = NULL;
7562	struct ipw_network_match match = {
7563		.network = priv->assoc_network
7564	};
7565
7566	/* The roaming process is as follows:
7567	 *
7568	 * 1.  Missed beacon threshold triggers the roaming process by
7569	 *     setting the status ROAM bit and requesting a scan.
7570	 * 2.  When the scan completes, it schedules the ROAM work
7571	 * 3.  The ROAM work looks at all of the known networks for one that
7572	 *     is better than the currently associated network.  If none is
7573	 *     found, the ROAM process is over (ROAM bit cleared).
7574	 * 4.  If a better network is found, a disassociation request is
7575	 *     sent.
7576	 * 5.  When the disassociation completes, the roam work is again
7577	 *     scheduled.  The second time through, the driver is no longer
7578	 *     associated, and the newly selected network is sent an
7579	 *     association request.
7580	 * 6.  At this point, the roaming process is complete and the ROAM
7581	 *     status bit is cleared.
7582	 */
7583
7584	/* If we are no longer associated, and the roaming bit is no longer
7585	 * set, then we are not actively roaming, so just return */
7586	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7587		return;
7588
7589	if (priv->status & STATUS_ASSOCIATED) {
7590		/* First pass through ROAM process -- look for a better
7591		 * network */
7592		unsigned long flags;
7593		u8 rssi = priv->assoc_network->stats.rssi;
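		/* Temporarily report the worst possible RSSI for the current
		 * AP so other candidates always compare favorably against it;
		 * the real value is restored after the list walk. */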
7594		priv->assoc_network->stats.rssi = -128;
7595		spin_lock_irqsave(&priv->ieee->lock, flags);
7596		list_for_each_entry(network, &priv->ieee->network_list, list) {
7597			if (network != priv->assoc_network)
7598				ipw_best_network(priv, &match, network, 1);
7599		}
7600		spin_unlock_irqrestore(&priv->ieee->lock, flags);
7601		priv->assoc_network->stats.rssi = rssi;
7602
7603		if (match.network == priv->assoc_network) {
7604			IPW_DEBUG_ASSOC("No better APs in this network to "
7605					"roam to.\n");
7606			priv->status &= ~STATUS_ROAMING;
7607			ipw_debug_config(priv);
7608			return;
7609		}
7610
7611		ipw_send_disassociate(priv, 1);
7612		priv->assoc_network = match.network;
7613
7614		return;
7615	}
7616
7617	/* Second pass through ROAM process -- request association */
7618	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7619	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7620	priv->status &= ~STATUS_ROAMING;
7621}
7622
7623static void ipw_bg_roam(struct work_struct *work)
7624{
7625	struct ipw_priv *priv =
7626		container_of(work, struct ipw_priv, roam);
7627	mutex_lock(&priv->mutex);
7628	ipw_roam(priv);
7629	mutex_unlock(&priv->mutex);
7630}
7631
7632static int ipw_associate(void *data)
7633{
7634	struct ipw_priv *priv = data;
7635
7636	struct libipw_network *network = NULL;
7637	struct ipw_network_match match = {
7638		.network = NULL
7639	};
7640	struct ipw_supported_rates *rates;
7641	struct list_head *element;
7642	unsigned long flags;
7643	DECLARE_SSID_BUF(ssid);
7644
7645	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7646		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7647		return 0;
7648	}
7649
7650	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7651		IPW_DEBUG_ASSOC("Not attempting association (already in "
7652				"progress)\n");
7653		return 0;
7654	}
7655
7656	if (priv->status & STATUS_DISASSOCIATING) {
7657		IPW_DEBUG_ASSOC("Not attempting association (in "
7658				"disassociating)\n ");
7659		queue_work(priv->workqueue, &priv->associate);
7660		return 0;
7661	}
7662
7663	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7664		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7665				"initialized)\n");
7666		return 0;
7667	}
7668
7669	if (!(priv->config & CFG_ASSOCIATE) &&
7670	    !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7671		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7672		return 0;
7673	}
7674
7675	/* Protect our use of the network_list */
7676	spin_lock_irqsave(&priv->ieee->lock, flags);
7677	list_for_each_entry(network, &priv->ieee->network_list, list)
7678	    ipw_best_network(priv, &match, network, 0);
7679
7680	network = match.network;
7681	rates = &match.rates;
7682
7683	if (network == NULL &&
7684	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
7685	    priv->config & CFG_ADHOC_CREATE &&
7686	    priv->config & CFG_STATIC_ESSID &&
7687	    priv->config & CFG_STATIC_CHANNEL) {
7688		/* Use oldest network if the free list is empty */
7689		if (list_empty(&priv->ieee->network_free_list)) {
7690			struct libipw_network *oldest = NULL;
7691			struct libipw_network *target;
7692
7693			list_for_each_entry(target, &priv->ieee->network_list, list) {
7694				if ((oldest == NULL) ||
7695				    (target->last_scanned < oldest->last_scanned))
7696					oldest = target;
7697			}
7698
7699			/* If there are no more slots, expire the oldest */
7700			list_del(&oldest->list);
7701			target = oldest;
7702			IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7703					"network list.\n",
7704					print_ssid(ssid, target->ssid,
7705						   target->ssid_len),
7706					target->bssid);
7707			list_add_tail(&target->list,
7708				      &priv->ieee->network_free_list);
7709		}
7710
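		/* Take an entry from the free list and use it to describe the
		 * IBSS we are about to create. */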
7711		element = priv->ieee->network_free_list.next;
7712		network = list_entry(element, struct libipw_network, list);
7713		ipw_adhoc_create(priv, network);
7714		rates = &priv->rates;
7715		list_del(element);
7716		list_add_tail(&network->list, &priv->ieee->network_list);
7717	}
7718	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7719
7720	/* If we reached the end of the list, then we don't have any valid
7721	 * matching APs */
7722	if (!network) {
7723		ipw_debug_config(priv);
7724
7725		if (!(priv->status & STATUS_SCANNING)) {
7726			if (!(priv->config & CFG_SPEED_SCAN))
7727				queue_delayed_work(priv->workqueue,
7728						   &priv->request_scan,
7729						   SCAN_INTERVAL);
7730			else
7731				queue_delayed_work(priv->workqueue,
7732						   &priv->request_scan, 0);
7733		}
7734
7735		return 0;
7736	}
7737
7738	ipw_associate_network(priv, network, rates, 0);
7739
7740	return 1;
7741}
7742
7743static void ipw_bg_associate(struct work_struct *work)
7744{
7745	struct ipw_priv *priv =
7746		container_of(work, struct ipw_priv, associate);
7747	mutex_lock(&priv->mutex);
7748	ipw_associate(priv);
7749	mutex_unlock(&priv->mutex);
7750}
7751
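/* The hardware decrypts frames in place but leaves the Protected bit and the
 * security header/trailer intact.  Clear the bit and strip the CCMP header
 * and MIC, or the WEP IV and ICV, so the stack sees a plain 802.11 frame. */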
7752static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7753				      struct sk_buff *skb)
7754{
7755	struct ieee80211_hdr *hdr;
7756	u16 fc;
7757
7758	hdr = (struct ieee80211_hdr *)skb->data;
7759	fc = le16_to_cpu(hdr->frame_control);
7760	if (!(fc & IEEE80211_FCTL_PROTECTED))
7761		return;
7762
7763	fc &= ~IEEE80211_FCTL_PROTECTED;
7764	hdr->frame_control = cpu_to_le16(fc);
7765	switch (priv->ieee->sec.level) {
7766	case SEC_LEVEL_3:
7767		/* Remove CCMP HDR */
7768		memmove(skb->data + LIBIPW_3ADDR_LEN,
7769			skb->data + LIBIPW_3ADDR_LEN + 8,
7770			skb->len - LIBIPW_3ADDR_LEN - 8);
7771		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
7772		break;
7773	case SEC_LEVEL_2:
7774		break;
7775	case SEC_LEVEL_1:
7776		/* Remove IV */
7777		memmove(skb->data + LIBIPW_3ADDR_LEN,
7778			skb->data + LIBIPW_3ADDR_LEN + 4,
7779			skb->len - LIBIPW_3ADDR_LEN - 4);
7780		skb_trim(skb, skb->len - 8);	/* IV + ICV */
7781		break;
7782	case SEC_LEVEL_0:
7783		break;
7784	default:
7785		printk(KERN_ERR "Unknown security level %d\n",
7786		       priv->ieee->sec.level);
7787		break;
7788	}
7789}
7790
7791static void ipw_handle_data_packet(struct ipw_priv *priv,
7792				   struct ipw_rx_mem_buffer *rxb,
7793				   struct libipw_rx_stats *stats)
7794{
7795	struct net_device *dev = priv->net_dev;
7796	struct libipw_hdr_4addr *hdr;
7797	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7798
7799	/* We received data from the HW, so stop the watchdog */
7800	dev->trans_start = jiffies;
7801
7802	/* We only process data packets if the
7803	 * interface is open */
7804	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7805		     skb_tailroom(rxb->skb))) {
7806		dev->stats.rx_errors++;
7807		priv->wstats.discard.misc++;
7808		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7809		return;
7810	} else if (unlikely(!netif_running(priv->net_dev))) {
7811		dev->stats.rx_dropped++;
7812		priv->wstats.discard.misc++;
7813		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7814		return;
7815	}
7816
7817	/* Advance skb->data to the start of the actual payload */
7818	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7819
7820	/* Set the size of the skb to the size of the frame */
7821	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7822
7823	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7824
7825	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7826	hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7827	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7828	    (is_multicast_ether_addr(hdr->addr1) ?
7829	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7830		ipw_rebuild_decrypted_skb(priv, rxb->skb);
7831
7832	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7833		dev->stats.rx_errors++;
7834	else {			/* libipw_rx succeeded, so it now owns the SKB */
7835		rxb->skb = NULL;
7836		__ipw_led_activity_on(priv);
7837	}
7838}
7839
7840#ifdef CONFIG_IPW2200_RADIOTAP
7841static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7842					   struct ipw_rx_mem_buffer *rxb,
7843					   struct libipw_rx_stats *stats)
7844{
7845	struct net_device *dev = priv->net_dev;
7846	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7847	struct ipw_rx_frame *frame = &pkt->u.frame;
7848
7849	/* initial pull of some data */
7850	u16 received_channel = frame->received_channel;
7851	u8 antennaAndPhy = frame->antennaAndPhy;
7852	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
7853	u16 pktrate = frame->rate;
7854
7855	/* Magic struct that slots into the radiotap header -- no reason
7856	 * to build this manually element by element, we can write it much
7857	 * more efficiently than we can parse it. ORDER MATTERS HERE */
7858	struct ipw_rt_hdr *ipw_rt;
7859
7860	short len = le16_to_cpu(pkt->u.frame.length);
7861
7862	/* We received data from the HW, so stop the watchdog */
7863	dev->trans_start = jiffies;
7864
7865	/* We only process data packets if the
7866	 * interface is open */
7867	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7868		     skb_tailroom(rxb->skb))) {
7869		dev->stats.rx_errors++;
7870		priv->wstats.discard.misc++;
7871		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7872		return;
7873	} else if (unlikely(!netif_running(priv->net_dev))) {
7874		dev->stats.rx_dropped++;
7875		priv->wstats.discard.misc++;
7876		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7877		return;
7878	}
7879
7880	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7881	 * that now */
7882	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7883		/* FIXME: Should alloc bigger skb instead */
7884		dev->stats.rx_dropped++;
7885		priv->wstats.discard.misc++;
7886		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7887		return;
7888	}
7889
7890	/* copy the frame itself */
7891	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7892		rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7893
7894	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7895
7896	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7897	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7898	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* radiotap header length */
7899
7900	/* Big bitfield of all the fields we provide in radiotap */
7901	ipw_rt->rt_hdr.it_present = cpu_to_le32(
7902	     (1 << IEEE80211_RADIOTAP_TSFT) |
7903	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7904	     (1 << IEEE80211_RADIOTAP_RATE) |
7905	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7906	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7907	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7908	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7909
7910	/* Zero the flags, we'll add to them as we go */
7911	ipw_rt->rt_flags = 0;
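	/* parent_tsf is reported as four bytes, least significant first; only
	 * the low 32 bits of the TSF are available here. */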
7912	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7913			       frame->parent_tsf[2] << 16 |
7914			       frame->parent_tsf[1] << 8  |
7915			       frame->parent_tsf[0]);
7916
7917	/* Convert signal to DBM */
7918	ipw_rt->rt_dbmsignal = antsignal;
7919	ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7920
7921	/* Convert the channel data and set the flags */
7922	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7923	if (received_channel > 14) {	/* 802.11a */
7924		ipw_rt->rt_chbitmask =
7925		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7926	} else if (antennaAndPhy & 32) {	/* 802.11b */
7927		ipw_rt->rt_chbitmask =
7928		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7929	} else {		/* 802.11g */
7930		ipw_rt->rt_chbitmask =
7931		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7932	}
7933
7934	/* set the rate in multiples of 500k/s */
7935	switch (pktrate) {
7936	case IPW_TX_RATE_1MB:
7937		ipw_rt->rt_rate = 2;
7938		break;
7939	case IPW_TX_RATE_2MB:
7940		ipw_rt->rt_rate = 4;
7941		break;
7942	case IPW_TX_RATE_5MB:
7943		ipw_rt->rt_rate = 10;
7944		break;
7945	case IPW_TX_RATE_6MB:
7946		ipw_rt->rt_rate = 12;
7947		break;
7948	case IPW_TX_RATE_9MB:
7949		ipw_rt->rt_rate = 18;
7950		break;
7951	case IPW_TX_RATE_11MB:
7952		ipw_rt->rt_rate = 22;
7953		break;
7954	case IPW_TX_RATE_12MB:
7955		ipw_rt->rt_rate = 24;
7956		break;
7957	case IPW_TX_RATE_18MB:
7958		ipw_rt->rt_rate = 36;
7959		break;
7960	case IPW_TX_RATE_24MB:
7961		ipw_rt->rt_rate = 48;
7962		break;
7963	case IPW_TX_RATE_36MB:
7964		ipw_rt->rt_rate = 72;
7965		break;
7966	case IPW_TX_RATE_48MB:
7967		ipw_rt->rt_rate = 96;
7968		break;
7969	case IPW_TX_RATE_54MB:
7970		ipw_rt->rt_rate = 108;
7971		break;
7972	default:
7973		ipw_rt->rt_rate = 0;
7974		break;
7975	}
7976
7977	/* antenna number */
7978	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */
7979
7980	/* set the preamble flag if we have it */
7981	if ((antennaAndPhy & 64))
7982		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7983
7984	/* Set the size of the skb to the size of the frame */
7985	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7986
7987	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7988
7989	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7990		dev->stats.rx_errors++;
7991	else {			/* libipw_rx succeeded, so it now owns the SKB */
7992		rxb->skb = NULL;
7993		/* no LED during capture */
7994	}
7995}
7996#endif
7997
7998#ifdef CONFIG_IPW2200_PROMISCUOUS
7999#define libipw_is_probe_response(fc) \
8000   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
8001    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
8002
8003#define libipw_is_management(fc) \
8004   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
8005
8006#define libipw_is_control(fc) \
8007   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
8008
8009#define libipw_is_data(fc) \
8010   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
8011
8012#define libipw_is_assoc_request(fc) \
8013   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
8014
8015#define libipw_is_reassoc_request(fc) \
8016   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
8017
8018static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
8019				      struct ipw_rx_mem_buffer *rxb,
8020				      struct libipw_rx_stats *stats)
8021{
8022	struct net_device *dev = priv->prom_net_dev;
8023	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
8024	struct ipw_rx_frame *frame = &pkt->u.frame;
8025	struct ipw_rt_hdr *ipw_rt;
8026
8027	/* First cache any information we need before we overwrite
8028	 * the information provided in the skb from the hardware */
8029	struct ieee80211_hdr *hdr;
8030	u16 channel = frame->received_channel;
8031	u8 phy_flags = frame->antennaAndPhy;
8032	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
8033	s8 noise = (s8) le16_to_cpu(frame->noise);
8034	u8 rate = frame->rate;
8035	short len = le16_to_cpu(pkt->u.frame.length);
8036	struct sk_buff *skb;
8037	int hdr_only = 0;
8038	u16 filter = priv->prom_priv->filter;
8039
8040	/* If the filter is set to not include Rx frames then return */
8041	if (filter & IPW_PROM_NO_RX)
8042		return;
8043
8044	/* We received data from the HW, so stop the watchdog */
8045	dev->trans_start = jiffies;
8046
8047	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
8048		dev->stats.rx_errors++;
8049		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
8050		return;
8051	}
8052
8053	/* We only process data packets if the interface is open */
8054	if (unlikely(!netif_running(dev))) {
8055		dev->stats.rx_dropped++;
8056		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
8057		return;
8058	}
8059
8060	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
8061	 * that now */
8062	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
8063		/* FIXME: Should alloc bigger skb instead */
8064		dev->stats.rx_dropped++;
8065		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
8066		return;
8067	}
8068
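	/* Classify the frame and apply the promiscuous filter: excluded frame
	 * types are dropped, and "header only" types are truncated to the
	 * 802.11 header further below. */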
8069	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
8070	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
8071		if (filter & IPW_PROM_NO_MGMT)
8072			return;
8073		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
8074			hdr_only = 1;
8075	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
8076		if (filter & IPW_PROM_NO_CTL)
8077			return;
8078		if (filter & IPW_PROM_CTL_HEADER_ONLY)
8079			hdr_only = 1;
8080	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
8081		if (filter & IPW_PROM_NO_DATA)
8082			return;
8083		if (filter & IPW_PROM_DATA_HEADER_ONLY)
8084			hdr_only = 1;
8085	}
8086
8087	/* Copy the SKB since this is for the promiscuous side */
8088	skb = skb_copy(rxb->skb, GFP_ATOMIC);
8089	if (skb == NULL) {
8090		IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8091		return;
8092	}
8093
8094	/* the radiotap header is built at the front of the copy; the frame data is written right after it below */
8095	ipw_rt = (void *)skb->data;
8096
8097	if (hdr_only)
8098		len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8099
8100	memcpy(ipw_rt->payload, hdr, len);
8101
8102	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8103	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
8104	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt));	/* radiotap header length */
8105
8106	/* Set the size of the skb to the size of the frame */
8107	skb_put(skb, sizeof(*ipw_rt) + len);
8108
8109	/* Big bitfield of all the fields we provide in radiotap */
8110	ipw_rt->rt_hdr.it_present = cpu_to_le32(
8111	     (1 << IEEE80211_RADIOTAP_TSFT) |
8112	     (1 << IEEE80211_RADIOTAP_FLAGS) |
8113	     (1 << IEEE80211_RADIOTAP_RATE) |
8114	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
8115	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8116	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8117	     (1 << IEEE80211_RADIOTAP_ANTENNA));
8118
8119	/* Zero the flags, we'll add to them as we go */
8120	ipw_rt->rt_flags = 0;
8121	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8122			       frame->parent_tsf[2] << 16 |
8123			       frame->parent_tsf[1] << 8  |
8124			       frame->parent_tsf[0]);
8125
8126	/* Convert to DBM */
8127	ipw_rt->rt_dbmsignal = signal;
8128	ipw_rt->rt_dbmnoise = noise;
8129
8130	/* Convert the channel data and set the flags */
8131	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8132	if (channel > 14) {	/* 802.11a */
8133		ipw_rt->rt_chbitmask =
8134		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8135	} else if (phy_flags & (1 << 5)) {	/* 802.11b */
8136		ipw_rt->rt_chbitmask =
8137		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8138	} else {		/* 802.11g */
8139		ipw_rt->rt_chbitmask =
8140		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8141	}
8142
8143	/* set the rate in multiples of 500k/s */
8144	switch (rate) {
8145	case IPW_TX_RATE_1MB:
8146		ipw_rt->rt_rate = 2;
8147		break;
8148	case IPW_TX_RATE_2MB:
8149		ipw_rt->rt_rate = 4;
8150		break;
8151	case IPW_TX_RATE_5MB:
8152		ipw_rt->rt_rate = 10;
8153		break;
8154	case IPW_TX_RATE_6MB:
8155		ipw_rt->rt_rate = 12;
8156		break;
8157	case IPW_TX_RATE_9MB:
8158		ipw_rt->rt_rate = 18;
8159		break;
8160	case IPW_TX_RATE_11MB:
8161		ipw_rt->rt_rate = 22;
8162		break;
8163	case IPW_TX_RATE_12MB:
8164		ipw_rt->rt_rate = 24;
8165		break;
8166	case IPW_TX_RATE_18MB:
8167		ipw_rt->rt_rate = 36;
8168		break;
8169	case IPW_TX_RATE_24MB:
8170		ipw_rt->rt_rate = 48;
8171		break;
8172	case IPW_TX_RATE_36MB:
8173		ipw_rt->rt_rate = 72;
8174		break;
8175	case IPW_TX_RATE_48MB:
8176		ipw_rt->rt_rate = 96;
8177		break;
8178	case IPW_TX_RATE_54MB:
8179		ipw_rt->rt_rate = 108;
8180		break;
8181	default:
8182		ipw_rt->rt_rate = 0;
8183		break;
8184	}
8185
8186	/* antenna number */
8187	ipw_rt->rt_antenna = (phy_flags & 3);
8188
8189	/* set the preamble flag if we have it */
8190	if (phy_flags & (1 << 6))
8191		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8192
8193	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8194
8195	if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8196		dev->stats.rx_errors++;
8197		dev_kfree_skb_any(skb);
8198	}
8199}
8200#endif
8201
8202static int is_network_packet(struct ipw_priv *priv,
8203				    struct libipw_hdr_4addr *header)
8204{
8205	/* Filter incoming packets to determine if they are targeted toward
8206	 * this network, discarding packets coming from ourselves */
8207	switch (priv->ieee->iw_mode) {
8208	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
8209		/* packets from our adapter are dropped (echo) */
8210		if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8211			return 0;
8212
8213		/* {broad,multi}cast packets to our BSSID go through */
8214		if (is_multicast_ether_addr(header->addr1))
8215			return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8216
8217		/* packets to our adapter go through */
8218		return !memcmp(header->addr1, priv->net_dev->dev_addr,
8219			       ETH_ALEN);
8220
8221	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
8222		/* packets from our adapter are dropped (echo) */
8223		if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8224			return 0;
8225
8226		/* {broad,multi}cast packets to our BSS go through */
8227		if (is_multicast_ether_addr(header->addr1))
8228			return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8229
8230		/* packets to our adapter go through */
8231		return !memcmp(header->addr1, priv->net_dev->dev_addr,
8232			       ETH_ALEN);
8233	}
8234
8235	return 1;
8236}
8237
8238#define IPW_PACKET_RETRY_TIME HZ
8239
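/* Detect retransmitted frames by remembering the last sequence and fragment
 * numbers seen from each transmitter: a per-station hash in IBSS mode, a
 * single slot in infrastructure mode.  Matches are only treated as duplicates
 * within IPW_PACKET_RETRY_TIME of the previous packet. */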
8240static  int is_duplicate_packet(struct ipw_priv *priv,
8241				      struct libipw_hdr_4addr *header)
8242{
8243	u16 sc = le16_to_cpu(header->seq_ctl);
8244	u16 seq = WLAN_GET_SEQ_SEQ(sc);
8245	u16 frag = WLAN_GET_SEQ_FRAG(sc);
8246	u16 *last_seq, *last_frag;
8247	unsigned long *last_time;
8248
8249	switch (priv->ieee->iw_mode) {
8250	case IW_MODE_ADHOC:
8251		{
8252			struct list_head *p;
8253			struct ipw_ibss_seq *entry = NULL;
8254			u8 *mac = header->addr2;
8255			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8256
8257			__list_for_each(p, &priv->ibss_mac_hash[index]) {
8258				entry =
8259				    list_entry(p, struct ipw_ibss_seq, list);
8260				if (!memcmp(entry->mac, mac, ETH_ALEN))
8261					break;
8262			}
8263			if (p == &priv->ibss_mac_hash[index]) {
8264				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8265				if (!entry) {
8266					IPW_ERROR
8267					    ("Cannot malloc new mac entry\n");
8268					return 0;
8269				}
8270				memcpy(entry->mac, mac, ETH_ALEN);
8271				entry->seq_num = seq;
8272				entry->frag_num = frag;
8273				entry->packet_time = jiffies;
8274				list_add(&entry->list,
8275					 &priv->ibss_mac_hash[index]);
8276				return 0;
8277			}
8278			last_seq = &entry->seq_num;
8279			last_frag = &entry->frag_num;
8280			last_time = &entry->packet_time;
8281			break;
8282		}
8283	case IW_MODE_INFRA:
8284		last_seq = &priv->last_seq_num;
8285		last_frag = &priv->last_frag_num;
8286		last_time = &priv->last_packet_time;
8287		break;
8288	default:
8289		return 0;
8290	}
8291	if ((*last_seq == seq) &&
8292	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8293		if (*last_frag == frag)
8294			goto drop;
8295		if (*last_frag + 1 != frag)
8296			/* out-of-order fragment */
8297			goto drop;
8298	} else
8299		*last_seq = seq;
8300
8301	*last_frag = frag;
8302	*last_time = jiffies;
8303	return 0;
8304
8305      drop:
8306	/* The BUG_ON below is left commented out: the card has been observed
8307	 * to receive duplicate packets whose FCTL_RETRY bit is not set while
8308	 * in IBSS mode with fragmentation enabled.
8309	 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8310	return 1;
8311}
8312
8313static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8314				   struct ipw_rx_mem_buffer *rxb,
8315				   struct libipw_rx_stats *stats)
8316{
8317	struct sk_buff *skb = rxb->skb;
8318	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8319	struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8320	    (skb->data + IPW_RX_FRAME_SIZE);
8321
8322	libipw_rx_mgt(priv->ieee, header, stats);
8323
8324	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8325	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8326	      IEEE80211_STYPE_PROBE_RESP) ||
8327	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8328	      IEEE80211_STYPE_BEACON))) {
8329		if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8330			ipw_add_station(priv, header->addr2);
8331	}
8332
8333	if (priv->config & CFG_NET_STATS) {
8334		IPW_DEBUG_HC("sending stat packet\n");
8335
8336		/* Set the size of the skb to the size of the full
8337		 * ipw header and 802.11 frame */
8338		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8339			IPW_RX_FRAME_SIZE);
8340
8341		/* Advance past the ipw packet header to the 802.11 frame */
8342		skb_pull(skb, IPW_RX_FRAME_SIZE);
8343
8344		/* Push the libipw_rx_stats before the 802.11 frame */
8345		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8346
8347		skb->dev = priv->ieee->dev;
8348
8349		/* Point raw at the libipw_stats */
8350		skb_reset_mac_header(skb);
8351
8352		skb->pkt_type = PACKET_OTHERHOST;
8353		skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8354		memset(skb->cb, 0, sizeof(rxb->skb->cb));
8355		netif_rx(skb);
8356		rxb->skb = NULL;
8357	}
8358}
8359
8360/*
8361 * Main entry function for receiving a packet with 802.11 headers.  This
8362 * should be called whenever the FW has notified us that there is a new
8363 * skb in the receive queue.
8364 */
8365static void ipw_rx(struct ipw_priv *priv)
8366{
8367	struct ipw_rx_mem_buffer *rxb;
8368	struct ipw_rx_packet *pkt;
8369	struct libipw_hdr_4addr *header;
8370	u32 r, w, i;
8371	u8 network_packet;
8372	u8 fill_rx = 0;
8373
8374	r = ipw_read32(priv, IPW_RX_READ_INDEX);
8375	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8376	i = priv->rxq->read;
8377
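	/* If at least half of the Rx queue is unused, replenish it inside the
	 * processing loop below rather than only once at the end. */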
8378	if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8379		fill_rx = 1;
8380
8381	while (i != r) {
8382		rxb = priv->rxq->queue[i];
8383		if (unlikely(rxb == NULL)) {
8384			printk(KERN_CRIT "Queue not allocated!\n");
8385			break;
8386		}
8387		priv->rxq->queue[i] = NULL;
8388
8389		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8390					    IPW_RX_BUF_SIZE,
8391					    PCI_DMA_FROMDEVICE);
8392
8393		pkt = (struct ipw_rx_packet *)rxb->skb->data;
8394		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8395			     pkt->header.message_type,
8396			     pkt->header.rx_seq_num, pkt->header.control_bits);
8397
8398		switch (pkt->header.message_type) {
8399		case RX_FRAME_TYPE:	/* 802.11 frame */  {
8400				struct libipw_rx_stats stats = {
8401					.rssi = pkt->u.frame.rssi_dbm -
8402					    IPW_RSSI_TO_DBM,
8403					.signal =
8404					    pkt->u.frame.rssi_dbm -
8405					    IPW_RSSI_TO_DBM + 0x100,
8406					.noise =
8407					    le16_to_cpu(pkt->u.frame.noise),
8408					.rate = pkt->u.frame.rate,
8409					.mac_time = jiffies,
8410					.received_channel =
8411					    pkt->u.frame.received_channel,
8412					.freq =
8413					    (pkt->u.frame.
8414					     control & (1 << 0)) ?
8415					    LIBIPW_24GHZ_BAND :
8416					    LIBIPW_52GHZ_BAND,
8417					.len = le16_to_cpu(pkt->u.frame.length),
8418				};
8419
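				/* Only flag the statistics the firmware
				 * actually reported (non-zero values). */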
8420				if (stats.rssi != 0)
8421					stats.mask |= LIBIPW_STATMASK_RSSI;
8422				if (stats.signal != 0)
8423					stats.mask |= LIBIPW_STATMASK_SIGNAL;
8424				if (stats.noise != 0)
8425					stats.mask |= LIBIPW_STATMASK_NOISE;
8426				if (stats.rate != 0)
8427					stats.mask |= LIBIPW_STATMASK_RATE;
8428
8429				priv->rx_packets++;
8430
8431#ifdef CONFIG_IPW2200_PROMISCUOUS
8432	if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8433		ipw_handle_promiscuous_rx(priv, rxb, &stats);
8434#endif
8435
8436#ifdef CONFIG_IPW2200_MONITOR
8437				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8438#ifdef CONFIG_IPW2200_RADIOTAP
8439
8440                ipw_handle_data_packet_monitor(priv,
8441					       rxb,
8442					       &stats);
8443#else
8444		ipw_handle_data_packet(priv, rxb,
8445				       &stats);
8446#endif
8447					break;
8448				}
8449#endif
8450
8451				header =
8452				    (struct libipw_hdr_4addr *)(rxb->skb->
8453								   data +
8454								   IPW_RX_FRAME_SIZE);
8455				/* TODO: Check Ad-Hoc dest/source and make sure
8456				 * that we are actually parsing these packets
8457				 * correctly -- we should probably use the
8458				 * frame control of the packet and disregard
8459				 * the current iw_mode */
8460
8461				network_packet =
8462				    is_network_packet(priv, header);
8463				if (network_packet && priv->assoc_network) {
8464					priv->assoc_network->stats.rssi =
8465					    stats.rssi;
8466					priv->exp_avg_rssi =
8467					    exponential_average(priv->exp_avg_rssi,
8468					    stats.rssi, DEPTH_RSSI);
8469				}
8470
8471				IPW_DEBUG_RX("Frame: len=%u\n",
8472					     le16_to_cpu(pkt->u.frame.length));
8473
8474				if (le16_to_cpu(pkt->u.frame.length) <
8475				    libipw_get_hdrlen(le16_to_cpu(
8476						    header->frame_ctl))) {
8477					IPW_DEBUG_DROP
8478					    ("Received packet is too small. "
8479					     "Dropping.\n");
8480					priv->net_dev->stats.rx_errors++;
8481					priv->wstats.discard.misc++;
8482					break;
8483				}
8484
8485				switch (WLAN_FC_GET_TYPE
8486					(le16_to_cpu(header->frame_ctl))) {
8487
8488				case IEEE80211_FTYPE_MGMT:
8489					ipw_handle_mgmt_packet(priv, rxb,
8490							       &stats);
8491					break;
8492
8493				case IEEE80211_FTYPE_CTL:
8494					break;
8495
8496				case IEEE80211_FTYPE_DATA:
8497					if (unlikely(!network_packet ||
8498						     is_duplicate_packet(priv,
8499									 header)))
8500					{
8501						IPW_DEBUG_DROP("Dropping: "
8502							       "%pM, "
8503							       "%pM, "
8504							       "%pM\n",
8505							       header->addr1,
8506							       header->addr2,
8507							       header->addr3);
8508						break;
8509					}
8510
8511					ipw_handle_data_packet(priv, rxb,
8512							       &stats);
8513
8514					break;
8515				}
8516				break;
8517			}
8518
8519		case RX_HOST_NOTIFICATION_TYPE:{
8520				IPW_DEBUG_RX
8521				    ("Notification: subtype=%02X flags=%02X size=%d\n",
8522				     pkt->u.notification.subtype,
8523				     pkt->u.notification.flags,
8524				     le16_to_cpu(pkt->u.notification.size));
8525				ipw_rx_notification(priv, &pkt->u.notification);
8526				break;
8527			}
8528
8529		default:
8530			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8531				     pkt->header.message_type);
8532			break;
8533		}
8534
8535		/* For now we just don't re-use anything.  We can tweak this
8536		 * later to try and re-use notification packets and SKBs that
8537		 * fail to Rx correctly */
8538		if (rxb->skb != NULL) {
8539			dev_kfree_skb_any(rxb->skb);
8540			rxb->skb = NULL;
8541		}
8542
8543		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8544				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8545		list_add_tail(&rxb->list, &priv->rxq->rx_used);
8546
8547		i = (i + 1) % RX_QUEUE_SIZE;
8548
8549		/* If there are a lot of unused frames, restock the Rx queue
8550		 * so the ucode won't assert */
8551		if (fill_rx) {
8552			priv->rxq->read = i;
8553			ipw_rx_queue_replenish(priv);
8554		}
8555	}
8556
8557	/* Backtrack one entry */
8558	priv->rxq->read = i;
8559	ipw_rx_queue_restock(priv);
8560}
8561
8562#define DEFAULT_RTS_THRESHOLD     2304U
8563#define MIN_RTS_THRESHOLD         1U
8564#define MAX_RTS_THRESHOLD         2304U
8565#define DEFAULT_BEACON_INTERVAL   100U
8566#define	DEFAULT_SHORT_RETRY_LIMIT 7U
8567#define	DEFAULT_LONG_RETRY_LIMIT  4U
8568
8569/**
8570 * ipw_sw_reset - reinitialize the driver's software state from the module parameters
8571 * @option: options to control different reset behaviour
8572 * 	    0 = reset everything except the 'disable' module_param
8573 * 	    1 = reset everything and print out driver info (for probe only)
8574 * 	    2 = reset everything
8575 */
8576static int ipw_sw_reset(struct ipw_priv *priv, int option)
8577{
8578	int band, modulation;
8579	int old_mode = priv->ieee->iw_mode;
8580
8581	/* Initialize module parameter values here */
8582	priv->config = 0;
8583
8584	/* We default to disabling the LED code as right now it causes
8585	 * too many systems to lock up... */
8586	if (!led_support)
8587		priv->config |= CFG_NO_LED;
8588
8589	if (associate)
8590		priv->config |= CFG_ASSOCIATE;
8591	else
8592		IPW_DEBUG_INFO("Auto associate disabled.\n");
8593
8594	if (auto_create)
8595		priv->config |= CFG_ADHOC_CREATE;
8596	else
8597		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8598
8599	priv->config &= ~CFG_STATIC_ESSID;
8600	priv->essid_len = 0;
8601	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8602
8603	if (disable && option) {
8604		priv->status |= STATUS_RF_KILL_SW;
8605		IPW_DEBUG_INFO("Radio disabled.\n");
8606	}
8607
8608	if (default_channel != 0) {
8609		priv->config |= CFG_STATIC_CHANNEL;
8610		priv->channel = default_channel;
8611		IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8612		/* TODO: Validate that provided channel is in range */
8613	}
8614#ifdef CONFIG_IPW2200_QOS
8615	ipw_qos_init(priv, qos_enable, qos_burst_enable,
8616		     burst_duration_CCK, burst_duration_OFDM);
8617#endif				/* CONFIG_IPW2200_QOS */
8618
8619	switch (network_mode) {
8620	case 1:
8621		priv->ieee->iw_mode = IW_MODE_ADHOC;
8622		priv->net_dev->type = ARPHRD_ETHER;
8623
8624		break;
8625#ifdef CONFIG_IPW2200_MONITOR
8626	case 2:
8627		priv->ieee->iw_mode = IW_MODE_MONITOR;
8628#ifdef CONFIG_IPW2200_RADIOTAP
8629		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8630#else
8631		priv->net_dev->type = ARPHRD_IEEE80211;
8632#endif
8633		break;
8634#endif
8635	default:
8636	case 0:
8637		priv->net_dev->type = ARPHRD_ETHER;
8638		priv->ieee->iw_mode = IW_MODE_INFRA;
8639		break;
8640	}
8641
8642	if (hwcrypto) {
8643		priv->ieee->host_encrypt = 0;
8644		priv->ieee->host_encrypt_msdu = 0;
8645		priv->ieee->host_decrypt = 0;
8646		priv->ieee->host_mc_decrypt = 0;
8647	}
8648	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8649
8650	/* IPW2200/2915 is able to do hardware fragmentation. */
8651	priv->ieee->host_open_frag = 0;
8652
8653	if ((priv->pci_dev->device == 0x4223) ||
8654	    (priv->pci_dev->device == 0x4224)) {
8655		if (option == 1)
8656			printk(KERN_INFO DRV_NAME
8657			       ": Detected Intel PRO/Wireless 2915ABG Network "
8658			       "Connection\n");
8659		priv->ieee->abg_true = 1;
8660		band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8661		modulation = LIBIPW_OFDM_MODULATION |
8662		    LIBIPW_CCK_MODULATION;
8663		priv->adapter = IPW_2915ABG;
8664		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8665	} else {
8666		if (option == 1)
8667			printk(KERN_INFO DRV_NAME
8668			       ": Detected Intel PRO/Wireless 2200BG Network "
8669			       "Connection\n");
8670
8671		priv->ieee->abg_true = 0;
8672		band = LIBIPW_24GHZ_BAND;
8673		modulation = LIBIPW_OFDM_MODULATION |
8674		    LIBIPW_CCK_MODULATION;
8675		priv->adapter = IPW_2200BG;
8676		priv->ieee->mode = IEEE_G | IEEE_B;
8677	}
8678
8679	priv->ieee->freq_band = band;
8680	priv->ieee->modulation = modulation;
8681
8682	priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8683
8684	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8685	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8686
8687	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8688	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8689	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8690
8691	/* If power management is turned on, default to AC mode */
8692	priv->power_mode = IPW_POWER_AC;
8693	priv->tx_power = IPW_TX_POWER_DEFAULT;
8694
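	/* Report whether the interface type survived the reset unchanged. */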
8695	return old_mode == priv->ieee->iw_mode;
8696}
8697
8698/*
8699 * This file defines the Wireless Extension handlers.  It does not
8700 * define any methods of hardware manipulation and relies on the
8701 * functions defined in ipw_main to provide the HW interaction.
8702 *
8703 * The exception to this is the use of the ipw_get_ordinal()
8704 * function used to poll the hardware vs. making unnecessary calls.
8705 *
8706 */
8707
8708static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8709{
8710	if (channel == 0) {
8711		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8712		priv->config &= ~CFG_STATIC_CHANNEL;
8713		IPW_DEBUG_ASSOC("Attempting to associate with new "
8714				"parameters.\n");
8715		ipw_associate(priv);
8716		return 0;
8717	}
8718
8719	priv->config |= CFG_STATIC_CHANNEL;
8720
8721	if (priv->channel == channel) {
8722		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8723			       channel);
8724		return 0;
8725	}
8726
8727	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8728	priv->channel = channel;
8729
8730#ifdef CONFIG_IPW2200_MONITOR
8731	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8732		int i;
8733		if (priv->status & STATUS_SCANNING) {
8734			IPW_DEBUG_SCAN("Scan abort triggered due to "
8735				       "channel change.\n");
8736			ipw_abort_scan(priv);
8737		}
8738
8739		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8740			udelay(10);
8741
8742		if (priv->status & STATUS_SCANNING)
8743			IPW_DEBUG_SCAN("Still scanning...\n");
8744		else
8745			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8746				       1000 - i);
8747
8748		return 0;
8749	}
8750#endif				/* CONFIG_IPW2200_MONITOR */
8751
8752	/* Network configuration changed -- force [re]association */
8753	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8754	if (!ipw_disassociate(priv))
8755		ipw_associate(priv);
8756
8757	return 0;
8758}
8759
8760static int ipw_wx_set_freq(struct net_device *dev,
8761			   struct iw_request_info *info,
8762			   union iwreq_data *wrqu, char *extra)
8763{
8764	struct ipw_priv *priv = libipw_priv(dev);
8765	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8766	struct iw_freq *fwrq = &wrqu->freq;
8767	int ret = 0, i;
8768	u8 channel, flags;
8769	int band;
8770
8771	if (fwrq->m == 0) {
8772		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8773		mutex_lock(&priv->mutex);
8774		ret = ipw_set_channel(priv, 0);
8775		mutex_unlock(&priv->mutex);
8776		return ret;
8777	}
8778	/* if setting by freq convert to channel */
8779	if (fwrq->e == 1) {
8780		channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8781		if (channel == 0)
8782			return -EINVAL;
8783	} else
8784		channel = fwrq->m;
8785
8786	if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8787		return -EINVAL;
8788
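	/* Ad-hoc mode has to transmit on the channel itself, so channels
	 * flagged passive-only cannot be used. */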
8789	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8790		i = libipw_channel_to_index(priv->ieee, channel);
8791		if (i == -1)
8792			return -EINVAL;
8793
8794		flags = (band == LIBIPW_24GHZ_BAND) ?
8795		    geo->bg[i].flags : geo->a[i].flags;
8796		if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8797			IPW_DEBUG_WX("Rejecting passive-only channel for Ad-Hoc\n");
8798			return -EINVAL;
8799		}
8800	}
8801
8802	IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8803	mutex_lock(&priv->mutex);
8804	ret = ipw_set_channel(priv, channel);
8805	mutex_unlock(&priv->mutex);
8806	return ret;
8807}
8808
8809static int ipw_wx_get_freq(struct net_device *dev,
8810			   struct iw_request_info *info,
8811			   union iwreq_data *wrqu, char *extra)
8812{
8813	struct ipw_priv *priv = libipw_priv(dev);
8814
8815	wrqu->freq.e = 0;
8816
8817	/* If we are associated, trying to associate, or have a statically
8818	 * configured CHANNEL then return that; otherwise return ANY */
8819	mutex_lock(&priv->mutex);
8820	if (priv->config & CFG_STATIC_CHANNEL ||
8821	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8822		int i;
8823
8824		i = libipw_channel_to_index(priv->ieee, priv->channel);
8825		BUG_ON(i == -1);
8826		wrqu->freq.e = 1;
8827
8828		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8829		case LIBIPW_52GHZ_BAND:
8830			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8831			break;
8832
8833		case LIBIPW_24GHZ_BAND:
8834			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8835			break;
8836
8837		default:
8838			BUG();
8839		}
8840	} else
8841		wrqu->freq.m = 0;
8842
8843	mutex_unlock(&priv->mutex);
8844	IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8845	return 0;
8846}
8847
8848static int ipw_wx_set_mode(struct net_device *dev,
8849			   struct iw_request_info *info,
8850			   union iwreq_data *wrqu, char *extra)
8851{
8852	struct ipw_priv *priv = libipw_priv(dev);
8853	int err = 0;
8854
8855	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8856
8857	switch (wrqu->mode) {
8858#ifdef CONFIG_IPW2200_MONITOR
8859	case IW_MODE_MONITOR:
8860#endif
8861	case IW_MODE_ADHOC:
8862	case IW_MODE_INFRA:
8863		break;
8864	case IW_MODE_AUTO:
8865		wrqu->mode = IW_MODE_INFRA;
8866		break;
8867	default:
8868		return -EINVAL;
8869	}
8870	if (wrqu->mode == priv->ieee->iw_mode)
8871		return 0;
8872
8873	mutex_lock(&priv->mutex);
8874
8875	ipw_sw_reset(priv, 0);
8876
8877#ifdef CONFIG_IPW2200_MONITOR
8878	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8879		priv->net_dev->type = ARPHRD_ETHER;
8880
8881	if (wrqu->mode == IW_MODE_MONITOR)
8882#ifdef CONFIG_IPW2200_RADIOTAP
8883		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8884#else
8885		priv->net_dev->type = ARPHRD_IEEE80211;
8886#endif
8887#endif				/* CONFIG_IPW2200_MONITOR */
8888
8889	/* Free the existing firmware and reset the fw_loaded
8890	 * flag so ipw_load() will bring in the new firmware */
8891	free_firmware();
8892
8893	priv->ieee->iw_mode = wrqu->mode;
8894
8895	queue_work(priv->workqueue, &priv->adapter_restart);
8896	mutex_unlock(&priv->mutex);
8897	return err;
8898}
8899
8900static int ipw_wx_get_mode(struct net_device *dev,
8901			   struct iw_request_info *info,
8902			   union iwreq_data *wrqu, char *extra)
8903{
8904	struct ipw_priv *priv = libipw_priv(dev);
8905	mutex_lock(&priv->mutex);
8906	wrqu->mode = priv->ieee->iw_mode;
8907	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8908	mutex_unlock(&priv->mutex);
8909	return 0;
8910}
8911
8912/* Values are in microseconds */
8913static const s32 timeout_duration[] = {
8914	350000,
8915	250000,
8916	75000,
8917	37000,
8918	25000,
8919};
8920
8921static const s32 period_duration[] = {
8922	400000,
8923	700000,
8924	1000000,
8925	1000000,
8926	1000000
8927};
8928
8929static int ipw_wx_get_range(struct net_device *dev,
8930			    struct iw_request_info *info,
8931			    union iwreq_data *wrqu, char *extra)
8932{
8933	struct ipw_priv *priv = libipw_priv(dev);
8934	struct iw_range *range = (struct iw_range *)extra;
8935	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8936	int i = 0, j;
8937
8938	wrqu->data.length = sizeof(*range);
8939	memset(range, 0, sizeof(*range));
8940
8941	/* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8942	range->throughput = 27 * 1000 * 1000;
8943
8944	range->max_qual.qual = 100;
8945	/* TODO: Find real max RSSI and stick here */
8946	range->max_qual.level = 0;
8947	range->max_qual.noise = 0;
8948	range->max_qual.updated = 7;	/* Updated all three */
8949
8950	range->avg_qual.qual = 70;
8951	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8952	range->avg_qual.level = 0;	/* FIXME to real average level */
8953	range->avg_qual.noise = 0;
8954	range->avg_qual.updated = 7;	/* Updated all three */
8955	mutex_lock(&priv->mutex);
8956	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8957
8958	for (i = 0; i < range->num_bitrates; i++)
8959		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8960		    500000;
8961
8962	range->max_rts = DEFAULT_RTS_THRESHOLD;
8963	range->min_frag = MIN_FRAG_THRESHOLD;
8964	range->max_frag = MAX_FRAG_THRESHOLD;
8965
8966	range->encoding_size[0] = 5;
8967	range->encoding_size[1] = 13;
8968	range->num_encoding_sizes = 2;
8969	range->max_encoding_tokens = WEP_KEYS;
8970
8971	/* Set the Wireless Extension versions */
8972	range->we_version_compiled = WIRELESS_EXT;
8973	range->we_version_source = 18;
8974
8975	i = 0;
8976	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8977		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8978			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8979			    (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8980				continue;
8981
8982			range->freq[i].i = geo->bg[j].channel;
8983			range->freq[i].m = geo->bg[j].freq * 100000;
8984			range->freq[i].e = 1;
8985			i++;
8986		}
8987	}
8988
8989	if (priv->ieee->mode & IEEE_A) {
8990		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8991			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8992			    (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8993				continue;
8994
8995			range->freq[i].i = geo->a[j].channel;
8996			range->freq[i].m = geo->a[j].freq * 100000;
8997			range->freq[i].e = 1;
8998			i++;
8999		}
9000	}
9001
9002	range->num_channels = i;
9003	range->num_frequency = i;
9004
9005	mutex_unlock(&priv->mutex);
9006
9007	/* Event capability (kernel + driver) */
9008	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
9009				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
9010				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
9011				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
9012	range->event_capa[1] = IW_EVENT_CAPA_K_1;
9013
9014	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
9015		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
9016
9017	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
9018
9019	IPW_DEBUG_WX("GET Range\n");
9020	return 0;
9021}
9022
9023static int ipw_wx_set_wap(struct net_device *dev,
9024			  struct iw_request_info *info,
9025			  union iwreq_data *wrqu, char *extra)
9026{
9027	struct ipw_priv *priv = libipw_priv(dev);
9028
9029	static const unsigned char any[] = {
9030		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
9031	};
9032	static const unsigned char off[] = {
9033		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
9034	};
9035
9036	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
9037		return -EINVAL;
9038	mutex_lock(&priv->mutex);
9039	if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
9040	    !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9041		/* we disable mandatory BSSID association */
9042		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
9043		priv->config &= ~CFG_STATIC_BSSID;
9044		IPW_DEBUG_ASSOC("Attempting to associate with new "
9045				"parameters.\n");
9046		ipw_associate(priv);
9047		mutex_unlock(&priv->mutex);
9048		return 0;
9049	}
9050
9051	priv->config |= CFG_STATIC_BSSID;
9052	if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
9053		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
9054		mutex_unlock(&priv->mutex);
9055		return 0;
9056	}
9057
9058	IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9059		     wrqu->ap_addr.sa_data);
9060
9061	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9062
9063	/* Network configuration changed -- force [re]association */
9064	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9065	if (!ipw_disassociate(priv))
9066		ipw_associate(priv);
9067
9068	mutex_unlock(&priv->mutex);
9069	return 0;
9070}
9071
9072static int ipw_wx_get_wap(struct net_device *dev,
9073			  struct iw_request_info *info,
9074			  union iwreq_data *wrqu, char *extra)
9075{
9076	struct ipw_priv *priv = libipw_priv(dev);
9077
9078	/* If we are associated, trying to associate, or have a statically
9079	 * configured BSSID then return that; otherwise return ANY */
9080	mutex_lock(&priv->mutex);
9081	if (priv->config & CFG_STATIC_BSSID ||
9082	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9083		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9084		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9085	} else
9086		memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9087
9088	IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9089		     wrqu->ap_addr.sa_data);
9090	mutex_unlock(&priv->mutex);
9091	return 0;
9092}
9093
9094static int ipw_wx_set_essid(struct net_device *dev,
9095			    struct iw_request_info *info,
9096			    union iwreq_data *wrqu, char *extra)
9097{
9098	struct ipw_priv *priv = libipw_priv(dev);
9099	int length;
9100	DECLARE_SSID_BUF(ssid);
9101
9102	mutex_lock(&priv->mutex);
9103
9104	if (!wrqu->essid.flags) {
9105		IPW_DEBUG_WX("Setting ESSID to ANY\n");
9106		ipw_disassociate(priv);
9107		priv->config &= ~CFG_STATIC_ESSID;
9108		ipw_associate(priv);
9109		mutex_unlock(&priv->mutex);
9110		return 0;
9111	}
9113
9114	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9115
9116	priv->config |= CFG_STATIC_ESSID;
9117
9118	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9119	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9120		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9121		mutex_unlock(&priv->mutex);
9122		return 0;
9123	}
9124
9125	IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9126		     print_ssid(ssid, extra, length), length);
9127
9128	priv->essid_len = length;
9129	memcpy(priv->essid, extra, priv->essid_len);
9130
9131	/* Network configuration changed -- force [re]association */
9132	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9133	if (!ipw_disassociate(priv))
9134		ipw_associate(priv);
9135
9136	mutex_unlock(&priv->mutex);
9137	return 0;
9138}
9139
9140static int ipw_wx_get_essid(struct net_device *dev,
9141			    struct iw_request_info *info,
9142			    union iwreq_data *wrqu, char *extra)
9143{
9144	struct ipw_priv *priv = libipw_priv(dev);
9145	DECLARE_SSID_BUF(ssid);
9146
9147	/* If we are associated, trying to associate, or have a statically
9148	 * configured ESSID then return that; otherwise return ANY */
9149	mutex_lock(&priv->mutex);
9150	if (priv->config & CFG_STATIC_ESSID ||
9151	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9152		IPW_DEBUG_WX("Getting essid: '%s'\n",
9153			     print_ssid(ssid, priv->essid, priv->essid_len));
9154		memcpy(extra, priv->essid, priv->essid_len);
9155		wrqu->essid.length = priv->essid_len;
9156		wrqu->essid.flags = 1;	/* active */
9157	} else {
9158		IPW_DEBUG_WX("Getting essid: ANY\n");
9159		wrqu->essid.length = 0;
9160		wrqu->essid.flags = 0;	/* any */
9161	}
9162	mutex_unlock(&priv->mutex);
9163	return 0;
9164}
9165
9166static int ipw_wx_set_nick(struct net_device *dev,
9167			   struct iw_request_info *info,
9168			   union iwreq_data *wrqu, char *extra)
9169{
9170	struct ipw_priv *priv = libipw_priv(dev);
9171
9172	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9173	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9174		return -E2BIG;
9175	mutex_lock(&priv->mutex);
9176	wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9177	memset(priv->nick, 0, sizeof(priv->nick));
9178	memcpy(priv->nick, extra, wrqu->data.length);
9179	IPW_DEBUG_TRACE("<<\n");
9180	mutex_unlock(&priv->mutex);
9181	return 0;
9182
9183}
9184
9185static int ipw_wx_get_nick(struct net_device *dev,
9186			   struct iw_request_info *info,
9187			   union iwreq_data *wrqu, char *extra)
9188{
9189	struct ipw_priv *priv = libipw_priv(dev);
9190	IPW_DEBUG_WX("Getting nick\n");
9191	mutex_lock(&priv->mutex);
9192	wrqu->data.length = strlen(priv->nick);
9193	memcpy(extra, priv->nick, wrqu->data.length);
9194	wrqu->data.flags = 1;	/* active */
9195	mutex_unlock(&priv->mutex);
9196	return 0;
9197}
9198
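/* The WE "sensitivity" value is mapped onto the firmware roaming
 * threshold; the disassociate threshold is kept at three times that
 * value.  Clearing 'fixed' restores both defaults. */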
9199static int ipw_wx_set_sens(struct net_device *dev,
9200			    struct iw_request_info *info,
9201			    union iwreq_data *wrqu, char *extra)
9202{
9203	struct ipw_priv *priv = libipw_priv(dev);
9204	int err = 0;
9205
9206	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9207	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3 * wrqu->sens.value);
9208	mutex_lock(&priv->mutex);
9209
9210	if (wrqu->sens.fixed == 0) {
9211		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9212		priv->disassociate_threshold =
9213			IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9214		goto out;
9215	}
9216	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9217	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9218		err = -EINVAL;
9219		goto out;
9220	}
9221
9222	priv->roaming_threshold = wrqu->sens.value;
9223	priv->disassociate_threshold = 3 * wrqu->sens.value;
9224      out:
9225	mutex_unlock(&priv->mutex);
9226	return err;
9227}
9228
9229static int ipw_wx_get_sens(struct net_device *dev,
9230			    struct iw_request_info *info,
9231			    union iwreq_data *wrqu, char *extra)
9232{
9233	struct ipw_priv *priv = libipw_priv(dev);
9234	mutex_lock(&priv->mutex);
9235	wrqu->sens.fixed = 1;
9236	wrqu->sens.value = priv->roaming_threshold;
9237	mutex_unlock(&priv->mutex);
9238
9239	IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9240		     wrqu->sens.disabled ? "OFF" : "ON", wrqu->sens.value);
9241
9242	return 0;
9243}
9244
9245static int ipw_wx_set_rate(struct net_device *dev,
9246			   struct iw_request_info *info,
9247			   union iwreq_data *wrqu, char *extra)
9248{
9249	/* TODO: We should use semaphores or locks for access to priv */
9250	struct ipw_priv *priv = libipw_priv(dev);
9251	u32 target_rate = wrqu->bitrate.value;
9252	u32 fixed, mask;
9253
9254	/* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9255	/* value = X, fixed = 1 means only rate X */
9256	/* value = X, fixed = 0 means all rates lower equal X */
9257
9258	if (target_rate == -1) {
9259		fixed = 0;
9260		mask = LIBIPW_DEFAULT_RATES_MASK;
9261		/* Now we should reassociate */
9262		goto apply;
9263	}
9264
9265	mask = 0;
9266	fixed = wrqu->bitrate.fixed;
9267
9268	if (target_rate == 1000000 || !fixed)
9269		mask |= LIBIPW_CCK_RATE_1MB_MASK;
9270	if (target_rate == 1000000)
9271		goto apply;
9272
9273	if (target_rate == 2000000 || !fixed)
9274		mask |= LIBIPW_CCK_RATE_2MB_MASK;
9275	if (target_rate == 2000000)
9276		goto apply;
9277
9278	if (target_rate == 5500000 || !fixed)
9279		mask |= LIBIPW_CCK_RATE_5MB_MASK;
9280	if (target_rate == 5500000)
9281		goto apply;
9282
9283	if (target_rate == 6000000 || !fixed)
9284		mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9285	if (target_rate == 6000000)
9286		goto apply;
9287
9288	if (target_rate == 9000000 || !fixed)
9289		mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9290	if (target_rate == 9000000)
9291		goto apply;
9292
9293	if (target_rate == 11000000 || !fixed)
9294		mask |= LIBIPW_CCK_RATE_11MB_MASK;
9295	if (target_rate == 11000000)
9296		goto apply;
9297
9298	if (target_rate == 12000000 || !fixed)
9299		mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9300	if (target_rate == 12000000)
9301		goto apply;
9302
9303	if (target_rate == 18000000 || !fixed)
9304		mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9305	if (target_rate == 18000000)
9306		goto apply;
9307
9308	if (target_rate == 24000000 || !fixed)
9309		mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9310	if (target_rate == 24000000)
9311		goto apply;
9312
9313	if (target_rate == 36000000 || !fixed)
9314		mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9315	if (target_rate == 36000000)
9316		goto apply;
9317
9318	if (target_rate == 48000000 || !fixed)
9319		mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9320	if (target_rate == 48000000)
9321		goto apply;
9322
9323	if (target_rate == 54000000 || !fixed)
9324		mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9325	if (target_rate == 54000000)
9326		goto apply;
9327
9328	IPW_DEBUG_WX("invalid rate specified, returning error\n");
9329	return -EINVAL;
9330
9331      apply:
9332	IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9333		     mask, fixed ? "fixed" : "sub-rates");
9334	mutex_lock(&priv->mutex);
9335	if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9336		priv->config &= ~CFG_FIXED_RATE;
9337		ipw_set_fixed_rate(priv, priv->ieee->mode);
9338	} else
9339		priv->config |= CFG_FIXED_RATE;
9340
9341	if (priv->rates_mask == mask) {
9342		IPW_DEBUG_WX("Mask set to current mask.\n");
9343		mutex_unlock(&priv->mutex);
9344		return 0;
9345	}
9346
9347	priv->rates_mask = mask;
9348
9349	/* Network configuration changed -- force [re]association */
9350	IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9351	if (!ipw_disassociate(priv))
9352		ipw_associate(priv);
9353
9354	mutex_unlock(&priv->mutex);
9355	return 0;
9356}
9357
9358static int ipw_wx_get_rate(struct net_device *dev,
9359			   struct iw_request_info *info,
9360			   union iwreq_data *wrqu, char *extra)
9361{
9362	struct ipw_priv *priv = libipw_priv(dev);
9363	mutex_lock(&priv->mutex);
9364	wrqu->bitrate.value = priv->last_rate;
9365	wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9366	mutex_unlock(&priv->mutex);
9367	IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9368	return 0;
9369}
9370
9371static int ipw_wx_set_rts(struct net_device *dev,
9372			  struct iw_request_info *info,
9373			  union iwreq_data *wrqu, char *extra)
9374{
9375	struct ipw_priv *priv = libipw_priv(dev);
9376	mutex_lock(&priv->mutex);
9377	if (wrqu->rts.disabled || !wrqu->rts.fixed)
9378		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9379	else {
9380		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9381		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
9382			mutex_unlock(&priv->mutex);
9383			return -EINVAL;
9384		}
9385		priv->rts_threshold = wrqu->rts.value;
9386	}
9387
9388	ipw_send_rts_threshold(priv, priv->rts_threshold);
9389	mutex_unlock(&priv->mutex);
9390	IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9391	return 0;
9392}
9393
9394static int ipw_wx_get_rts(struct net_device *dev,
9395			  struct iw_request_info *info,
9396			  union iwreq_data *wrqu, char *extra)
9397{
9398	struct ipw_priv *priv = libipw_priv(dev);
9399	mutex_lock(&priv->mutex);
9400	wrqu->rts.value = priv->rts_threshold;
9401	wrqu->rts.fixed = 0;	/* no auto select */
9402	wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9403	mutex_unlock(&priv->mutex);
9404	IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9405	return 0;
9406}
9407
9408static int ipw_wx_set_txpow(struct net_device *dev,
9409			    struct iw_request_info *info,
9410			    union iwreq_data *wrqu, char *extra)
9411{
9412	struct ipw_priv *priv = libipw_priv(dev);
9413	int err = 0;
9414
9415	mutex_lock(&priv->mutex);
9416	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9417		err = -EINPROGRESS;
9418		goto out;
9419	}
9420
9421	if (!wrqu->power.fixed)
9422		wrqu->power.value = IPW_TX_POWER_DEFAULT;
9423
9424	if (wrqu->power.flags != IW_TXPOW_DBM) {
9425		err = -EINVAL;
9426		goto out;
9427	}
9428
9429	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9430	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
9431		err = -EINVAL;
9432		goto out;
9433	}
9434
9435	priv->tx_power = wrqu->power.value;
9436	err = ipw_set_tx_power(priv);
9437      out:
9438	mutex_unlock(&priv->mutex);
9439	return err;
9440}
9441
9442static int ipw_wx_get_txpow(struct net_device *dev,
9443			    struct iw_request_info *info,
9444			    union iwreq_data *wrqu, char *extra)
9445{
9446	struct ipw_priv *priv = libipw_priv(dev);
9447	mutex_lock(&priv->mutex);
9448	wrqu->power.value = priv->tx_power;
9449	wrqu->power.fixed = 1;
9450	wrqu->power.flags = IW_TXPOW_DBM;
9451	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9452	mutex_unlock(&priv->mutex);
9453
9454	IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9455		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9456
9457	return 0;
9458}
9459
9460static int ipw_wx_set_frag(struct net_device *dev,
9461			   struct iw_request_info *info,
9462			   union iwreq_data *wrqu, char *extra)
9463{
9464	struct ipw_priv *priv = libipw_priv(dev);
9465	mutex_lock(&priv->mutex);
9466	if (wrqu->frag.disabled || !wrqu->frag.fixed)
9467		priv->ieee->fts = DEFAULT_FTS;
9468	else {
9469		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9470		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9471			mutex_unlock(&priv->mutex);
9472			return -EINVAL;
9473		}
9474
9475		priv->ieee->fts = wrqu->frag.value & ~0x1;
9476	}
9477
9478	ipw_send_frag_threshold(priv, wrqu->frag.value);
9479	mutex_unlock(&priv->mutex);
9480	IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9481	return 0;
9482}
9483
9484static int ipw_wx_get_frag(struct net_device *dev,
9485			   struct iw_request_info *info,
9486			   union iwreq_data *wrqu, char *extra)
9487{
9488	struct ipw_priv *priv = libipw_priv(dev);
9489	mutex_lock(&priv->mutex);
9490	wrqu->frag.value = priv->ieee->fts;
9491	wrqu->frag.fixed = 0;	/* no auto select */
9492	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9493	mutex_unlock(&priv->mutex);
9494	IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9495
9496	return 0;
9497}
9498
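/* SIOCSIWRETRY: only retry limits are supported (no lifetimes).  A
 * request flagged short or long updates that limit alone; otherwise
 * both limits are set to the same value. */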
9499static int ipw_wx_set_retry(struct net_device *dev,
9500			    struct iw_request_info *info,
9501			    union iwreq_data *wrqu, char *extra)
9502{
9503	struct ipw_priv *priv = libipw_priv(dev);
9504
9505	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9506		return -EINVAL;
9507
9508	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9509		return 0;
9510
9511	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9512		return -EINVAL;
9513
9514	mutex_lock(&priv->mutex);
9515	if (wrqu->retry.flags & IW_RETRY_SHORT)
9516		priv->short_retry_limit = (u8) wrqu->retry.value;
9517	else if (wrqu->retry.flags & IW_RETRY_LONG)
9518		priv->long_retry_limit = (u8) wrqu->retry.value;
9519	else {
9520		priv->short_retry_limit = (u8) wrqu->retry.value;
9521		priv->long_retry_limit = (u8) wrqu->retry.value;
9522	}
9523
9524	ipw_send_retry_limit(priv, priv->short_retry_limit,
9525			     priv->long_retry_limit);
9526	mutex_unlock(&priv->mutex);
9527	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9528		     priv->short_retry_limit, priv->long_retry_limit);
9529	return 0;
9530}
9531
9532static int ipw_wx_get_retry(struct net_device *dev,
9533			    struct iw_request_info *info,
9534			    union iwreq_data *wrqu, char *extra)
9535{
9536	struct ipw_priv *priv = libipw_priv(dev);
9537
9538	mutex_lock(&priv->mutex);
9539	wrqu->retry.disabled = 0;
9540
9541	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9542		mutex_unlock(&priv->mutex);
9543		return -EINVAL;
9544	}
9545
9546	if (wrqu->retry.flags & IW_RETRY_LONG) {
9547		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9548		wrqu->retry.value = priv->long_retry_limit;
9549	} else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9550		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9551		wrqu->retry.value = priv->short_retry_limit;
9552	} else {
9553		wrqu->retry.flags = IW_RETRY_LIMIT;
9554		wrqu->retry.value = priv->short_retry_limit;
9555	}
9556	mutex_unlock(&priv->mutex);
9557
9558	IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9559
9560	return 0;
9561}
9562
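/* SIOCSIWSCAN: pick the right deferred scan -- a directed scan when a
 * specific ESSID is requested, a passive scan when asked for, or a
 * normal active broadcast scan -- and hand it to the workqueue. */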
9563static int ipw_wx_set_scan(struct net_device *dev,
9564			   struct iw_request_info *info,
9565			   union iwreq_data *wrqu, char *extra)
9566{
9567	struct ipw_priv *priv = libipw_priv(dev);
9568	struct iw_scan_req *req = (struct iw_scan_req *)extra;
9569	struct delayed_work *work = NULL;
9570
9571	mutex_lock(&priv->mutex);
9572
9573	priv->user_requested_scan = 1;
9574
9575	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9576		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9577			int len = min((int)req->essid_len,
9578			              (int)sizeof(priv->direct_scan_ssid));
9579			memcpy(priv->direct_scan_ssid, req->essid, len);
9580			priv->direct_scan_ssid_len = len;
9581			work = &priv->request_direct_scan;
9582		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9583			work = &priv->request_passive_scan;
9584		}
9585	} else {
9586		/* Normal active broadcast scan */
9587		work = &priv->request_scan;
9588	}
9589
9590	mutex_unlock(&priv->mutex);
9591
9592	IPW_DEBUG_WX("Start scan\n");
9593
9594	queue_delayed_work(priv->workqueue, work, 0);
9595
9596	return 0;
9597}
9598
9599static int ipw_wx_get_scan(struct net_device *dev,
9600			   struct iw_request_info *info,
9601			   union iwreq_data *wrqu, char *extra)
9602{
9603	struct ipw_priv *priv = libipw_priv(dev);
9604	return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9605}
9606
9607static int ipw_wx_set_encode(struct net_device *dev,
9608			     struct iw_request_info *info,
9609			     union iwreq_data *wrqu, char *key)
9610{
9611	struct ipw_priv *priv = libipw_priv(dev);
9612	int ret;
9613	u32 cap = priv->capability;
9614
9615	mutex_lock(&priv->mutex);
9616	ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9617
9618	/* In IBSS mode, we need to notify the firmware to update
9619	 * the beacon info after we changed the capability. */
9620	if (cap != priv->capability &&
9621	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
9622	    priv->status & STATUS_ASSOCIATED)
9623		ipw_disassociate(priv);
9624
9625	mutex_unlock(&priv->mutex);
9626	return ret;
9627}
9628
9629static int ipw_wx_get_encode(struct net_device *dev,
9630			     struct iw_request_info *info,
9631			     union iwreq_data *wrqu, char *key)
9632{
9633	struct ipw_priv *priv = libipw_priv(dev);
9634	return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9635}
9636
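/* SIOCSIWPOWER: 'disabled' drops the firmware back to CAM (always
 * awake); otherwise power management is (re)enabled at the current
 * level, defaulting to BATTERY if no level has been chosen yet. */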
9637static int ipw_wx_set_power(struct net_device *dev,
9638			    struct iw_request_info *info,
9639			    union iwreq_data *wrqu, char *extra)
9640{
9641	struct ipw_priv *priv = libipw_priv(dev);
9642	int err;
9643	mutex_lock(&priv->mutex);
9644	if (wrqu->power.disabled) {
9645		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9646		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9647		if (err) {
9648			IPW_DEBUG_WX("failed setting power mode.\n");
9649			mutex_unlock(&priv->mutex);
9650			return err;
9651		}
9652		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9653		mutex_unlock(&priv->mutex);
9654		return 0;
9655	}
9656
9657	switch (wrqu->power.flags & IW_POWER_MODE) {
9658	case IW_POWER_ON:	/* If not specified */
9659	case IW_POWER_MODE:	/* If set all mask */
9660	case IW_POWER_ALL_R:	/* If explicitly state all */
9661		break;
9662	default:		/* Otherwise we don't support it */
9663		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9664			     wrqu->power.flags);
9665		mutex_unlock(&priv->mutex);
9666		return -EOPNOTSUPP;
9667	}
9668
9669	/* If the user hasn't specified a power management mode yet, default
9670	 * to BATTERY */
9671	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9672		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9673	else
9674		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9675
9676	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9677	if (err) {
9678		IPW_DEBUG_WX("failed setting power mode.\n");
9679		mutex_unlock(&priv->mutex);
9680		return err;
9681	}
9682
9683	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9684	mutex_unlock(&priv->mutex);
9685	return 0;
9686}
9687
9688static int ipw_wx_get_power(struct net_device *dev,
9689			    struct iw_request_info *info,
9690			    union iwreq_data *wrqu, char *extra)
9691{
9692	struct ipw_priv *priv = libipw_priv(dev);
9693	mutex_lock(&priv->mutex);
9694	if (!(priv->power_mode & IPW_POWER_ENABLED))
9695		wrqu->power.disabled = 1;
9696	else
9697		wrqu->power.disabled = 0;
9698
9699	mutex_unlock(&priv->mutex);
9700	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9701
9702	return 0;
9703}
9704
9705static int ipw_wx_set_powermode(struct net_device *dev,
9706				struct iw_request_info *info,
9707				union iwreq_data *wrqu, char *extra)
9708{
9709	struct ipw_priv *priv = libipw_priv(dev);
9710	int mode = *(int *)extra;
9711	int err;
9712
9713	mutex_lock(&priv->mutex);
9714	if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9715		mode = IPW_POWER_AC;
9716
9717	if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9718		err = ipw_send_power_mode(priv, mode);
9719		if (err) {
9720			IPW_DEBUG_WX("failed setting power mode.\n");
9721			mutex_unlock(&priv->mutex);
9722			return err;
9723		}
9724		priv->power_mode = IPW_POWER_ENABLED | mode;
9725	}
9726	mutex_unlock(&priv->mutex);
9727	return 0;
9728}
9729
9730#define MAX_WX_STRING 80
9731static int ipw_wx_get_powermode(struct net_device *dev,
9732				struct iw_request_info *info,
9733				union iwreq_data *wrqu, char *extra)
9734{
9735	struct ipw_priv *priv = libipw_priv(dev);
9736	int level = IPW_POWER_LEVEL(priv->power_mode);
9737	char *p = extra;
9738
9739	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9740
9741	switch (level) {
9742	case IPW_POWER_AC:
9743		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9744		break;
9745	case IPW_POWER_BATTERY:
9746		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9747		break;
9748	default:
9749		p += snprintf(p, MAX_WX_STRING - (p - extra),
9750			      "(Timeout %dms, Period %dms)",
9751			      timeout_duration[level - 1] / 1000,
9752			      period_duration[level - 1] / 1000);
9753	}
9754
9755	if (!(priv->power_mode & IPW_POWER_ENABLED))
9756		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9757
9758	wrqu->data.length = p - extra + 1;
9759
9760	return 0;
9761}
9762
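/* Private "set_mode" ioctl: select any combination of 802.11a/b/g.
 * 802.11a is only valid on 2915ABG hardware; the band and modulation
 * masks are rebuilt and a [re]association is forced. */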
9763static int ipw_wx_set_wireless_mode(struct net_device *dev,
9764				    struct iw_request_info *info,
9765				    union iwreq_data *wrqu, char *extra)
9766{
9767	struct ipw_priv *priv = libipw_priv(dev);
9768	int mode = *(int *)extra;
9769	u8 band = 0, modulation = 0;
9770
9771	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9772		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9773		return -EINVAL;
9774	}
9775	mutex_lock(&priv->mutex);
9776	if (priv->adapter == IPW_2915ABG) {
9777		priv->ieee->abg_true = 1;
9778		if (mode & IEEE_A) {
9779			band |= LIBIPW_52GHZ_BAND;
9780			modulation |= LIBIPW_OFDM_MODULATION;
9781		} else
9782			priv->ieee->abg_true = 0;
9783	} else {
9784		if (mode & IEEE_A) {
9785			IPW_WARNING("Attempt to set 2200BG into "
9786				    "802.11a mode\n");
9787			mutex_unlock(&priv->mutex);
9788			return -EINVAL;
9789		}
9790
9791		priv->ieee->abg_true = 0;
9792	}
9793
9794	if (mode & IEEE_B) {
9795		band |= LIBIPW_24GHZ_BAND;
9796		modulation |= LIBIPW_CCK_MODULATION;
9797	} else
9798		priv->ieee->abg_true = 0;
9799
9800	if (mode & IEEE_G) {
9801		band |= LIBIPW_24GHZ_BAND;
9802		modulation |= LIBIPW_OFDM_MODULATION;
9803	} else
9804		priv->ieee->abg_true = 0;
9805
9806	priv->ieee->mode = mode;
9807	priv->ieee->freq_band = band;
9808	priv->ieee->modulation = modulation;
9809	init_supported_rates(priv, &priv->rates);
9810
9811	/* Network configuration changed -- force [re]association */
9812	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9813	if (!ipw_disassociate(priv)) {
9814		ipw_send_supported_rates(priv, &priv->rates);
9815		ipw_associate(priv);
9816	}
9817
9818	/* Update the band LEDs */
9819	ipw_led_band_on(priv);
9820
9821	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9822		     mode & IEEE_A ? 'a' : '.',
9823		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9824	mutex_unlock(&priv->mutex);
9825	return 0;
9826}
9827
9828static int ipw_wx_get_wireless_mode(struct net_device *dev,
9829				    struct iw_request_info *info,
9830				    union iwreq_data *wrqu, char *extra)
9831{
9832	struct ipw_priv *priv = libipw_priv(dev);
9833	mutex_lock(&priv->mutex);
9834	switch (priv->ieee->mode) {
9835	case IEEE_A:
9836		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9837		break;
9838	case IEEE_B:
9839		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9840		break;
9841	case IEEE_A | IEEE_B:
9842		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9843		break;
9844	case IEEE_G:
9845		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9846		break;
9847	case IEEE_A | IEEE_G:
9848		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9849		break;
9850	case IEEE_B | IEEE_G:
9851		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9852		break;
9853	case IEEE_A | IEEE_B | IEEE_G:
9854		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9855		break;
9856	default:
9857		strncpy(extra, "unknown", MAX_WX_STRING);
9858		break;
9859	}
9860
9861	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9862
9863	wrqu->data.length = strlen(extra) + 1;
9864	mutex_unlock(&priv->mutex);
9865
9866	return 0;
9867}
9868
9869static int ipw_wx_set_preamble(struct net_device *dev,
9870			       struct iw_request_info *info,
9871			       union iwreq_data *wrqu, char *extra)
9872{
9873	struct ipw_priv *priv = libipw_priv(dev);
9874	int mode = *(int *)extra;
9875	mutex_lock(&priv->mutex);
9876	/* Switching from SHORT -> LONG requires a disassociation */
9877	if (mode == 1) {
9878		if (!(priv->config & CFG_PREAMBLE_LONG)) {
9879			priv->config |= CFG_PREAMBLE_LONG;
9880
9881			/* Network configuration changed -- force [re]association */
9882			IPW_DEBUG_ASSOC
9883			    ("[re]association triggered due to preamble change.\n");
9884			if (!ipw_disassociate(priv))
9885				ipw_associate(priv);
9886		}
9887		goto done;
9888	}
9889
9890	if (mode == 0) {
9891		priv->config &= ~CFG_PREAMBLE_LONG;
9892		goto done;
9893	}
9894	mutex_unlock(&priv->mutex);
9895	return -EINVAL;
9896
9897      done:
9898	mutex_unlock(&priv->mutex);
9899	return 0;
9900}
9901
9902static int ipw_wx_get_preamble(struct net_device *dev,
9903			       struct iw_request_info *info,
9904			       union iwreq_data *wrqu, char *extra)
9905{
9906	struct ipw_priv *priv = libipw_priv(dev);
9907	mutex_lock(&priv->mutex);
9908	if (priv->config & CFG_PREAMBLE_LONG)
9909		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9910	else
9911		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9912	mutex_unlock(&priv->mutex);
9913	return 0;
9914}
9915
9916#ifdef CONFIG_IPW2200_MONITOR
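/* Private "monitor" ioctl: parms[0] enables or disables monitor mode
 * (switching the interface type and restarting the adapter as needed),
 * parms[1] selects the channel to listen on. */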
9917static int ipw_wx_set_monitor(struct net_device *dev,
9918			      struct iw_request_info *info,
9919			      union iwreq_data *wrqu, char *extra)
9920{
9921	struct ipw_priv *priv = libipw_priv(dev);
9922	int *parms = (int *)extra;
9923	int enable = (parms[0] > 0);
9924	mutex_lock(&priv->mutex);
9925	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9926	if (enable) {
9927		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9928#ifdef CONFIG_IPW2200_RADIOTAP
9929			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9930#else
9931			priv->net_dev->type = ARPHRD_IEEE80211;
9932#endif
9933			queue_work(priv->workqueue, &priv->adapter_restart);
9934		}
9935
9936		ipw_set_channel(priv, parms[1]);
9937	} else {
9938		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9939			mutex_unlock(&priv->mutex);
9940			return 0;
9941		}
9942		priv->net_dev->type = ARPHRD_ETHER;
9943		queue_work(priv->workqueue, &priv->adapter_restart);
9944	}
9945	mutex_unlock(&priv->mutex);
9946	return 0;
9947}
9948
9949#endif				/* CONFIG_IPW2200_MONITOR */
9950
9951static int ipw_wx_reset(struct net_device *dev,
9952			struct iw_request_info *info,
9953			union iwreq_data *wrqu, char *extra)
9954{
9955	struct ipw_priv *priv = libipw_priv(dev);
9956	IPW_DEBUG_WX("RESET\n");
9957	queue_work(priv->workqueue, &priv->adapter_restart);
9958	return 0;
9959}
9960
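/* Private "sw_reset" ioctl: reset the driver configuration, reload the
 * firmware, disable any configured encryption keys and then force a
 * [re]association unless RF-kill is asserted. */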
9961static int ipw_wx_sw_reset(struct net_device *dev,
9962			   struct iw_request_info *info,
9963			   union iwreq_data *wrqu, char *extra)
9964{
9965	struct ipw_priv *priv = libipw_priv(dev);
9966	union iwreq_data wrqu_sec = {
9967		.encoding = {
9968			     .flags = IW_ENCODE_DISABLED,
9969			     },
9970	};
9971	int ret;
9972
9973	IPW_DEBUG_WX("SW_RESET\n");
9974
9975	mutex_lock(&priv->mutex);
9976
9977	ret = ipw_sw_reset(priv, 2);
9978	if (!ret) {
9979		free_firmware();
9980		ipw_adapter_restart(priv);
9981	}
9982
9983	/* The SW reset bit might have been toggled on by the 'disable'
9984	 * module parameter, so take appropriate action */
9985	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9986
9987	mutex_unlock(&priv->mutex);
9988	libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9989	mutex_lock(&priv->mutex);
9990
9991	if (!(priv->status & STATUS_RF_KILL_MASK)) {
9992		/* Configuration likely changed -- force [re]association */
9993		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9994				"reset.\n");
9995		if (!ipw_disassociate(priv))
9996			ipw_associate(priv);
9997	}
9998
9999	mutex_unlock(&priv->mutex);
10000
10001	return 0;
10002}
10003
10004/* Rebase the WE IOCTLs to zero for the handler array */
10005static iw_handler ipw_wx_handlers[] = {
10006	IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
10007	IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
10008	IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
10009	IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
10010	IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
10011	IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
10012	IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
10013	IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
10014	IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
10015	IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
10016	IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
10017	IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
10018	IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
10019	IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
10020	IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
10021	IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
10022	IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
10023	IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
10024	IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
10025	IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
10026	IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
10027	IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
10028	IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
10029	IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
10030	IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
10031	IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
10032	IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
10033	IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
10034	IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
10035	IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
10036	IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
10037	IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
10038	IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
10039	IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
10040	IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
10041	IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
10042	IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
10043	IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
10044	IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
10045	IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
10046	IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
10047};
10048
10049enum {
10050	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
10051	IPW_PRIV_GET_POWER,
10052	IPW_PRIV_SET_MODE,
10053	IPW_PRIV_GET_MODE,
10054	IPW_PRIV_SET_PREAMBLE,
10055	IPW_PRIV_GET_PREAMBLE,
10056	IPW_PRIV_RESET,
10057	IPW_PRIV_SW_RESET,
10058#ifdef CONFIG_IPW2200_MONITOR
10059	IPW_PRIV_SET_MONITOR,
10060#endif
10061};
10062
10063static struct iw_priv_args ipw_priv_args[] = {
10064	{
10065	 .cmd = IPW_PRIV_SET_POWER,
10066	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10067	 .name = "set_power"},
10068	{
10069	 .cmd = IPW_PRIV_GET_POWER,
10070	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10071	 .name = "get_power"},
10072	{
10073	 .cmd = IPW_PRIV_SET_MODE,
10074	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10075	 .name = "set_mode"},
10076	{
10077	 .cmd = IPW_PRIV_GET_MODE,
10078	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10079	 .name = "get_mode"},
10080	{
10081	 .cmd = IPW_PRIV_SET_PREAMBLE,
10082	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10083	 .name = "set_preamble"},
10084	{
10085	 .cmd = IPW_PRIV_GET_PREAMBLE,
10086	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10087	 .name = "get_preamble"},
10088	{
10089	 IPW_PRIV_RESET,
10090	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10091	{
10092	 IPW_PRIV_SW_RESET,
10093	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10094#ifdef CONFIG_IPW2200_MONITOR
10095	{
10096	 IPW_PRIV_SET_MONITOR,
10097	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10098#endif				/* CONFIG_IPW2200_MONITOR */
10099};
10100
10101static iw_handler ipw_priv_handler[] = {
10102	ipw_wx_set_powermode,
10103	ipw_wx_get_powermode,
10104	ipw_wx_set_wireless_mode,
10105	ipw_wx_get_wireless_mode,
10106	ipw_wx_set_preamble,
10107	ipw_wx_get_preamble,
10108	ipw_wx_reset,
10109	ipw_wx_sw_reset,
10110#ifdef CONFIG_IPW2200_MONITOR
10111	ipw_wx_set_monitor,
10112#endif
10113};
10114
10115static struct iw_handler_def ipw_wx_handler_def = {
10116	.standard = ipw_wx_handlers,
10117	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
10118	.num_private = ARRAY_SIZE(ipw_priv_handler),
10119	.num_private_args = ARRAY_SIZE(ipw_priv_args),
10120	.private = ipw_priv_handler,
10121	.private_args = ipw_priv_args,
10122	.get_wireless_stats = ipw_get_wireless_stats,
10123};
10124
10125/*
10126 * Get wireless statistics.
10127 * Called by /proc/net/wireless
10128 * Also called by SIOCGIWSTATS
10129 */
10130static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10131{
10132	struct ipw_priv *priv = libipw_priv(dev);
10133	struct iw_statistics *wstats;
10134
10135	wstats = &priv->wstats;
10136
10137	/* if hw is disabled, then ipw_get_ordinal() can't be called.
10138	 * netdev->get_wireless_stats seems to be called before fw is
10139	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
10140	 * and associated; if not associated, the values are all meaningless
10141	 * anyway, so zero them out and mark them invalid */
10142	if (!(priv->status & STATUS_ASSOCIATED)) {
10143		wstats->miss.beacon = 0;
10144		wstats->discard.retries = 0;
10145		wstats->qual.qual = 0;
10146		wstats->qual.level = 0;
10147		wstats->qual.noise = 0;
10148		wstats->qual.updated = 7;
10149		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10150		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10151		return wstats;
10152	}
10153
10154	wstats->qual.qual = priv->quality;
10155	wstats->qual.level = priv->exp_avg_rssi;
10156	wstats->qual.noise = priv->exp_avg_noise;
10157	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10158	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10159
10160	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10161	wstats->discard.retries = priv->last_tx_failures;
10162	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10163
10164/*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10165	goto fail_get_ordinal;
10166	wstats->discard.retries += tx_retry; */
10167
10168	return wstats;
10169}
10170
10171/* net device stuff */
10172
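/* Build the default firmware system configuration.  Firmware decryption
 * of unicast and multicast frames is disabled here, and the antenna
 * setting is taken from the 'antenna' module parameter (falling back to
 * diversity over both antennas if the value is out of range). */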
10173static void init_sys_config(struct ipw_sys_config *sys_config)
10174{
10175	memset(sys_config, 0, sizeof(struct ipw_sys_config));
10176	sys_config->bt_coexistence = 0;
10177	sys_config->answer_broadcast_ssid_probe = 0;
10178	sys_config->accept_all_data_frames = 0;
10179	sys_config->accept_non_directed_frames = 1;
10180	sys_config->exclude_unicast_unencrypted = 0;
10181	sys_config->disable_unicast_decryption = 1;
10182	sys_config->exclude_multicast_unencrypted = 0;
10183	sys_config->disable_multicast_decryption = 1;
10184	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10185		antenna = CFG_SYS_ANTENNA_BOTH;
10186	sys_config->antenna_diversity = antenna;
10187	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
10188	sys_config->dot11g_auto_detection = 0;
10189	sys_config->enable_cts_to_self = 0;
10190	sys_config->bt_coexist_collision_thr = 0;
10191	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
10192	sys_config->silence_threshold = 0x1e;
10193}
10194
10195static int ipw_net_open(struct net_device *dev)
10196{
10197	IPW_DEBUG_INFO("dev->open\n");
10198	netif_start_queue(dev);
10199	return 0;
10200}
10201
10202static int ipw_net_stop(struct net_device *dev)
10203{
10204	IPW_DEBUG_INFO("dev->close\n");
10205	netif_stop_queue(dev);
10206	return 0;
10207}
10208
10209/*
10210 * TODO:
10211 *
10212 * Modify to send one TFD per fragment instead of using chunking;
10213 * otherwise we need to heavily modify libipw_skb_to_txb().
10214 */
10215
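/* Build a transmit frame descriptor (TFD) for the txb: pick the
 * destination station, copy the MAC header into the TFD, set the
 * rate/preamble/security flags and DMA-map each payload fragment as a
 * chunk; fragments beyond NUM_TFD_CHUNKS - 2 are coalesced into one
 * buffer before the queue write pointer is advanced. */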
10216static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10217			     int pri)
10218{
10219	struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10220	    txb->fragments[0]->data;
10221	int i = 0;
10222	struct tfd_frame *tfd;
10223#ifdef CONFIG_IPW2200_QOS
10224	int tx_id = ipw_get_tx_queue_number(priv, pri);
10225	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10226#else
10227	struct clx2_tx_queue *txq = &priv->txq[0];
10228#endif
10229	struct clx2_queue *q = &txq->q;
10230	u8 id, hdr_len, unicast;
10231	int fc;
10232
10233	if (!(priv->status & STATUS_ASSOCIATED))
10234		goto drop;
10235
10236	hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10237	switch (priv->ieee->iw_mode) {
10238	case IW_MODE_ADHOC:
10239		unicast = !is_multicast_ether_addr(hdr->addr1);
10240		id = ipw_find_station(priv, hdr->addr1);
10241		if (id == IPW_INVALID_STATION) {
10242			id = ipw_add_station(priv, hdr->addr1);
10243			if (id == IPW_INVALID_STATION) {
10244				IPW_WARNING("Attempt to send data to "
10245					    "invalid cell: %pM\n",
10246					    hdr->addr1);
10247				goto drop;
10248			}
10249		}
10250		break;
10251
10252	case IW_MODE_INFRA:
10253	default:
10254		unicast = !is_multicast_ether_addr(hdr->addr3);
10255		id = 0;
10256		break;
10257	}
10258
10259	tfd = &txq->bd[q->first_empty];
10260	txq->txb[q->first_empty] = txb;
10261	memset(tfd, 0, sizeof(*tfd));
10262	tfd->u.data.station_number = id;
10263
10264	tfd->control_flags.message_type = TX_FRAME_TYPE;
10265	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10266
10267	tfd->u.data.cmd_id = DINO_CMD_TX;
10268	tfd->u.data.len = cpu_to_le16(txb->payload_size);
10269
10270	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10271		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10272	else
10273		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10274
10275	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10276		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10277
10278	fc = le16_to_cpu(hdr->frame_ctl);
10279	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10280
10281	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10282
10283	if (likely(unicast))
10284		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10285
10286	if (txb->encrypted && !priv->ieee->host_encrypt) {
10287		switch (priv->ieee->sec.level) {
10288		case SEC_LEVEL_3:
10289			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10290			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10291			/* XXX: ACK flag must be set for CCMP even if it
10292			 * is a multicast/broadcast packet, because CCMP
10293			 * group communication encrypted by GTK is
10294			 * actually done by the AP. */
10295			if (!unicast)
10296				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10297
10298			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10299			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10300			tfd->u.data.key_index = 0;
10301			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10302			break;
10303		case SEC_LEVEL_2:
10304			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10305			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10306			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10307			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10308			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10309			break;
10310		case SEC_LEVEL_1:
10311			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10312			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10313			tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10314			if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10315			    40)
10316				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10317			else
10318				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10319			break;
10320		case SEC_LEVEL_0:
10321			break;
10322		default:
10323			printk(KERN_ERR "Unknown security level %d\n",
10324			       priv->ieee->sec.level);
10325			break;
10326		}
10327	} else
10328		/* No hardware encryption */
10329		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10330
10331#ifdef CONFIG_IPW2200_QOS
10332	if (fc & IEEE80211_STYPE_QOS_DATA)
10333		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10334#endif				/* CONFIG_IPW2200_QOS */
10335
10336	/* payload */
10337	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10338						 txb->nr_frags));
10339	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10340		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10341	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10342		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10343			       i, le32_to_cpu(tfd->u.data.num_chunks),
10344			       txb->fragments[i]->len - hdr_len);
10345		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10346			     i, tfd->u.data.num_chunks,
10347			     txb->fragments[i]->len - hdr_len);
10348		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10349			   txb->fragments[i]->len - hdr_len);
10350
10351		tfd->u.data.chunk_ptr[i] =
10352		    cpu_to_le32(pci_map_single
10353				(priv->pci_dev,
10354				 txb->fragments[i]->data + hdr_len,
10355				 txb->fragments[i]->len - hdr_len,
10356				 PCI_DMA_TODEVICE));
10357		tfd->u.data.chunk_len[i] =
10358		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
10359	}
10360
10361	if (i != txb->nr_frags) {
10362		struct sk_buff *skb;
10363		u16 remaining_bytes = 0;
10364		int j;
10365
10366		for (j = i; j < txb->nr_frags; j++)
10367			remaining_bytes += txb->fragments[j]->len - hdr_len;
10368
10369		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10370		       remaining_bytes);
10371		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10372		if (skb != NULL) {
10373			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10374			for (j = i; j < txb->nr_frags; j++) {
10375				int size = txb->fragments[j]->len - hdr_len;
10376
10377				printk(KERN_INFO "Adding frag %d %d...\n",
10378				       j, size);
10379				memcpy(skb_put(skb, size),
10380				       txb->fragments[j]->data + hdr_len, size);
10381			}
10382			dev_kfree_skb_any(txb->fragments[i]);
10383			txb->fragments[i] = skb;
10384			tfd->u.data.chunk_ptr[i] =
10385			    cpu_to_le32(pci_map_single
10386					(priv->pci_dev, skb->data,
10387					 remaining_bytes,
10388					 PCI_DMA_TODEVICE));
10389
10390			le32_add_cpu(&tfd->u.data.num_chunks, 1);
10391		}
10392	}
10393
10394	/* kick DMA */
10395	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10396	ipw_write32(priv, q->reg_w, q->first_empty);
10397
10398	if (ipw_tx_queue_space(q) < q->high_mark)
10399		netif_stop_queue(priv->net_dev);
10400
10401	return NETDEV_TX_OK;
10402
10403      drop:
10404	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10405	libipw_txb_free(txb);
10406	return NETDEV_TX_OK;
10407}
10408
10409static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10410{
10411	struct ipw_priv *priv = libipw_priv(dev);
10412#ifdef CONFIG_IPW2200_QOS
10413	int tx_id = ipw_get_tx_queue_number(priv, pri);
10414	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10415#else
10416	struct clx2_tx_queue *txq = &priv->txq[0];
10417#endif				/* CONFIG_IPW2200_QOS */
10418
10419	if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10420		return 1;
10421
10422	return 0;
10423}
10424
10425#ifdef CONFIG_IPW2200_PROMISCUOUS
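/* Mirror outgoing frames to the promiscuous (rtap) interface: apply the
 * configured TX filters, prepend a minimal radiotap header carrying
 * only the channel information and feed the copy back through
 * libipw_rx(). */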
10426static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10427				      struct libipw_txb *txb)
10428{
10429	struct libipw_rx_stats dummystats;
10430	struct ieee80211_hdr *hdr;
10431	u8 n;
10432	u16 filter = priv->prom_priv->filter;
10433	int hdr_only = 0;
10434
10435	if (filter & IPW_PROM_NO_TX)
10436		return;
10437
10438	memset(&dummystats, 0, sizeof(dummystats));
10439
10440	/* Filtering of fragment chains is done against the first fragment */
10441	hdr = (void *)txb->fragments[0]->data;
10442	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10443		if (filter & IPW_PROM_NO_MGMT)
10444			return;
10445		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10446			hdr_only = 1;
10447	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10448		if (filter & IPW_PROM_NO_CTL)
10449			return;
10450		if (filter & IPW_PROM_CTL_HEADER_ONLY)
10451			hdr_only = 1;
10452	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10453		if (filter & IPW_PROM_NO_DATA)
10454			return;
10455		if (filter & IPW_PROM_DATA_HEADER_ONLY)
10456			hdr_only = 1;
10457	}
10458
10459	for (n = 0; n < txb->nr_frags; ++n) {
10460		struct sk_buff *src = txb->fragments[n];
10461		struct sk_buff *dst;
10462		struct ieee80211_radiotap_header *rt_hdr;
10463		int len;
10464
10465		if (hdr_only) {
10466			hdr = (void *)src->data;
10467			len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10468		} else
10469			len = src->len;
10470
10471		dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
10472		if (!dst)
10473			continue;
10474
10475		rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10476
10477		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10478		rt_hdr->it_pad = 0;
10479		rt_hdr->it_present = 0; /* after all, it's just an idea */
10480		rt_hdr->it_present |=  cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10481
10482		*(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10483			ieee80211chan2mhz(priv->channel));
10484		if (priv->channel > 14) 	/* 802.11a */
10485			*(__le16*)skb_put(dst, sizeof(u16)) =
10486				cpu_to_le16(IEEE80211_CHAN_OFDM |
10487					     IEEE80211_CHAN_5GHZ);
10488		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10489			*(__le16*)skb_put(dst, sizeof(u16)) =
10490				cpu_to_le16(IEEE80211_CHAN_CCK |
10491					     IEEE80211_CHAN_2GHZ);
10492		else 		/* 802.11g */
10493			*(__le16*)skb_put(dst, sizeof(u16)) =
10494				cpu_to_le16(IEEE80211_CHAN_OFDM |
10495				 IEEE80211_CHAN_2GHZ);
10496
10497		rt_hdr->it_len = cpu_to_le16(dst->len);
10498
10499		skb_copy_from_linear_data(src, skb_put(dst, len), len);
10500
10501		if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10502			dev_kfree_skb_any(dst);
10503	}
10504}
10505#endif
10506
10507static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10508					   struct net_device *dev, int pri)
10509{
10510	struct ipw_priv *priv = libipw_priv(dev);
10511	unsigned long flags;
10512	netdev_tx_t ret;
10513
10514	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10515	spin_lock_irqsave(&priv->lock, flags);
10516
10517#ifdef CONFIG_IPW2200_PROMISCUOUS
10518	if (rtap_iface && netif_running(priv->prom_net_dev))
10519		ipw_handle_promiscuous_tx(priv, txb);
10520#endif
10521
10522	ret = ipw_tx_skb(priv, txb, pri);
10523	if (ret == NETDEV_TX_OK)
10524		__ipw_led_activity_on(priv);
10525	spin_unlock_irqrestore(&priv->lock, flags);
10526
10527	return ret;
10528}
10529
10530static void ipw_net_set_multicast_list(struct net_device *dev)
10531{
10532
10533}
10534
10535static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10536{
10537	struct ipw_priv *priv = libipw_priv(dev);
10538	struct sockaddr *addr = p;
10539
10540	if (!is_valid_ether_addr(addr->sa_data))
10541		return -EADDRNOTAVAIL;
10542	mutex_lock(&priv->mutex);
10543	priv->config |= CFG_CUSTOM_MAC;
10544	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10545	printk(KERN_INFO "%s: Setting MAC to %pM\n",
10546	       priv->net_dev->name, priv->mac_addr);
10547	queue_work(priv->workqueue, &priv->adapter_restart);
10548	mutex_unlock(&priv->mutex);
10549	return 0;
10550}
10551
10552static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10553				    struct ethtool_drvinfo *info)
10554{
10555	struct ipw_priv *p = libipw_priv(dev);
10556	char vers[64];
10557	char date[32];
10558	u32 len;
10559
10560	strcpy(info->driver, DRV_NAME);
10561	strcpy(info->version, DRV_VERSION);
10562
10563	len = sizeof(vers);
10564	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10565	len = sizeof(date);
10566	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10567
10568	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10569		 vers, date);
10570	strcpy(info->bus_info, pci_name(p->pci_dev));
10571	info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10572}
10573
10574static u32 ipw_ethtool_get_link(struct net_device *dev)
10575{
10576	struct ipw_priv *priv = libipw_priv(dev);
10577	return (priv->status & STATUS_ASSOCIATED) != 0;
10578}
10579
10580static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10581{
10582	return IPW_EEPROM_IMAGE_SIZE;
10583}
10584
10585static int ipw_ethtool_get_eeprom(struct net_device *dev,
10586				  struct ethtool_eeprom *eeprom, u8 * bytes)
10587{
10588	struct ipw_priv *p = libipw_priv(dev);
10589
10590	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10591		return -EINVAL;
10592	mutex_lock(&p->mutex);
10593	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10594	mutex_unlock(&p->mutex);
10595	return 0;
10596}
10597
10598static int ipw_ethtool_set_eeprom(struct net_device *dev,
10599				  struct ethtool_eeprom *eeprom, u8 * bytes)
10600{
10601	struct ipw_priv *p = libipw_priv(dev);
10602	int i;
10603
10604	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10605		return -EINVAL;
10606	mutex_lock(&p->mutex);
10607	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10608	for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10609		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10610	mutex_unlock(&p->mutex);
10611	return 0;
10612}
10613
10614static const struct ethtool_ops ipw_ethtool_ops = {
10615	.get_link = ipw_ethtool_get_link,
10616	.get_drvinfo = ipw_ethtool_get_drvinfo,
10617	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
10618	.get_eeprom = ipw_ethtool_get_eeprom,
10619	.set_eeprom = ipw_ethtool_set_eeprom,
10620};
10621
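/* Top-half interrupt handler: ignore shared or spurious interrupts,
 * otherwise disable and ack the pending bits, cache them in isr_inta
 * and defer the real work to the IRQ tasklet. */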
10622static irqreturn_t ipw_isr(int irq, void *data)
10623{
10624	struct ipw_priv *priv = data;
10625	u32 inta, inta_mask;
10626
10627	if (!priv)
10628		return IRQ_NONE;
10629
10630	spin_lock(&priv->irq_lock);
10631
10632	if (!(priv->status & STATUS_INT_ENABLED)) {
10633		/* IRQ is disabled */
10634		goto none;
10635	}
10636
10637	inta = ipw_read32(priv, IPW_INTA_RW);
10638	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10639
10640	if (inta == 0xFFFFFFFF) {
10641		/* Hardware disappeared */
10642		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10643		goto none;
10644	}
10645
10646	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10647		/* Shared interrupt */
10648		goto none;
10649	}
10650
10651	/* tell the device to stop sending interrupts */
10652	__ipw_disable_interrupts(priv);
10653
10654	/* ack current interrupts */
10655	inta &= (IPW_INTA_MASK_ALL & inta_mask);
10656	ipw_write32(priv, IPW_INTA_RW, inta);
10657
10658	/* Cache INTA value for our tasklet */
10659	priv->isr_inta = inta;
10660
10661	tasklet_schedule(&priv->irq_tasklet);
10662
10663	spin_unlock(&priv->irq_lock);
10664
10665	return IRQ_HANDLED;
10666      none:
10667	spin_unlock(&priv->irq_lock);
10668	return IRQ_NONE;
10669}
10670
10671static void ipw_rf_kill(void *adapter)
10672{
10673	struct ipw_priv *priv = adapter;
10674	unsigned long flags;
10675
10676	spin_lock_irqsave(&priv->lock, flags);
10677
10678	if (rf_kill_active(priv)) {
10679		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10680		if (priv->workqueue)
10681			queue_delayed_work(priv->workqueue,
10682					   &priv->rf_kill, 2 * HZ);
10683		goto exit_unlock;
10684	}
10685
10686	/* RF Kill is now disabled, so bring the device back up */
10687
10688	if (!(priv->status & STATUS_RF_KILL_MASK)) {
10689		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10690				  "device\n");
10691
10692		/* we can not do an adapter restart while inside an irq lock */
10693		queue_work(priv->workqueue, &priv->adapter_restart);
10694	} else
10695		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
10696				  "enabled\n");
10697
10698      exit_unlock:
10699	spin_unlock_irqrestore(&priv->lock, flags);
10700}
10701
10702static void ipw_bg_rf_kill(struct work_struct *work)
10703{
10704	struct ipw_priv *priv =
10705		container_of(work, struct ipw_priv, rf_kill.work);
10706	mutex_lock(&priv->mutex);
10707	ipw_rf_kill(priv);
10708	mutex_unlock(&priv->mutex);
10709}
10710
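/* Runs (via the link_up work item) once an association completes:
 * bring the carrier up, cancel pending scans, reset the statistics and
 * notify user space of the new association. */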
10711static void ipw_link_up(struct ipw_priv *priv)
10712{
10713	priv->last_seq_num = -1;
10714	priv->last_frag_num = -1;
10715	priv->last_packet_time = 0;
10716
10717	netif_carrier_on(priv->net_dev);
10718
10719	cancel_delayed_work(&priv->request_scan);
10720	cancel_delayed_work(&priv->request_direct_scan);
10721	cancel_delayed_work(&priv->request_passive_scan);
10722	cancel_delayed_work(&priv->scan_event);
10723	ipw_reset_stats(priv);
10724	/* Ensure the rate is updated immediately */
10725	priv->last_rate = ipw_get_current_rate(priv);
10726	ipw_gather_stats(priv);
10727	ipw_led_link_up(priv);
10728	notify_wx_assoc_event(priv);
10729
10730	if (priv->config & CFG_BACKGROUND_SCAN)
10731		queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10732}
10733
10734static void ipw_bg_link_up(struct work_struct *work)
10735{
10736	struct ipw_priv *priv =
10737		container_of(work, struct ipw_priv, link_up);
10738	mutex_lock(&priv->mutex);
10739	ipw_link_up(priv);
10740	mutex_unlock(&priv->mutex);
10741}
10742
10743static void ipw_link_down(struct ipw_priv *priv)
10744{
10745	ipw_led_link_down(priv);
10746	netif_carrier_off(priv->net_dev);
10747	notify_wx_assoc_event(priv);
10748
10749	/* Cancel any queued work ... */
10750	cancel_delayed_work(&priv->request_scan);
10751	cancel_delayed_work(&priv->request_direct_scan);
10752	cancel_delayed_work(&priv->request_passive_scan);
10753	cancel_delayed_work(&priv->adhoc_check);
10754	cancel_delayed_work(&priv->gather_stats);
10755
10756	ipw_reset_stats(priv);
10757
10758	if (!(priv->status & STATUS_EXIT_PENDING)) {
10759		/* Queue up another scan... */
10760		queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10761	} else
10762		cancel_delayed_work(&priv->scan_event);
10763}
10764
10765static void ipw_bg_link_down(struct work_struct *work)
10766{
10767	struct ipw_priv *priv =
10768		container_of(work, struct ipw_priv, link_down);
10769	mutex_lock(&priv->mutex);
10770	ipw_link_down(priv);
10771	mutex_unlock(&priv->mutex);
10772}
10773
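/* Create the driver workqueue and wire up every deferred work item
 * (scans, association, LED and RF-kill handling) as well as the IRQ
 * tasklet. */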
10774static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10775{
10776	int ret = 0;
10777
10778	priv->workqueue = create_workqueue(DRV_NAME);
10779	init_waitqueue_head(&priv->wait_command_queue);
10780	init_waitqueue_head(&priv->wait_state);
10781
10782	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10783	INIT_WORK(&priv->associate, ipw_bg_associate);
10784	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10785	INIT_WORK(&priv->system_config, ipw_system_config);
10786	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10787	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10788	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10789	INIT_WORK(&priv->up, ipw_bg_up);
10790	INIT_WORK(&priv->down, ipw_bg_down);
10791	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10792	INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10793	INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10794	INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10795	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10796	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10797	INIT_WORK(&priv->roam, ipw_bg_roam);
10798	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10799	INIT_WORK(&priv->link_up, ipw_bg_link_up);
10800	INIT_WORK(&priv->link_down, ipw_bg_link_down);
10801	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10802	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10803	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10804	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10805
10806#ifdef CONFIG_IPW2200_QOS
10807	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10808#endif				/* CONFIG_IPW2200_QOS */
10809
10810	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10811		     ipw_irq_tasklet, (unsigned long)priv);
10812
10813	return ret;
10814}
10815
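/* libipw security callback: mirror the supplied key material, active
 * key, auth mode and security level into priv->ieee->sec and update the
 * privacy/shared-key capability bits accordingly. */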
10816static void shim__set_security(struct net_device *dev,
10817			       struct libipw_security *sec)
10818{
10819	struct ipw_priv *priv = libipw_priv(dev);
10820	int i;
10821	for (i = 0; i < 4; i++) {
10822		if (sec->flags & (1 << i)) {
10823			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10824			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10825			if (sec->key_sizes[i] == 0)
10826				priv->ieee->sec.flags &= ~(1 << i);
10827			else {
10828				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10829				       sec->key_sizes[i]);
10830				priv->ieee->sec.flags |= (1 << i);
10831			}
10832			priv->status |= STATUS_SECURITY_UPDATED;
10833		} else if (sec->level != SEC_LEVEL_1)
10834			priv->ieee->sec.flags &= ~(1 << i);
10835	}
10836
10837	if (sec->flags & SEC_ACTIVE_KEY) {
10838		if (sec->active_key <= 3) {
10839			priv->ieee->sec.active_key = sec->active_key;
10840			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10841		} else
10842			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10843		priv->status |= STATUS_SECURITY_UPDATED;
10844	} else
10845		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10846
10847	if ((sec->flags & SEC_AUTH_MODE) &&
10848	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10849		priv->ieee->sec.auth_mode = sec->auth_mode;
10850		priv->ieee->sec.flags |= SEC_AUTH_MODE;
10851		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10852			priv->capability |= CAP_SHARED_KEY;
10853		else
10854			priv->capability &= ~CAP_SHARED_KEY;
10855		priv->status |= STATUS_SECURITY_UPDATED;
10856	}
10857
10858	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10859		priv->ieee->sec.flags |= SEC_ENABLED;
10860		priv->ieee->sec.enabled = sec->enabled;
10861		priv->status |= STATUS_SECURITY_UPDATED;
10862		if (sec->enabled)
10863			priv->capability |= CAP_PRIVACY_ON;
10864		else
10865			priv->capability &= ~CAP_PRIVACY_ON;
10866	}
10867
10868	if (sec->flags & SEC_ENCRYPT)
10869		priv->ieee->sec.encrypt = sec->encrypt;
10870
10871	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10872		priv->ieee->sec.level = sec->level;
10873		priv->ieee->sec.flags |= SEC_LEVEL;
10874		priv->status |= STATUS_SECURITY_UPDATED;
10875	}
10876
10877	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10878		ipw_set_hwcrypto_keys(priv);
10879
10880	/* To match current functionality of ipw2100 (which works well w/
10881	 * various supplicants), we don't force a disassociate if the
10882	 * privacy capability changes ... */
10883#if 0
10884	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10885	    (((priv->assoc_request.capability &
10886	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10887	     (!(priv->assoc_request.capability &
10888		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10889		IPW_DEBUG_ASSOC("Disassociating due to capability "
10890				"change.\n");
10891		ipw_disassociate(priv);
10892	}
10893#endif
10894}
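
/*
 * Editor's note -- hypothetical usage sketch, not part of the driver.
 * shim__set_security() consumes a struct libipw_security in which bit i
 * of ->flags marks key slot i as carrying a new key and SEC_ACTIVE_KEY
 * selects the default transmit key.  A caller loading a 40-bit WEP key
 * into slot 0 might fill the structure in roughly like this (the key
 * bytes are made up for illustration).
 */
#if 0
{
	struct libipw_security sec;

	memset(&sec, 0, sizeof(sec));
	sec.flags = (1 << 0) | SEC_ACTIVE_KEY | SEC_LEVEL;
	sec.level = SEC_LEVEL_1;		/* static WEP */
	sec.active_key = 0;
	sec.key_sizes[0] = 5;			/* 40-bit key */
	memcpy(sec.keys[0], "\x01\x02\x03\x04\x05", 5);

	shim__set_security(dev, &sec);
}
#endif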
10895
10896static int init_supported_rates(struct ipw_priv *priv,
10897				struct ipw_supported_rates *rates)
10898{
10899	/* TODO: Mask out rates based on priv->rates_mask */
10900
10901	memset(rates, 0, sizeof(*rates));
10902	/* configure supported rates */
10903	switch (priv->ieee->freq_band) {
10904	case LIBIPW_52GHZ_BAND:
10905		rates->ieee_mode = IPW_A_MODE;
10906		rates->purpose = IPW_RATE_CAPABILITIES;
10907		ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10908					LIBIPW_OFDM_DEFAULT_RATES_MASK);
10909		break;
10910
10911	default:		/* Mixed or 2.4 GHz */
10912		rates->ieee_mode = IPW_G_MODE;
10913		rates->purpose = IPW_RATE_CAPABILITIES;
10914		ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10915				       LIBIPW_CCK_DEFAULT_RATES_MASK);
10916		if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10917			ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10918						LIBIPW_OFDM_DEFAULT_RATES_MASK);
10919		}
10920		break;
10921	}
10922
10923	return 0;
10924}
10925
10926static int ipw_config(struct ipw_priv *priv)
10927{
10928	/* This is only called from ipw_up, which resets/reloads the firmware,
10929	   so we don't need to first disable the card before we configure
10930	   it */
10931	if (ipw_set_tx_power(priv))
10932		goto error;
10933
10934	/* initialize adapter address */
10935	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10936		goto error;
10937
10938	/* set basic system config settings */
10939	init_sys_config(&priv->sys_config);
10940
10941	/* Support Bluetooth if we have BT h/w on board, and the user wants to.
10942	 * Does not support BT priority yet (don't abort or defer our Tx) */
10943	if (bt_coexist) {
10944		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10945
10946		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10947			priv->sys_config.bt_coexistence
10948			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10949		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10950			priv->sys_config.bt_coexistence
10951			    |= CFG_BT_COEXISTENCE_OOB;
10952	}
10953
10954#ifdef CONFIG_IPW2200_PROMISCUOUS
10955	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10956		priv->sys_config.accept_all_data_frames = 1;
10957		priv->sys_config.accept_non_directed_frames = 1;
10958		priv->sys_config.accept_all_mgmt_bcpr = 1;
10959		priv->sys_config.accept_all_mgmt_frames = 1;
10960	}
10961#endif
10962
10963	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10964		priv->sys_config.answer_broadcast_ssid_probe = 1;
10965	else
10966		priv->sys_config.answer_broadcast_ssid_probe = 0;
10967
10968	if (ipw_send_system_config(priv))
10969		goto error;
10970
10971	init_supported_rates(priv, &priv->rates);
10972	if (ipw_send_supported_rates(priv, &priv->rates))
10973		goto error;
10974
10975	/* Set request-to-send threshold */
10976	if (priv->rts_threshold) {
10977		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10978			goto error;
10979	}
10980#ifdef CONFIG_IPW2200_QOS
10981	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10982	ipw_qos_activate(priv, NULL);
10983#endif				/* CONFIG_IPW2200_QOS */
10984
10985	if (ipw_set_random_seed(priv))
10986		goto error;
10987
10988	/* final state transition to the RUN state */
10989	if (ipw_send_host_complete(priv))
10990		goto error;
10991
10992	priv->status |= STATUS_INIT;
10993
10994	ipw_led_init(priv);
10995	ipw_led_radio_on(priv);
10996	priv->notif_missed_beacons = 0;
10997
10998	/* Set hardware WEP key if it is configured. */
10999	if ((priv->capability & CAP_PRIVACY_ON) &&
11000	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
11001	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
11002		ipw_set_hwcrypto_keys(priv);
11003
11004	return 0;
11005
11006      error:
11007	return -EIO;
11008}
11009
11010/*
11011 * NOTE:
11012 *
11013 * These tables have been tested in conjunction with the
11014 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
11015 *
11016 * Altering these values, using them on other hardware, or using them in
11017 * geographies not intended for resale of the above-mentioned Intel
11018 * adapters has not been tested.
11019 *
11020 * Remember to update the table in README.ipw2200 when changing this
11021 * table.
11022 *
11023 */
11024static const struct libipw_geo ipw_geos[] = {
11025	{			/* Restricted */
11026	 "---",
11027	 .bg_channels = 11,
11028	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11029		{2427, 4}, {2432, 5}, {2437, 6},
11030		{2442, 7}, {2447, 8}, {2452, 9},
11031		{2457, 10}, {2462, 11}},
11032	 },
11033
11034	{			/* Custom US/Canada */
11035	 "ZZF",
11036	 .bg_channels = 11,
11037	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11038		{2427, 4}, {2432, 5}, {2437, 6},
11039		{2442, 7}, {2447, 8}, {2452, 9},
11040		{2457, 10}, {2462, 11}},
11041	 .a_channels = 8,
11042	 .a = {{5180, 36},
11043	       {5200, 40},
11044	       {5220, 44},
11045	       {5240, 48},
11046	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11047	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11048	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11049	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
11050	 },
11051
11052	{			/* Rest of World */
11053	 "ZZD",
11054	 .bg_channels = 13,
11055	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11056		{2427, 4}, {2432, 5}, {2437, 6},
11057		{2442, 7}, {2447, 8}, {2452, 9},
11058		{2457, 10}, {2462, 11}, {2467, 12},
11059		{2472, 13}},
11060	 },
11061
11062	{			/* Custom USA & Europe & High */
11063	 "ZZA",
11064	 .bg_channels = 11,
11065	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11066		{2427, 4}, {2432, 5}, {2437, 6},
11067		{2442, 7}, {2447, 8}, {2452, 9},
11068		{2457, 10}, {2462, 11}},
11069	 .a_channels = 13,
11070	 .a = {{5180, 36},
11071	       {5200, 40},
11072	       {5220, 44},
11073	       {5240, 48},
11074	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11075	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11076	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11077	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11078	       {5745, 149},
11079	       {5765, 153},
11080	       {5785, 157},
11081	       {5805, 161},
11082	       {5825, 165}},
11083	 },
11084
11085	{			/* Custom NA & Europe */
11086	 "ZZB",
11087	 .bg_channels = 11,
11088	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11089		{2427, 4}, {2432, 5}, {2437, 6},
11090		{2442, 7}, {2447, 8}, {2452, 9},
11091		{2457, 10}, {2462, 11}},
11092	 .a_channels = 13,
11093	 .a = {{5180, 36},
11094	       {5200, 40},
11095	       {5220, 44},
11096	       {5240, 48},
11097	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11098	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11099	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11100	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11101	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11102	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11103	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11104	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11105	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11106	 },
11107
11108	{			/* Custom Japan */
11109	 "ZZC",
11110	 .bg_channels = 11,
11111	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11112		{2427, 4}, {2432, 5}, {2437, 6},
11113		{2442, 7}, {2447, 8}, {2452, 9},
11114		{2457, 10}, {2462, 11}},
11115	 .a_channels = 4,
11116	 .a = {{5170, 34}, {5190, 38},
11117	       {5210, 42}, {5230, 46}},
11118	 },
11119
11120	{			/* Custom */
11121	 "ZZM",
11122	 .bg_channels = 11,
11123	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11124		{2427, 4}, {2432, 5}, {2437, 6},
11125		{2442, 7}, {2447, 8}, {2452, 9},
11126		{2457, 10}, {2462, 11}},
11127	 },
11128
11129	{			/* Europe */
11130	 "ZZE",
11131	 .bg_channels = 13,
11132	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11133		{2427, 4}, {2432, 5}, {2437, 6},
11134		{2442, 7}, {2447, 8}, {2452, 9},
11135		{2457, 10}, {2462, 11}, {2467, 12},
11136		{2472, 13}},
11137	 .a_channels = 19,
11138	 .a = {{5180, 36},
11139	       {5200, 40},
11140	       {5220, 44},
11141	       {5240, 48},
11142	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11143	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11144	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11145	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11146	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11147	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11148	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11149	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11150	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11151	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11152	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11153	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11154	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11155	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11156	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11157	 },
11158
11159	{			/* Custom Japan */
11160	 "ZZJ",
11161	 .bg_channels = 14,
11162	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11163		{2427, 4}, {2432, 5}, {2437, 6},
11164		{2442, 7}, {2447, 8}, {2452, 9},
11165		{2457, 10}, {2462, 11}, {2467, 12},
11166		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11167	 .a_channels = 4,
11168	 .a = {{5170, 34}, {5190, 38},
11169	       {5210, 42}, {5230, 46}},
11170	 },
11171
11172	{			/* Rest of World */
11173	 "ZZR",
11174	 .bg_channels = 14,
11175	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11176		{2427, 4}, {2432, 5}, {2437, 6},
11177		{2442, 7}, {2447, 8}, {2452, 9},
11178		{2457, 10}, {2462, 11}, {2467, 12},
11179		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11180			     LIBIPW_CH_PASSIVE_ONLY}},
11181	 },
11182
11183	{			/* High Band */
11184	 "ZZH",
11185	 .bg_channels = 13,
11186	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11187		{2427, 4}, {2432, 5}, {2437, 6},
11188		{2442, 7}, {2447, 8}, {2452, 9},
11189		{2457, 10}, {2462, 11},
11190		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11191		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11192	 .a_channels = 4,
11193	 .a = {{5745, 149}, {5765, 153},
11194	       {5785, 157}, {5805, 161}},
11195	 },
11196
11197	{			/* Custom Europe */
11198	 "ZZG",
11199	 .bg_channels = 13,
11200	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11201		{2427, 4}, {2432, 5}, {2437, 6},
11202		{2442, 7}, {2447, 8}, {2452, 9},
11203		{2457, 10}, {2462, 11},
11204		{2467, 12}, {2472, 13}},
11205	 .a_channels = 4,
11206	 .a = {{5180, 36}, {5200, 40},
11207	       {5220, 44}, {5240, 48}},
11208	 },
11209
11210	{			/* Europe */
11211	 "ZZK",
11212	 .bg_channels = 13,
11213	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11214		{2427, 4}, {2432, 5}, {2437, 6},
11215		{2442, 7}, {2447, 8}, {2452, 9},
11216		{2457, 10}, {2462, 11},
11217		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11218		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11219	 .a_channels = 24,
11220	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11221	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11222	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11223	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11224	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11225	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11226	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11227	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11228	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11229	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11230	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11231	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11232	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11233	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11234	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11235	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11236	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11237	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11238	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11239	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11240	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11241	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11242	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11243	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11244	 },
11245
11246	{			/* Europe */
11247	 "ZZL",
11248	 .bg_channels = 11,
11249	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11250		{2427, 4}, {2432, 5}, {2437, 6},
11251		{2442, 7}, {2447, 8}, {2452, 9},
11252		{2457, 10}, {2462, 11}},
11253	 .a_channels = 13,
11254	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11255	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11256	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11257	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11258	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11259	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11260	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11261	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11262	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11263	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11264	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11265	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11266	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11267	 }
11268};
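
/*
 * Editor's note -- illustrative sketch only, not part of the driver.
 * Each ipw_geos[] entry pairs a three-letter SKU code (matched against
 * the EEPROM country code in ipw_up() below) with its allowed 2.4 GHz
 * and 5 GHz channel lists; every channel is a {center-frequency-in-MHz,
 * channel-number, optional LIBIPW_CH_* restriction flags} triplet.  A
 * hypothetical extra entry would follow the same shape:
 */
#if 0
	{			/* Hypothetical region */
	 "XXX",
	 .bg_channels = 11,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}},
	 .a_channels = 1,
	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY}},
	 },
#endif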
11269
11270#define MAX_HW_RESTARTS 5
11271static int ipw_up(struct ipw_priv *priv)
11272{
11273	int rc, i, j;
11274
11275	/* Age scan list entries found before suspend */
11276	if (priv->suspend_time) {
11277		libipw_networks_age(priv->ieee, priv->suspend_time);
11278		priv->suspend_time = 0;
11279	}
11280
11281	if (priv->status & STATUS_EXIT_PENDING)
11282		return -EIO;
11283
11284	if (cmdlog && !priv->cmdlog) {
11285		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11286				       GFP_KERNEL);
11287		if (priv->cmdlog == NULL) {
11288			IPW_ERROR("Error allocating %d command log entries.\n",
11289				  cmdlog);
11290			return -ENOMEM;
11291		} else {
11292			priv->cmdlog_len = cmdlog;
11293		}
11294	}
11295
11296	for (i = 0; i < MAX_HW_RESTARTS; i++) {
11297		/* Load the microcode, firmware, and eeprom.
11298		 * Also start the clocks. */
11299		rc = ipw_load(priv);
11300		if (rc) {
11301			IPW_ERROR("Unable to load firmware: %d\n", rc);
11302			return rc;
11303		}
11304
11305		ipw_init_ordinals(priv);
11306		if (!(priv->config & CFG_CUSTOM_MAC))
11307			eeprom_parse_mac(priv, priv->mac_addr);
11308		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11309		memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
11310
11311		for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11312			if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11313				    ipw_geos[j].name, 3))
11314				break;
11315		}
11316		if (j == ARRAY_SIZE(ipw_geos)) {
11317			IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11318				    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11319				    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11320				    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11321			j = 0;
11322		}
11323		if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
11324			IPW_WARNING("Could not set geography.");
11325			IPW_WARNING("Could not set geography.\n");
11326		}
11327
11328		if (priv->status & STATUS_RF_KILL_SW) {
11329			IPW_WARNING("Radio disabled by module parameter.\n");
11330			return 0;
11331		} else if (rf_kill_active(priv)) {
11332			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11333				    "Kill switch must be turned off for "
11334				    "wireless networking to work.\n");
11335			queue_delayed_work(priv->workqueue, &priv->rf_kill,
11336					   2 * HZ);
11337			return 0;
11338		}
11339
11340		rc = ipw_config(priv);
11341		if (!rc) {
11342			IPW_DEBUG_INFO("Configured device on count %i\n", i);
11343
11344			/* If configured to try and auto-associate, kick
11345			 * off a scan. */
11346			queue_delayed_work(priv->workqueue,
11347					   &priv->request_scan, 0);
11348
11349			return 0;
11350		}
11351
11352		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11353		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11354			       i, MAX_HW_RESTARTS);
11355
11356		/* We had an error bringing up the hardware, so take it
11357		 * all the way back down so we can try again */
11358		ipw_down(priv);
11359	}
11360
11361	/* We tried to restart and configure the device for as long as our
11362	 * patience could withstand */
11363	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11364
11365	return -EIO;
11366}
11367
11368static void ipw_bg_up(struct work_struct *work)
11369{
11370	struct ipw_priv *priv =
11371		container_of(work, struct ipw_priv, up);
11372	mutex_lock(&priv->mutex);
11373	ipw_up(priv);
11374	mutex_unlock(&priv->mutex);
11375}
11376
11377static void ipw_deinit(struct ipw_priv *priv)
11378{
11379	int i;
11380
11381	if (priv->status & STATUS_SCANNING) {
11382		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11383		ipw_abort_scan(priv);
11384	}
11385
11386	if (priv->status & STATUS_ASSOCIATED) {
11387		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11388		ipw_disassociate(priv);
11389	}
11390
11391	ipw_led_shutdown(priv);
11392
11393	/* Wait up to 1s for status to change to not scanning and not
11394	 * associated (disassociation can take a while for a ful 802.11
11395	 * associated (disassociation can take a while for a full 802.11
11396	 * exchange) */
11397			     (STATUS_DISASSOCIATING |
11398			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11399		udelay(10);
11400
11401	if (priv->status & (STATUS_DISASSOCIATING |
11402			    STATUS_ASSOCIATED | STATUS_SCANNING))
11403		IPW_DEBUG_INFO("Still associated or scanning...\n");
11404	else
11405		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11406
11407	/* Attempt to disable the card */
11408	ipw_send_card_disable(priv, 0);
11409
11410	priv->status &= ~STATUS_INIT;
11411}
11412
11413static void ipw_down(struct ipw_priv *priv)
11414{
11415	int exit_pending = priv->status & STATUS_EXIT_PENDING;
11416
11417	priv->status |= STATUS_EXIT_PENDING;
11418
11419	if (ipw_is_init(priv))
11420		ipw_deinit(priv);
11421
11422	/* Wipe out the EXIT_PENDING status bit if we are not actually
11423	 * exiting the module */
11424	if (!exit_pending)
11425		priv->status &= ~STATUS_EXIT_PENDING;
11426
11427	/* tell the device to stop sending interrupts */
11428	ipw_disable_interrupts(priv);
11429
11430	/* Clear all bits but the RF Kill */
11431	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11432	netif_carrier_off(priv->net_dev);
11433
11434	ipw_stop_nic(priv);
11435
11436	ipw_led_radio_off(priv);
11437}
11438
11439static void ipw_bg_down(struct work_struct *work)
11440{
11441	struct ipw_priv *priv =
11442		container_of(work, struct ipw_priv, down);
11443	mutex_lock(&priv->mutex);
11444	ipw_down(priv);
11445	mutex_unlock(&priv->mutex);
11446}
11447
11448/* Called by register_netdev() */
11449static int ipw_net_init(struct net_device *dev)
11450{
11451	int i, rc = 0;
11452	struct ipw_priv *priv = libipw_priv(dev);
11453	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11454	struct wireless_dev *wdev = &priv->ieee->wdev;
11455	mutex_lock(&priv->mutex);
11456
11457	if (ipw_up(priv)) {
11458		rc = -EIO;
11459		goto out;
11460	}
11461
11462	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11463
11464	/* fill-out priv->ieee->bg_band */
11465	if (geo->bg_channels) {
11466		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11467
11468		bg_band->band = IEEE80211_BAND_2GHZ;
11469		bg_band->n_channels = geo->bg_channels;
11470		bg_band->channels = kcalloc(geo->bg_channels,
11471					    sizeof(struct ieee80211_channel),
11472					    GFP_KERNEL);
11473		/* translate geo->bg to bg_band.channels */
11474		for (i = 0; i < geo->bg_channels; i++) {
11475			bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11476			bg_band->channels[i].center_freq = geo->bg[i].freq;
11477			bg_band->channels[i].hw_value = geo->bg[i].channel;
11478			bg_band->channels[i].max_power = geo->bg[i].max_power;
11479			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11480				bg_band->channels[i].flags |=
11481					IEEE80211_CHAN_PASSIVE_SCAN;
11482			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11483				bg_band->channels[i].flags |=
11484					IEEE80211_CHAN_NO_IBSS;
11485			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11486				bg_band->channels[i].flags |=
11487					IEEE80211_CHAN_RADAR;
11488			/* No equivalent for LIBIPW_CH_80211H_RULES,
11489			   LIBIPW_CH_UNIFORM_SPREADING, or
11490			   LIBIPW_CH_B_ONLY... */
11491		}
11492		/* point at bitrate info */
11493		bg_band->bitrates = ipw2200_bg_rates;
11494		bg_band->n_bitrates = ipw2200_num_bg_rates;
11495
11496		wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11497	}
11498
11499	/* fill-out priv->ieee->a_band */
11500	if (geo->a_channels) {
11501		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11502
11503		a_band->band = IEEE80211_BAND_5GHZ;
11504		a_band->n_channels = geo->a_channels;
11505		a_band->channels = kcalloc(geo->a_channels,
11506					   sizeof(struct ieee80211_channel),
11507					   GFP_KERNEL);
11508		/* translate geo->a to a_band.channels */
11509		for (i = 0; i < geo->a_channels; i++) {
11510			a_band->channels[i].band = IEEE80211_BAND_5GHZ;
11511			a_band->channels[i].center_freq = geo->a[i].freq;
11512			a_band->channels[i].hw_value = geo->a[i].channel;
11513			a_band->channels[i].max_power = geo->a[i].max_power;
11514			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11515				a_band->channels[i].flags |=
11516					IEEE80211_CHAN_PASSIVE_SCAN;
11517			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11518				a_band->channels[i].flags |=
11519					IEEE80211_CHAN_NO_IBSS;
11520			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11521				a_band->channels[i].flags |=
11522					IEEE80211_CHAN_RADAR;
11523			/* No equivalent for LIBIPW_CH_80211H_RULES,
11524			   LIBIPW_CH_UNIFORM_SPREADING, or
11525			   LIBIPW_CH_B_ONLY... */
11526		}
11527		/* point at bitrate info */
11528		a_band->bitrates = ipw2200_a_rates;
11529		a_band->n_bitrates = ipw2200_num_a_rates;
11530
11531		wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11532	}
11533
11534	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11535
11536	/* With that information in place, we can now register the wiphy... */
11537	if (wiphy_register(wdev->wiphy)) {
11538		rc = -EIO;
11539		goto out;
11540	}
11541
11542out:
11543	mutex_unlock(&priv->mutex);
11544	return rc;
11545}
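
/*
 * Editor's note -- defensive variant, not part of the driver.  The two
 * kcalloc() calls in ipw_net_init() above are not checked before the
 * channel arrays are written.  A guarded version of the 2.4 GHz
 * allocation would look roughly like this (the 5 GHz allocation would
 * be treated the same way):
 */
#if 0
	bg_band->channels = kcalloc(geo->bg_channels,
				    sizeof(struct ieee80211_channel),
				    GFP_KERNEL);
	if (!bg_band->channels) {
		rc = -ENOMEM;
		goto out;
	}
#endif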
11546
11547/* PCI driver stuff */
11548static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
11549	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11550	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11551	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11552	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11553	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11554	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11555	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11556	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11557	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11558	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11559	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11560	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11561	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11562	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11563	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11564	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11565	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11566	{PCI_VDEVICE(INTEL, 0x104f), 0},
11567	{PCI_VDEVICE(INTEL, 0x4220), 0},	/* BG */
11568	{PCI_VDEVICE(INTEL, 0x4221), 0},	/* BG */
11569	{PCI_VDEVICE(INTEL, 0x4223), 0},	/* ABG */
11570	{PCI_VDEVICE(INTEL, 0x4224), 0},	/* ABG */
11571
11572	/* required last entry */
11573	{0,}
11574};
11575
11576MODULE_DEVICE_TABLE(pci, card_ids);
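
/*
 * Editor's note -- illustration only.  The PCI_VDEVICE() entries in
 * card_ids[] are shorthand for a full struct pci_device_id with the
 * subsystem IDs wildcarded; the 0x4220 (2200BG) line, for example, is
 * equivalent to:
 */
#if 0
	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
#endif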
11577
11578static struct attribute *ipw_sysfs_entries[] = {
11579	&dev_attr_rf_kill.attr,
11580	&dev_attr_direct_dword.attr,
11581	&dev_attr_indirect_byte.attr,
11582	&dev_attr_indirect_dword.attr,
11583	&dev_attr_mem_gpio_reg.attr,
11584	&dev_attr_command_event_reg.attr,
11585	&dev_attr_nic_type.attr,
11586	&dev_attr_status.attr,
11587	&dev_attr_cfg.attr,
11588	&dev_attr_error.attr,
11589	&dev_attr_event_log.attr,
11590	&dev_attr_cmd_log.attr,
11591	&dev_attr_eeprom_delay.attr,
11592	&dev_attr_ucode_version.attr,
11593	&dev_attr_rtc.attr,
11594	&dev_attr_scan_age.attr,
11595	&dev_attr_led.attr,
11596	&dev_attr_speed_scan.attr,
11597	&dev_attr_net_stats.attr,
11598	&dev_attr_channels.attr,
11599#ifdef CONFIG_IPW2200_PROMISCUOUS
11600	&dev_attr_rtap_iface.attr,
11601	&dev_attr_rtap_filter.attr,
11602#endif
11603	NULL
11604};
11605
11606static struct attribute_group ipw_attribute_group = {
11607	.name = NULL,		/* put in device directory */
11608	.attrs = ipw_sysfs_entries,
11609};
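
/*
 * Editor's note -- usage note only.  Because .name is NULL, the
 * attributes listed above appear directly in the PCI device's sysfs
 * directory once ipw_pci_probe() calls sysfs_create_group(), e.g.
 * /sys/bus/pci/devices/0000:02:02.0/rf_kill (the bus address is of
 * course system specific).
 */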
11610
11611#ifdef CONFIG_IPW2200_PROMISCUOUS
11612static int ipw_prom_open(struct net_device *dev)
11613{
11614	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11615	struct ipw_priv *priv = prom_priv->priv;
11616
11617	IPW_DEBUG_INFO("prom dev->open\n");
11618	netif_carrier_off(dev);
11619
11620	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11621		priv->sys_config.accept_all_data_frames = 1;
11622		priv->sys_config.accept_non_directed_frames = 1;
11623		priv->sys_config.accept_all_mgmt_bcpr = 1;
11624		priv->sys_config.accept_all_mgmt_frames = 1;
11625
11626		ipw_send_system_config(priv);
11627	}
11628
11629	return 0;
11630}
11631
11632static int ipw_prom_stop(struct net_device *dev)
11633{
11634	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11635	struct ipw_priv *priv = prom_priv->priv;
11636
11637	IPW_DEBUG_INFO("prom dev->stop\n");
11638
11639	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11640		priv->sys_config.accept_all_data_frames = 0;
11641		priv->sys_config.accept_non_directed_frames = 0;
11642		priv->sys_config.accept_all_mgmt_bcpr = 0;
11643		priv->sys_config.accept_all_mgmt_frames = 0;
11644
11645		ipw_send_system_config(priv);
11646	}
11647
11648	return 0;
11649}
11650
11651static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11652					    struct net_device *dev)
11653{
11654	IPW_DEBUG_INFO("prom dev->xmit\n");
11655	dev_kfree_skb(skb);
11656	return NETDEV_TX_OK;
11657}
11658
11659static const struct net_device_ops ipw_prom_netdev_ops = {
11660	.ndo_open 		= ipw_prom_open,
11661	.ndo_stop		= ipw_prom_stop,
11662	.ndo_start_xmit		= ipw_prom_hard_start_xmit,
11663	.ndo_change_mtu		= libipw_change_mtu,
11664	.ndo_set_mac_address 	= eth_mac_addr,
11665	.ndo_validate_addr	= eth_validate_addr,
11666};
11667
11668static int ipw_prom_alloc(struct ipw_priv *priv)
11669{
11670	int rc = 0;
11671
11672	if (priv->prom_net_dev)
11673		return -EPERM;
11674
11675	priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11676	if (priv->prom_net_dev == NULL)
11677		return -ENOMEM;
11678
11679	priv->prom_priv = libipw_priv(priv->prom_net_dev);
11680	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11681	priv->prom_priv->priv = priv;
11682
11683	strcpy(priv->prom_net_dev->name, "rtap%d");
11684	memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11685
11686	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11687	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11688
11689	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11690	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11691
11692	rc = register_netdev(priv->prom_net_dev);
11693	if (rc) {
11694		free_libipw(priv->prom_net_dev, 1);
11695		priv->prom_net_dev = NULL;
11696		return rc;
11697	}
11698
11699	return 0;
11700}
11701
11702static void ipw_prom_free(struct ipw_priv *priv)
11703{
11704	if (!priv->prom_net_dev)
11705		return;
11706
11707	unregister_netdev(priv->prom_net_dev);
11708	free_libipw(priv->prom_net_dev, 1);
11709
11710	priv->prom_net_dev = NULL;
11711}
11712
11713#endif
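
/*
 * Editor's note -- usage illustration only.  With
 * CONFIG_IPW2200_PROMISCUOUS enabled and the module loaded with
 * rtap_iface=1, ipw_prom_alloc() above registers a second net_device
 * named rtap%d that hands up radiotap-framed traffic, so a sniffer can
 * simply be pointed at it, e.g.:
 *
 *	modprobe ipw2200 rtap_iface=1
 *	tcpdump -i rtap0
 *
 * (the interface index depends on what else is already registered).
 */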
11714
11715static const struct net_device_ops ipw_netdev_ops = {
11716	.ndo_init		= ipw_net_init,
11717	.ndo_open		= ipw_net_open,
11718	.ndo_stop		= ipw_net_stop,
11719	.ndo_set_multicast_list	= ipw_net_set_multicast_list,
11720	.ndo_set_mac_address	= ipw_net_set_mac_address,
11721	.ndo_start_xmit		= libipw_xmit,
11722	.ndo_change_mtu		= libipw_change_mtu,
11723	.ndo_validate_addr	= eth_validate_addr,
11724};
11725
11726static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11727				   const struct pci_device_id *ent)
11728{
11729	int err = 0;
11730	struct net_device *net_dev;
11731	void __iomem *base;
11732	u32 length, val;
11733	struct ipw_priv *priv;
11734	int i;
11735
11736	net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11737	if (net_dev == NULL) {
11738		err = -ENOMEM;
11739		goto out;
11740	}
11741
11742	priv = libipw_priv(net_dev);
11743	priv->ieee = netdev_priv(net_dev);
11744
11745	priv->net_dev = net_dev;
11746	priv->pci_dev = pdev;
11747	ipw_debug_level = debug;
11748	spin_lock_init(&priv->irq_lock);
11749	spin_lock_init(&priv->lock);
11750	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11751		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11752
11753	mutex_init(&priv->mutex);
11754	if (pci_enable_device(pdev)) {
11755		err = -ENODEV;
11756		goto out_free_libipw;
11757	}
11758
11759	pci_set_master(pdev);
11760
11761	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11762	if (!err)
11763		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11764	if (err) {
11765		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11766		goto out_pci_disable_device;
11767	}
11768
11769	pci_set_drvdata(pdev, priv);
11770
11771	err = pci_request_regions(pdev, DRV_NAME);
11772	if (err)
11773		goto out_pci_disable_device;
11774
11775	/* We disable the RETRY_TIMEOUT register (0x41) to keep
11776	 * PCI Tx retries from interfering with C3 CPU state */
11777	pci_read_config_dword(pdev, 0x40, &val);
11778	if ((val & 0x0000ff00) != 0)
11779		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11780
11781	length = pci_resource_len(pdev, 0);
11782	priv->hw_len = length;
11783
11784	base = pci_ioremap_bar(pdev, 0);
11785	if (!base) {
11786		err = -ENODEV;
11787		goto out_pci_release_regions;
11788	}
11789
11790	priv->hw_base = base;
11791	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11792	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11793
11794	err = ipw_setup_deferred_work(priv);
11795	if (err) {
11796		IPW_ERROR("Unable to setup deferred work\n");
11797		goto out_iounmap;
11798	}
11799
11800	ipw_sw_reset(priv, 1);
11801
11802	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11803	if (err) {
11804		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11805		goto out_destroy_workqueue;
11806	}
11807
11808	SET_NETDEV_DEV(net_dev, &pdev->dev);
11809
11810	mutex_lock(&priv->mutex);
11811
11812	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11813	priv->ieee->set_security = shim__set_security;
11814	priv->ieee->is_queue_full = ipw_net_is_queue_full;
11815
11816#ifdef CONFIG_IPW2200_QOS
11817	priv->ieee->is_qos_active = ipw_is_qos_active;
11818	priv->ieee->handle_probe_response = ipw_handle_beacon;
11819	priv->ieee->handle_beacon = ipw_handle_probe_response;
11820	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11821#endif				/* CONFIG_IPW2200_QOS */
11822
11823	priv->ieee->perfect_rssi = -20;
11824	priv->ieee->worst_rssi = -85;
11825
11826	net_dev->netdev_ops = &ipw_netdev_ops;
11827	priv->wireless_data.spy_data = &priv->ieee->spy_data;
11828	net_dev->wireless_data = &priv->wireless_data;
11829	net_dev->wireless_handlers = &ipw_wx_handler_def;
11830	net_dev->ethtool_ops = &ipw_ethtool_ops;
11831	net_dev->irq = pdev->irq;
11832	net_dev->base_addr = (unsigned long)priv->hw_base;
11833	net_dev->mem_start = pci_resource_start(pdev, 0);
11834	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11835
11836	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11837	if (err) {
11838		IPW_ERROR("failed to create sysfs device attributes\n");
11839		mutex_unlock(&priv->mutex);
11840		goto out_release_irq;
11841	}
11842
11843	mutex_unlock(&priv->mutex);
11844	err = register_netdev(net_dev);
11845	if (err) {
11846		IPW_ERROR("failed to register network device\n");
11847		goto out_remove_sysfs;
11848	}
11849
11850#ifdef CONFIG_IPW2200_PROMISCUOUS
11851	if (rtap_iface) {
11852	        err = ipw_prom_alloc(priv);
11853		if (err) {
11854			IPW_ERROR("Failed to register promiscuous network "
11855				  "device (error %d).\n", err);
11856			unregister_netdev(priv->net_dev);
11857			goto out_remove_sysfs;
11858		}
11859	}
11860#endif
11861
11862	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11863	       "channels, %d 802.11a channels)\n",
11864	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11865	       priv->ieee->geo.a_channels);
11866
11867	return 0;
11868
11869      out_remove_sysfs:
11870	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11871      out_release_irq:
11872	free_irq(pdev->irq, priv);
11873      out_destroy_workqueue:
11874	destroy_workqueue(priv->workqueue);
11875	priv->workqueue = NULL;
11876      out_iounmap:
11877	iounmap(priv->hw_base);
11878      out_pci_release_regions:
11879	pci_release_regions(pdev);
11880      out_pci_disable_device:
11881	pci_disable_device(pdev);
11882	pci_set_drvdata(pdev, NULL);
11883      out_free_libipw:
11884	free_libipw(priv->net_dev, 0);
11885      out:
11886	return err;
11887}
11888
11889static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11890{
11891	struct ipw_priv *priv = pci_get_drvdata(pdev);
11892	struct list_head *p, *q;
11893	int i;
11894
11895	if (!priv)
11896		return;
11897
11898	mutex_lock(&priv->mutex);
11899
11900	priv->status |= STATUS_EXIT_PENDING;
11901	ipw_down(priv);
11902	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11903
11904	mutex_unlock(&priv->mutex);
11905
11906	unregister_netdev(priv->net_dev);
11907
11908	if (priv->rxq) {
11909		ipw_rx_queue_free(priv, priv->rxq);
11910		priv->rxq = NULL;
11911	}
11912	ipw_tx_queue_free(priv);
11913
11914	if (priv->cmdlog) {
11915		kfree(priv->cmdlog);
11916		priv->cmdlog = NULL;
11917	}
11918	/* ipw_down will ensure that there is no more pending work
11919	 * in the workqueue, so we can safely remove them now. */
11920	cancel_delayed_work(&priv->adhoc_check);
11921	cancel_delayed_work(&priv->gather_stats);
11922	cancel_delayed_work(&priv->request_scan);
11923	cancel_delayed_work(&priv->request_direct_scan);
11924	cancel_delayed_work(&priv->request_passive_scan);
11925	cancel_delayed_work(&priv->scan_event);
11926	cancel_delayed_work(&priv->rf_kill);
11927	cancel_delayed_work(&priv->scan_check);
11928	destroy_workqueue(priv->workqueue);
11929	priv->workqueue = NULL;
11930
11931	/* Free MAC hash list for ADHOC */
11932	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11933		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11934			list_del(p);
11935			kfree(list_entry(p, struct ipw_ibss_seq, list));
11936		}
11937	}
11938
11939	kfree(priv->error);
11940	priv->error = NULL;
11941
11942#ifdef CONFIG_IPW2200_PROMISCUOUS
11943	ipw_prom_free(priv);
11944#endif
11945
11946	free_irq(pdev->irq, priv);
11947	iounmap(priv->hw_base);
11948	pci_release_regions(pdev);
11949	pci_disable_device(pdev);
11950	pci_set_drvdata(pdev, NULL);
11951	/* wiphy_unregister needs to be here, before free_libipw */
11952	wiphy_unregister(priv->ieee->wdev.wiphy);
11953	kfree(priv->ieee->a_band.channels);
11954	kfree(priv->ieee->bg_band.channels);
11955	free_libipw(priv->net_dev, 0);
11956	free_firmware();
11957}
11958
11959#ifdef CONFIG_PM
11960static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11961{
11962	struct ipw_priv *priv = pci_get_drvdata(pdev);
11963	struct net_device *dev = priv->net_dev;
11964
11965	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11966
11967	/* Take down the device; powers it off, etc. */
11968	ipw_down(priv);
11969
11970	/* Remove the PRESENT state of the device */
11971	netif_device_detach(dev);
11972
11973	pci_save_state(pdev);
11974	pci_disable_device(pdev);
11975	pci_set_power_state(pdev, pci_choose_state(pdev, state));
11976
11977	priv->suspend_at = get_seconds();
11978
11979	return 0;
11980}
11981
11982static int ipw_pci_resume(struct pci_dev *pdev)
11983{
11984	struct ipw_priv *priv = pci_get_drvdata(pdev);
11985	struct net_device *dev = priv->net_dev;
11986	int err;
11987	u32 val;
11988
11989	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11990
11991	pci_set_power_state(pdev, PCI_D0);
11992	err = pci_enable_device(pdev);
11993	if (err) {
11994		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11995		       dev->name);
11996		return err;
11997	}
11998	pci_restore_state(pdev);
11999
12000	/*
12001	 * Suspend/Resume resets the PCI configuration space, so we have to
12002	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
12003	 * from interfering with C3 CPU state. pci_restore_state won't help
12004	 * here since it only restores the first 64 bytes pci config header.
12005	 */
12006	pci_read_config_dword(pdev, 0x40, &val);
12007	if ((val & 0x0000ff00) != 0)
12008		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
12009
12010	/* Set the device back into the PRESENT state; this will also wake
12011	 * the queue if needed */
12012	netif_device_attach(dev);
12013
12014	priv->suspend_time = get_seconds() - priv->suspend_at;
12015
12016	/* Bring the device back up */
12017	queue_work(priv->workqueue, &priv->up);
12018
12019	return 0;
12020}
12021#endif
12022
12023static void ipw_pci_shutdown(struct pci_dev *pdev)
12024{
12025	struct ipw_priv *priv = pci_get_drvdata(pdev);
12026
12027	/* Take down the device; powers it off, etc. */
12028	ipw_down(priv);
12029
12030	pci_disable_device(pdev);
12031}
12032
12033/* driver initialization stuff */
12034static struct pci_driver ipw_driver = {
12035	.name = DRV_NAME,
12036	.id_table = card_ids,
12037	.probe = ipw_pci_probe,
12038	.remove = __devexit_p(ipw_pci_remove),
12039#ifdef CONFIG_PM
12040	.suspend = ipw_pci_suspend,
12041	.resume = ipw_pci_resume,
12042#endif
12043	.shutdown = ipw_pci_shutdown,
12044};
12045
12046static int __init ipw_init(void)
12047{
12048	int ret;
12049
12050	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
12051	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
12052
12053	ret = pci_register_driver(&ipw_driver);
12054	if (ret) {
12055		IPW_ERROR("Unable to initialize PCI module\n");
12056		return ret;
12057	}
12058
12059	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
12060	if (ret) {
12061		IPW_ERROR("Unable to create driver sysfs file\n");
12062		pci_unregister_driver(&ipw_driver);
12063		return ret;
12064	}
12065
12066	return ret;
12067}
12068
12069static void __exit ipw_exit(void)
12070{
12071	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
12072	pci_unregister_driver(&ipw_driver);
12073}
12074
12075module_param(disable, int, 0444);
12076MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
12077
12078module_param(associate, int, 0444);
12079MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12080
12081module_param(auto_create, int, 0444);
12082MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12083
12084module_param_named(led, led_support, int, 0444);
12085MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
12086
12087module_param(debug, int, 0444);
12088MODULE_PARM_DESC(debug, "debug output mask");
12089
12090module_param_named(channel, default_channel, int, 0444);
12091MODULE_PARM_DESC(channel, "channel to limit association to (default 0 [ANY])");
12092
12093#ifdef CONFIG_IPW2200_PROMISCUOUS
12094module_param(rtap_iface, int, 0444);
12095MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12096#endif
12097
12098#ifdef CONFIG_IPW2200_QOS
12099module_param(qos_enable, int, 0444);
12100MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
12101
12102module_param(qos_burst_enable, int, 0444);
12103MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12104
12105module_param(qos_no_ack_mask, int, 0444);
12106MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12107
12108module_param(burst_duration_CCK, int, 0444);
12109MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12110
12111module_param(burst_duration_OFDM, int, 0444);
12112MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12113#endif				/* CONFIG_IPW2200_QOS */
12114
12115#ifdef CONFIG_IPW2200_MONITOR
12116module_param_named(mode, network_mode, int, 0444);
12117MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12118#else
12119module_param_named(mode, network_mode, int, 0444);
12120MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12121#endif
12122
12123module_param(bt_coexist, int, 0444);
12124MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12125
12126module_param(hwcrypto, int, 0444);
12127MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12128
12129module_param(cmdlog, int, 0444);
12130MODULE_PARM_DESC(cmdlog,
12131		 "allocate a ring buffer for logging firmware commands");
12132
12133module_param(roaming, int, 0444);
12134MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12135
12136module_param(antenna, int, 0444);
12137MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (choose the one with lower background noise), 3=Aux");
12138
12139module_exit(ipw_exit);
12140module_init(ipw_init);
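
/*
 * Editor's note -- example invocation only; the parameter names are the
 * ones declared above, the values are illustrative.  A debug-oriented
 * load might look like:
 *
 *	modprobe ipw2200 debug=0xffffffff led=1 hwcrypto=0 mode=0
 *
 * See the MODULE_PARM_DESC strings above and README.ipw2200 for the
 * meaning of each option.
 */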
12141