main.c revision fa99e963b1976374db1d89aea854e8740b92796d
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "hif-ops.h"
19#include "cfg80211.h"
20#include "target.h"
21#include "debug.h"
22
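/*
 * Look up the station entry whose MAC address matches node_addr. Only AP
 * mode keeps a station list, so for any other network type max_conn is 0
 * and the lookup returns NULL.
 */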
23struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
24{
25	struct ath6kl *ar = vif->ar;
26	struct ath6kl_sta *conn = NULL;
27	u8 i, max_conn;
28
29	max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
30
31	for (i = 0; i < max_conn; i++) {
32		if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
33			conn = &ar->sta_list[i];
34			break;
35		}
36	}
37
38	return conn;
39}
40
41struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid)
42{
43	struct ath6kl_sta *conn = NULL;
44	u8 ctr;
45
46	for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
47		if (ar->sta_list[ctr].aid == aid) {
48			conn = &ar->sta_list[ctr];
49			break;
50		}
51	}
52	return conn;
53}
54
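/*
 * Record a newly associated station. The station list slot is derived from
 * the AID (slot = aid - 1); the slot is then marked as in use in
 * ar->sta_list_index and the matching per-station stats entry is tagged
 * with the AID.
 */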
55static void ath6kl_add_new_sta(struct ath6kl *ar, u8 *mac, u16 aid, u8 *wpaie,
56			u8 ielen, u8 keymgmt, u8 ucipher, u8 auth)
57{
58	struct ath6kl_sta *sta;
59	u8 free_slot;
60
61	free_slot = aid - 1;
62
63	sta = &ar->sta_list[free_slot];
64	memcpy(sta->mac, mac, ETH_ALEN);
65	if (ielen <= ATH6KL_MAX_IE)
66		memcpy(sta->wpa_ie, wpaie, ielen);
67	sta->aid = aid;
68	sta->keymgmt = keymgmt;
69	sta->ucipher = ucipher;
70	sta->auth = auth;
71
72	ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
73	ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid);
74}
75
76static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
77{
78	struct ath6kl_sta *sta = &ar->sta_list[i];
79
80	/* empty the queued pkts in the PS queue if any */
81	spin_lock_bh(&sta->psq_lock);
82	skb_queue_purge(&sta->psq);
83	spin_unlock_bh(&sta->psq_lock);
84
85	memset(&ar->ap_stats.sta[sta->aid - 1], 0,
86	       sizeof(struct wmi_per_sta_stat));
87	memset(sta->mac, 0, ETH_ALEN);
88	memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
89	sta->aid = 0;
90	sta->sta_flags = 0;
91
92	ar->sta_list_index = ar->sta_list_index & ~(1 << i);
}
95
96static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason)
97{
98	u8 i, removed = 0;
99
100	if (is_zero_ether_addr(mac))
101		return removed;
102
103	if (is_broadcast_ether_addr(mac)) {
		ath6kl_dbg(ATH6KL_DBG_TRC, "deleting all stations\n");
105
106		for (i = 0; i < AP_MAX_NUM_STA; i++) {
107			if (!is_zero_ether_addr(ar->sta_list[i].mac)) {
108				ath6kl_sta_cleanup(ar, i);
109				removed = 1;
110			}
111		}
112	} else {
113		for (i = 0; i < AP_MAX_NUM_STA; i++) {
114			if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) {
115				ath6kl_dbg(ATH6KL_DBG_TRC,
116					   "deleting station %pM aid=%d reason=%d\n",
117					   mac, ar->sta_list[i].aid, reason);
118				ath6kl_sta_cleanup(ar, i);
119				removed = 1;
120				break;
121			}
122		}
123	}
124
125	return removed;
126}
127
128enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac)
129{
130	struct ath6kl *ar = devt;
131	return ar->ac2ep_map[ac];
132}
133
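/*
 * Cookies are preallocated bookkeeping entries (ar->cookie_mem) kept on a
 * singly linked free list headed by ar->cookie_list: allocation pops the
 * list head, ath6kl_free_cookie() pushes an entry back onto the front.
 */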
134struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar)
135{
136	struct ath6kl_cookie *cookie;
137
138	cookie = ar->cookie_list;
139	if (cookie != NULL) {
140		ar->cookie_list = cookie->arc_list_next;
141		ar->cookie_count--;
142	}
143
144	return cookie;
145}
146
147void ath6kl_cookie_init(struct ath6kl *ar)
148{
149	u32 i;
150
151	ar->cookie_list = NULL;
152	ar->cookie_count = 0;
153
154	memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem));
155
156	for (i = 0; i < MAX_COOKIE_NUM; i++)
157		ath6kl_free_cookie(ar, &ar->cookie_mem[i]);
158}
159
160void ath6kl_cookie_cleanup(struct ath6kl *ar)
161{
162	ar->cookie_list = NULL;
163	ar->cookie_count = 0;
164}
165
166void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
167{
168	/* Insert first */
169
170	if (!ar || !cookie)
171		return;
172
173	cookie->arc_list_next = ar->cookie_list;
174	ar->cookie_list = cookie;
175	ar->cookie_count++;
176}
177
/* Set the window address register (using 4-byte register access). */
179static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
180{
181	int status;
182	s32 i;
183	__le32 addr_val;
184
185	/*
186	 * Write bytes 1,2,3 of the register to set the upper address bytes,
187	 * the LSB is written last to initiate the access cycle
188	 */
189
190	for (i = 1; i <= 3; i++) {
191		/*
192		 * Fill the buffer with the address byte value we want to
193		 * hit 4 times. No need to worry about endianness as the
194		 * same byte is copied to all four bytes of addr_val at
195		 * any time.
196		 */
197		memset((u8 *)&addr_val, ((u8 *)&addr)[i], 4);
198
199		/*
200		 * Hit each byte of the register address with a 4-byte
201		 * write operation to the same address, this is a harmless
202		 * operation.
203		 */
204		status = hif_read_write_sync(ar, reg_addr + i, (u8 *)&addr_val,
205					     4, HIF_WR_SYNC_BYTE_FIX);
206		if (status)
207			break;
208	}
209
210	if (status) {
211		ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n",
212			   addr, reg_addr);
213		return status;
214	}
215
216	/*
217	 * Write the address register again, this time write the whole
218	 * 4-byte value. The effect here is that the LSB write causes the
219	 * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
220	 * effect since we are writing the same values again
221	 */
222	addr_val = cpu_to_le32(addr);
223	status = hif_read_write_sync(ar, reg_addr,
224				     (u8 *)&(addr_val),
225				     4, HIF_WR_SYNC_BYTE_INC);
226
227	if (status) {
228		ath6kl_err("failed to write 0x%x to window reg: 0x%X\n",
229			   addr, reg_addr);
230		return status;
231	}
232
233	return 0;
234}
235
236/*
237 * Read from the hardware through its diagnostic window. No cooperation
238 * from the firmware is required for this.
239 */
240int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value)
241{
242	int ret;
243
244	/* set window register to start read cycle */
245	ret = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, address);
246	if (ret)
247		return ret;
248
249	/* read the data */
250	ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) value,
251				  sizeof(*value), HIF_RD_SYNC_BYTE_INC);
252	if (ret) {
253		ath6kl_warn("failed to read32 through diagnose window: %d\n",
254			    ret);
255		return ret;
256	}
257
258	return 0;
259}
260
261/*
262 * Write to the ATH6KL through its diagnostic window. No cooperation from
263 * the Target is required for this.
264 */
265int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value)
266{
267	int ret;
268
269	/* set write data */
270	ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) &value,
271				  sizeof(value), HIF_WR_SYNC_BYTE_INC);
272	if (ret) {
		ath6kl_err("failed to write 0x%x to 0x%x through diagnose window\n",
			   le32_to_cpu(value), address);
275		return ret;
276	}
277
278	/* set window register, which starts the write cycle */
279	return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
280				      address);
281}
282
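/*
 * Read a word-aligned region of target memory via repeated 32-bit
 * diagnostic window reads. A hypothetical usage sketch:
 *
 *	u32 buf[16];
 *	ret = ath6kl_diag_read(ar, address, buf, sizeof(buf));
 *
 * where 'address' is a target address (callers typically convert it with
 * TARG_VTOP() first) and the length must be a multiple of 4.
 */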
283int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length)
284{
285	u32 count, *buf = data;
286	int ret;
287
288	if (WARN_ON(length % 4))
289		return -EINVAL;
290
291	for (count = 0; count < length / 4; count++, address += 4) {
292		ret = ath6kl_diag_read32(ar, address, &buf[count]);
293		if (ret)
294			return ret;
295	}
296
297	return 0;
298}
299
300int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length)
301{
302	u32 count;
303	__le32 *buf = data;
304	int ret;
305
306	if (WARN_ON(length % 4))
307		return -EINVAL;
308
309	for (count = 0; count < length / 4; count++, address += 4) {
310		ret = ath6kl_diag_write32(ar, address, buf[count]);
311		if (ret)
312			return ret;
313	}
314
315	return 0;
316}
317
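/*
 * Pull the firmware debug log out of the target through the diagnostic
 * window: locate the log header via hi_dbglog_hdr, then walk the ring of
 * dbglog buffers and pass each buffer's payload to
 * ath6kl_debug_fwlog_event(). The loop counter is only a safety net
 * against a corrupted (non-terminating) ring.
 */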
318int ath6kl_read_fwlogs(struct ath6kl *ar)
319{
320	struct ath6kl_dbglog_hdr debug_hdr;
321	struct ath6kl_dbglog_buf debug_buf;
322	u32 address, length, dropped, firstbuf, debug_hdr_addr;
323	int ret = 0, loop;
324	u8 *buf;
325
326	buf = kmalloc(ATH6KL_FWLOG_PAYLOAD_SIZE, GFP_KERNEL);
327	if (!buf)
328		return -ENOMEM;
329
330	address = TARG_VTOP(ar->target_type,
331			    ath6kl_get_hi_item_addr(ar,
332						    HI_ITEM(hi_dbglog_hdr)));
333
334	ret = ath6kl_diag_read32(ar, address, &debug_hdr_addr);
335	if (ret)
336		goto out;
337
338	/* Get the contents of the ring buffer */
339	if (debug_hdr_addr == 0) {
340		ath6kl_warn("Invalid address for debug_hdr_addr\n");
341		ret = -EINVAL;
342		goto out;
343	}
344
	address = TARG_VTOP(ar->target_type, debug_hdr_addr);
	ret = ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr));
	if (ret)
		goto out;
347
348	address = TARG_VTOP(ar->target_type,
349			    le32_to_cpu(debug_hdr.dbuf_addr));
350	firstbuf = address;
351	dropped = le32_to_cpu(debug_hdr.dropped);
	ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
	if (ret)
		goto out;
353
354	loop = 100;
355
356	do {
357		address = TARG_VTOP(ar->target_type,
358				    le32_to_cpu(debug_buf.buffer_addr));
359		length = le32_to_cpu(debug_buf.length);
360
361		if (length != 0 && (le32_to_cpu(debug_buf.length) <=
362				    le32_to_cpu(debug_buf.bufsize))) {
363			length = ALIGN(length, 4);
364
365			ret = ath6kl_diag_read(ar, address,
366					       buf, length);
367			if (ret)
368				goto out;
369
370			ath6kl_debug_fwlog_event(ar, buf, length);
371		}
372
373		address = TARG_VTOP(ar->target_type,
374				    le32_to_cpu(debug_buf.next));
		ret = ath6kl_diag_read(ar, address, &debug_buf,
				       sizeof(debug_buf));
		if (ret)
			goto out;
378
379		loop--;
380
381		if (WARN_ON(loop == 0)) {
382			ret = -ETIMEDOUT;
383			goto out;
384		}
385	} while (address != firstbuf);
386
387out:
388	kfree(buf);
389
390	return ret;
391}
392
393/* FIXME: move to a better place, target.h? */
394#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
395#define AR6004_RESET_CONTROL_ADDRESS 0x00004000
396
397void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
			 bool wait_for_compltn, bool cold_reset)
399{
400	int status = 0;
401	u32 address;
402	__le32 data;
403
404	if (target_type != TARGET_TYPE_AR6003 &&
405		target_type != TARGET_TYPE_AR6004)
406		return;
407
408	data = cold_reset ? cpu_to_le32(RESET_CONTROL_COLD_RST) :
409			    cpu_to_le32(RESET_CONTROL_MBOX_RST);
410
411	switch (target_type) {
412	case TARGET_TYPE_AR6003:
413		address = AR6003_RESET_CONTROL_ADDRESS;
414		break;
415	case TARGET_TYPE_AR6004:
416		address = AR6004_RESET_CONTROL_ADDRESS;
417		break;
418	default:
419		address = AR6003_RESET_CONTROL_ADDRESS;
420		break;
421	}
422
423	status = ath6kl_diag_write32(ar, address, data);
424
425	if (status)
426		ath6kl_err("failed to reset target\n");
427}
428
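/*
 * Push any configured static WEP keys down to the target as group keys;
 * the key at def_txkey_index is additionally marked for transmit use.
 */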
429static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
430{
431	u8 index;
432	u8 keyusage;
433
434	for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
435		if (vif->wep_key_list[index].key_len) {
436			keyusage = GROUP_USAGE;
437			if (index == vif->def_txkey_index)
438				keyusage |= TX_USAGE;
439
440			ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx,
441					      index,
442					      WEP_CRYPT,
443					      keyusage,
444					      vif->wep_key_list[index].key_len,
445					      NULL,
446					      vif->wep_key_list[index].key,
447					      KEY_OP_INIT_VAL, NULL,
448					      NO_SYNC_WMIFLAG);
449		}
450	}
451}
452
453void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
454{
455	struct ath6kl *ar = vif->ar;
456	struct ath6kl_req_key *ik;
457	int res;
458	u8 key_rsc[ATH6KL_KEY_SEQ_LEN];
459
460	ik = &ar->ap_mode_bkey;
461
462	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);
463
464	switch (vif->auth_mode) {
465	case NONE_AUTH:
466		if (vif->prwise_crypto == WEP_CRYPT)
467			ath6kl_install_static_wep_keys(vif);
468		break;
469	case WPA_PSK_AUTH:
470	case WPA2_PSK_AUTH:
471	case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
472		if (!ik->valid)
473			break;
474
475		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed addkey for "
476			   "the initial group key for AP mode\n");
477		memset(key_rsc, 0, sizeof(key_rsc));
478		res = ath6kl_wmi_addkey_cmd(
479			ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
480			GROUP_USAGE, ik->key_len, key_rsc, ik->key,
481			KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
482		if (res) {
483			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed "
484				   "addkey failed: %d\n", res);
485		}
486		break;
487	}
488
489	ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
490	set_bit(CONNECTED, &vif->flags);
491	netif_carrier_on(vif->ndev);
492}
493
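/*
 * A station has (re)associated with our AP: parse the (re)association
 * request for a WPA/RSN/WPS IE, record the station locally and report the
 * new station to cfg80211/userspace.
 */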
494void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
495				u8 keymgmt, u8 ucipher, u8 auth,
496				u8 assoc_req_len, u8 *assoc_info)
497{
498	struct ath6kl *ar = vif->ar;
499	u8 *ies = NULL, *wpa_ie = NULL, *pos;
500	size_t ies_len = 0;
501	struct station_info sinfo;
502
503	ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
504
505	if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
506		struct ieee80211_mgmt *mgmt =
507			(struct ieee80211_mgmt *) assoc_info;
508		if (ieee80211_is_assoc_req(mgmt->frame_control) &&
509		    assoc_req_len >= sizeof(struct ieee80211_hdr_3addr) +
510		    sizeof(mgmt->u.assoc_req)) {
511			ies = mgmt->u.assoc_req.variable;
512			ies_len = assoc_info + assoc_req_len - ies;
513		} else if (ieee80211_is_reassoc_req(mgmt->frame_control) &&
514			   assoc_req_len >= sizeof(struct ieee80211_hdr_3addr)
515			   + sizeof(mgmt->u.reassoc_req)) {
516			ies = mgmt->u.reassoc_req.variable;
517			ies_len = assoc_info + assoc_req_len - ies;
518		}
519	}
520
521	pos = ies;
522	while (pos && pos + 1 < ies + ies_len) {
523		if (pos + 2 + pos[1] > ies + ies_len)
524			break;
525		if (pos[0] == WLAN_EID_RSN)
526			wpa_ie = pos; /* RSN IE */
527		else if (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
528			 pos[1] >= 4 &&
529			 pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2) {
530			if (pos[5] == 0x01)
531				wpa_ie = pos; /* WPA IE */
532			else if (pos[5] == 0x04) {
533				wpa_ie = pos; /* WPS IE */
534				break; /* overrides WPA/RSN IE */
535			}
536		}
537		pos += 2 + pos[1];
538	}
539
540	ath6kl_add_new_sta(ar, mac_addr, aid, wpa_ie,
541			   wpa_ie ? 2 + wpa_ie[1] : 0,
542			   keymgmt, ucipher, auth);
543
544	/* send event to application */
545	memset(&sinfo, 0, sizeof(sinfo));
546
547	/* TODO: sinfo.generation */
548
549	sinfo.assoc_req_ies = ies;
550	sinfo.assoc_req_ies_len = ies_len;
551	sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
552
553	cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
554
555	netif_wake_queue(vif->ndev);
556}
557
558/* Functions for Tx credit handling */
559void ath6kl_credit_init(struct htc_credit_state_info *cred_info,
560			struct list_head *ep_list,
561			int tot_credits)
562{
563	struct htc_endpoint_credit_dist *cur_ep_dist;
564	int count;
565
566	cred_info->cur_free_credits = tot_credits;
567	cred_info->total_avail_credits = tot_credits;
568
569	list_for_each_entry(cur_ep_dist, ep_list, list) {
570		if (cur_ep_dist->endpoint == ENDPOINT_0)
571			continue;
572
573		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
574
575		if (tot_credits > 4) {
576			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
577			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
578				ath6kl_deposit_credit_to_ep(cred_info,
579						cur_ep_dist,
580						cur_ep_dist->cred_min);
581				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
582			}
583		}
584
585		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
586			ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
587						    cur_ep_dist->cred_min);
588			/*
589			 * Control service is always marked active, it
590			 * never goes inactive EVER.
591			 */
592			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
593		} else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
594			/* this is the lowest priority data endpoint */
595			cred_info->lowestpri_ep_dist = cur_ep_dist->list;
596
597		/*
598		 * Streams have to be created (explicit | implicit) for all
599		 * kinds of traffic. BE endpoints are also inactive in the
600		 * beginning. When BE traffic starts it creates implicit
601		 * streams that redistributes credits.
602		 *
603		 * Note: all other endpoints have minimums set but are
604		 * initially given NO credits. credits will be distributed
605		 * as traffic activity demands
606		 */
607	}
608
609	WARN_ON(cred_info->cur_free_credits <= 0);
610
611	list_for_each_entry(cur_ep_dist, ep_list, list) {
612		if (cur_ep_dist->endpoint == ENDPOINT_0)
613			continue;
614
615		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
616			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
617		else {
618			/*
619			 * For the remaining data endpoints, we assume that
620			 * each cred_per_msg are the same. We use a simple
621			 * calculation here, we take the remaining credits
622			 * and determine how many max messages this can
623			 * cover and then set each endpoint's normal value
624			 * equal to 3/4 this amount.
625			 */
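			/*
			 * Example with made-up numbers: 24 free credits and
			 * cred_per_msg == 6 gives count = 24, scaled to 18
			 * by the 3/4 factor, so cred_norm becomes 18.
			 */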
626			count = (cred_info->cur_free_credits /
627				 cur_ep_dist->cred_per_msg)
628				* cur_ep_dist->cred_per_msg;
629			count = (count * 3) >> 2;
630			count = max(count, cur_ep_dist->cred_per_msg);
631			cur_ep_dist->cred_norm = count;
632
633		}
634	}
635}
636
637/* initialize and setup credit distribution */
638int ath6kl_setup_credit_dist(void *htc_handle,
639			     struct htc_credit_state_info *cred_info)
640{
641	u16 servicepriority[5];
642
643	memset(cred_info, 0, sizeof(struct htc_credit_state_info));
644
645	servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
646	servicepriority[1] = WMI_DATA_VO_SVC;
647	servicepriority[2] = WMI_DATA_VI_SVC;
648	servicepriority[3] = WMI_DATA_BE_SVC;
649	servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
650
651	/* set priority list */
652	ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
653
654	return 0;
655}
656
657/* reduce an ep's credits back to a set limit */
658static void ath6kl_reduce_credits(struct htc_credit_state_info *cred_info,
659				  struct htc_endpoint_credit_dist *ep_dist,
660				  int limit)
661{
662	int credits;
663
664	ep_dist->cred_assngd = limit;
665
666	if (ep_dist->credits <= limit)
667		return;
668
669	credits = ep_dist->credits - limit;
670	ep_dist->credits -= credits;
671	cred_info->cur_free_credits += credits;
672}
673
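/*
 * Fold send-completion credit reports (cred_to_dist) back into each
 * endpoint's credit count, then trim anything above the assigned/normal
 * level (or everything, for an inactive endpoint with an empty tx queue)
 * back into the free pool.
 */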
674static void ath6kl_credit_update(struct htc_credit_state_info *cred_info,
675				 struct list_head *epdist_list)
676{
677	struct htc_endpoint_credit_dist *cur_dist_list;
678
679	list_for_each_entry(cur_dist_list, epdist_list, list) {
680		if (cur_dist_list->endpoint == ENDPOINT_0)
681			continue;
682
683		if (cur_dist_list->cred_to_dist > 0) {
684			cur_dist_list->credits +=
685					cur_dist_list->cred_to_dist;
686			cur_dist_list->cred_to_dist = 0;
687			if (cur_dist_list->credits >
688			    cur_dist_list->cred_assngd)
689				ath6kl_reduce_credits(cred_info,
690						cur_dist_list,
691						cur_dist_list->cred_assngd);
692
693			if (cur_dist_list->credits >
694			    cur_dist_list->cred_norm)
695				ath6kl_reduce_credits(cred_info, cur_dist_list,
696						      cur_dist_list->cred_norm);
697
698			if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
699				if (cur_dist_list->txq_depth == 0)
700					ath6kl_reduce_credits(cred_info,
701							      cur_dist_list, 0);
702			}
703		}
704	}
705}
706
707/*
708 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
709 * question.
710 */
711void ath6kl_seek_credits(struct htc_credit_state_info *cred_info,
712			 struct htc_endpoint_credit_dist *ep_dist)
713{
714	struct htc_endpoint_credit_dist *curdist_list;
715	int credits = 0;
716	int need;
717
718	if (ep_dist->svc_id == WMI_CONTROL_SVC)
719		goto out;
720
721	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
722	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
		if (ep_dist->cred_assngd >= ep_dist->cred_norm)
724			goto out;
725
726	/*
727	 * For all other services, we follow a simple algorithm of:
728	 *
729	 * 1. checking the free pool for credits
730	 * 2. checking lower priority endpoints for credits to take
731	 */
732
733	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
734
735	if (credits >= ep_dist->seek_cred)
736		goto out;
737
738	/*
739	 * We don't have enough in the free pool, try taking away from
740	 * lower priority services The rule for taking away credits:
741	 *
742	 *   1. Only take from lower priority endpoints
743	 *   2. Only take what is allocated above the minimum (never
744	 *      starve an endpoint completely)
745	 *   3. Only take what you need.
746	 */
747
748	list_for_each_entry_reverse(curdist_list,
749				    &cred_info->lowestpri_ep_dist,
750				    list) {
751		if (curdist_list == ep_dist)
752			break;
753
754		need = ep_dist->seek_cred - cred_info->cur_free_credits;
755
756		if ((curdist_list->cred_assngd - need) >=
757		     curdist_list->cred_min) {
758			/*
759			 * The current one has been allocated more than
760			 * it's minimum and it has enough credits assigned
761			 * above it's minimum to fulfill our need try to
762			 * take away just enough to fulfill our need.
763			 */
764			ath6kl_reduce_credits(cred_info, curdist_list,
765					      curdist_list->cred_assngd - need);
766
767			if (cred_info->cur_free_credits >=
768			    ep_dist->seek_cred)
769				break;
770		}
771
772		if (curdist_list->endpoint == ENDPOINT_0)
773			break;
774	}
775
776	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
777
778out:
779	/* did we find some credits? */
780	if (credits)
781		ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
782
783	ep_dist->seek_cred = 0;
784}
785
786/* redistribute credits based on activity change */
787static void ath6kl_redistribute_credits(struct htc_credit_state_info *info,
788					struct list_head *ep_dist_list)
789{
790	struct htc_endpoint_credit_dist *curdist_list;
791
792	list_for_each_entry(curdist_list, ep_dist_list, list) {
793		if (curdist_list->endpoint == ENDPOINT_0)
794			continue;
795
796		if ((curdist_list->svc_id == WMI_DATA_BK_SVC)  ||
797		    (curdist_list->svc_id == WMI_DATA_BE_SVC))
798			curdist_list->dist_flags |= HTC_EP_ACTIVE;
799
800		if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
801		    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
802			if (curdist_list->txq_depth == 0)
803				ath6kl_reduce_credits(info, curdist_list, 0);
804			else
805				ath6kl_reduce_credits(info,
806						curdist_list,
807						curdist_list->cred_min);
808		}
809	}
810}
811
/*
814 * This function is invoked whenever endpoints require credit
815 * distributions. A lock is held while this function is invoked, this
816 * function shall NOT block. The ep_dist_list is a list of distribution
817 * structures in prioritized order as defined by the call to the
818 * htc_set_credit_dist() api.
819 */
820void ath6kl_credit_distribute(struct htc_credit_state_info *cred_info,
821			      struct list_head *ep_dist_list,
822			      enum htc_credit_dist_reason reason)
823{
824	switch (reason) {
825	case HTC_CREDIT_DIST_SEND_COMPLETE:
826		ath6kl_credit_update(cred_info, ep_dist_list);
827		break;
828	case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
829		ath6kl_redistribute_credits(cred_info, ep_dist_list);
830		break;
831	default:
832		break;
833	}
834
835	WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
836	WARN_ON(cred_info->cur_free_credits < 0);
837}
838
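/*
 * Disconnect timer callback: reset the stored profile info and force a
 * disconnect of the vif.
 */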
839void disconnect_timer_handler(unsigned long ptr)
840{
841	struct net_device *dev = (struct net_device *)ptr;
842	struct ath6kl_vif *vif = netdev_priv(dev);
843
844	ath6kl_init_profile_info(vif);
845	ath6kl_disconnect(vif);
846}
847
848void ath6kl_disconnect(struct ath6kl_vif *vif)
849{
850	if (test_bit(CONNECTED, &vif->flags) ||
851	    test_bit(CONNECT_PEND, &vif->flags)) {
852		ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
853		/*
854		 * Disconnect command is issued, clear the connect pending
855		 * flag. The connected flag will be cleared in
856		 * disconnect event notification.
857		 */
858		clear_bit(CONNECT_PEND, &vif->flags);
859	}
860}
861
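/*
 * Prepare for deep sleep: fail or tear down any in-progress cfg80211 SME
 * state, disconnect if needed, disable scanning and drop the target into
 * REC_POWER power save, remembering the previous power mode in
 * saved_pwr_mode.
 */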
862void ath6kl_deep_sleep_enable(struct ath6kl *ar)
863{
864	struct ath6kl_vif *vif;
865
866	/* FIXME: for multi vif */
867	vif = ath6kl_vif_first(ar);
868	if (!vif) {
869		/* save the current power mode before enabling power save */
870		ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
871
872		if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
873			ath6kl_warn("ath6kl_deep_sleep_enable: "
874				    "wmi_powermode_cmd failed\n");
875		return;
876	}
877
878	switch (vif->sme_state) {
879	case SME_CONNECTING:
880		cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0,
881					NULL, 0,
882					WLAN_STATUS_UNSPECIFIED_FAILURE,
883					GFP_KERNEL);
884		break;
885	case SME_CONNECTED:
886	default:
887		/*
888		 * FIXME: oddly enough smeState is in DISCONNECTED during
889		 * suspend, why? Need to send disconnected event in that
890		 * state.
891		 */
892		cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
893		break;
894	}
895
896	if (test_bit(CONNECTED, &vif->flags) ||
897	    test_bit(CONNECT_PEND, &vif->flags))
898		ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
899
900	vif->sme_state = SME_DISCONNECTED;
901
902	/* disable scanning */
903	if (ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0xFFFF, 0, 0,
904				      0, 0, 0, 0, 0, 0, 0) != 0)
		ath6kl_warn("failed to disable scan during suspend\n");
907
908	ath6kl_cfg80211_scan_complete_event(vif, -ECANCELED);
909
910	/* save the current power mode before enabling power save */
911	ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
912
913	if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
914		ath6kl_warn("ath6kl_deep_sleep_enable: "
915			"wmi_powermode_cmd failed\n");
916}
917
918/* WMI Event handlers */
919
920static const char *get_hw_id_string(u32 id)
921{
922	switch (id) {
923	case AR6003_REV1_VERSION:
924		return "1.0";
925	case AR6003_REV2_VERSION:
926		return "2.0";
927	case AR6003_REV3_VERSION:
928		return "2.1.1";
929	default:
930		return "unknown";
931	}
932}
933
934void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
935{
936	struct ath6kl *ar = devt;
937
938	memcpy(ar->mac_addr, datap, ETH_ALEN);
939	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
940		   __func__, ar->mac_addr);
941
942	ar->version.wlan_ver = sw_ver;
943	ar->version.abi_ver = abi_ver;
944
945	snprintf(ar->wiphy->fw_version,
946		 sizeof(ar->wiphy->fw_version),
947		 "%u.%u.%u.%u",
948		 (ar->version.wlan_ver & 0xf0000000) >> 28,
949		 (ar->version.wlan_ver & 0x0f000000) >> 24,
950		 (ar->version.wlan_ver & 0x00ff0000) >> 16,
951		 (ar->version.wlan_ver & 0x0000ffff));
952
953	/* indicate to the waiting thread that the ready event was received */
954	set_bit(WMI_READY, &ar->flag);
955	wake_up(&ar->event_wq);
956
957	ath6kl_info("hw %s fw %s%s\n",
958		    get_hw_id_string(ar->wiphy->hw_version),
959		    ar->wiphy->fw_version,
960		    test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
961}
962
963void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
964{
965	struct ath6kl *ar = vif->ar;
966
967	ath6kl_cfg80211_scan_complete_event(vif, status);
968
969	if (!ar->usr_bss_filter) {
970		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
971		ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
972					 NONE_BSS_FILTER, 0);
973	}
974
975	ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status);
976}
977
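/*
 * WMI connect event: forward the event to cfg80211, record the new
 * BSSID/channel, program the configured listen interval for infrastructure
 * mode and atomically mark the interface connected with the carrier on.
 */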
978void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
979			  u16 listen_int, u16 beacon_int,
980			  enum network_type net_type, u8 beacon_ie_len,
981			  u8 assoc_req_len, u8 assoc_resp_len,
982			  u8 *assoc_info)
983{
984	struct ath6kl *ar = vif->ar;
985
986	ath6kl_cfg80211_connect_event(vif, channel, bssid,
987				      listen_int, beacon_int,
988				      net_type, beacon_ie_len,
989				      assoc_req_len, assoc_resp_len,
990				      assoc_info);
991
992	memcpy(vif->bssid, bssid, sizeof(vif->bssid));
993	vif->bss_ch = channel;
994
	if (vif->nw_type == INFRA_NETWORK)
996		ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
997					      ar->listen_intvl_t,
998					      ar->listen_intvl_b);
999
1000	netif_wake_queue(vif->ndev);
1001
1002	/* Update connect & link status atomically */
1003	spin_lock_bh(&vif->if_lock);
1004	set_bit(CONNECTED, &vif->flags);
1005	clear_bit(CONNECT_PEND, &vif->flags);
1006	netif_carrier_on(vif->ndev);
1007	spin_unlock_bh(&vif->if_lock);
1008
1009	aggr_reset_state(vif->aggr_cntxt);
1010	vif->reconnect_flag = 0;
1011
1012	if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
1013		memset(ar->node_map, 0, sizeof(ar->node_map));
1014		ar->node_num = 0;
1015		ar->next_ep_id = ENDPOINT_2;
1016	}
1017
1018	if (!ar->usr_bss_filter) {
1019		set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
1020		ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
1021					 CURRENT_BSS_FILTER, 0);
1022	}
1023}
1024
1025void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
1026{
1027	struct ath6kl_sta *sta;
1028	struct ath6kl *ar = vif->ar;
1029	u8 tsc[6];
1030
1031	/*
1032	 * For AP case, keyid will have aid of STA which sent pkt with
1033	 * MIC error. Use this aid to get MAC & send it to hostapd.
1034	 */
1035	if (vif->nw_type == AP_NETWORK) {
1036		sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
1037		if (!sta)
1038			return;
1039
1040		ath6kl_dbg(ATH6KL_DBG_TRC,
1041			   "ap tkip mic error received from aid=%d\n", keyid);
1042
1043		memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
1044		cfg80211_michael_mic_failure(vif->ndev, sta->mac,
1045					     NL80211_KEYTYPE_PAIRWISE, keyid,
1046					     tsc, GFP_KERNEL);
1047	} else
1048		ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
}
1051
1052static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
1053{
1054	struct wmi_target_stats *tgt_stats =
1055		(struct wmi_target_stats *) ptr;
1056	struct ath6kl *ar = vif->ar;
1057	struct target_stats *stats = &vif->target_stats;
1058	struct tkip_ccmp_stats *ccmp_stats;
1059	u8 ac;
1060
1061	if (len < sizeof(*tgt_stats))
1062		return;
1063
1064	ath6kl_dbg(ATH6KL_DBG_TRC, "updating target stats\n");
1065
1066	stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt);
1067	stats->tx_byte += le32_to_cpu(tgt_stats->stats.tx.byte);
1068	stats->tx_ucast_pkt += le32_to_cpu(tgt_stats->stats.tx.ucast_pkt);
1069	stats->tx_ucast_byte += le32_to_cpu(tgt_stats->stats.tx.ucast_byte);
1070	stats->tx_mcast_pkt += le32_to_cpu(tgt_stats->stats.tx.mcast_pkt);
1071	stats->tx_mcast_byte += le32_to_cpu(tgt_stats->stats.tx.mcast_byte);
1072	stats->tx_bcast_pkt  += le32_to_cpu(tgt_stats->stats.tx.bcast_pkt);
1073	stats->tx_bcast_byte += le32_to_cpu(tgt_stats->stats.tx.bcast_byte);
1074	stats->tx_rts_success_cnt +=
1075		le32_to_cpu(tgt_stats->stats.tx.rts_success_cnt);
1076
1077	for (ac = 0; ac < WMM_NUM_AC; ac++)
1078		stats->tx_pkt_per_ac[ac] +=
1079			le32_to_cpu(tgt_stats->stats.tx.pkt_per_ac[ac]);
1080
1081	stats->tx_err += le32_to_cpu(tgt_stats->stats.tx.err);
1082	stats->tx_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.fail_cnt);
1083	stats->tx_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.retry_cnt);
1084	stats->tx_mult_retry_cnt +=
1085		le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt);
1086	stats->tx_rts_fail_cnt +=
1087		le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt);
1088	stats->tx_ucast_rate =
1089	    ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate));
1090
1091	stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt);
1092	stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte);
1093	stats->rx_ucast_pkt += le32_to_cpu(tgt_stats->stats.rx.ucast_pkt);
1094	stats->rx_ucast_byte += le32_to_cpu(tgt_stats->stats.rx.ucast_byte);
1095	stats->rx_mcast_pkt += le32_to_cpu(tgt_stats->stats.rx.mcast_pkt);
1096	stats->rx_mcast_byte += le32_to_cpu(tgt_stats->stats.rx.mcast_byte);
1097	stats->rx_bcast_pkt += le32_to_cpu(tgt_stats->stats.rx.bcast_pkt);
1098	stats->rx_bcast_byte += le32_to_cpu(tgt_stats->stats.rx.bcast_byte);
1099	stats->rx_frgment_pkt += le32_to_cpu(tgt_stats->stats.rx.frgment_pkt);
1100	stats->rx_err += le32_to_cpu(tgt_stats->stats.rx.err);
1101	stats->rx_crc_err += le32_to_cpu(tgt_stats->stats.rx.crc_err);
1102	stats->rx_key_cache_miss +=
1103		le32_to_cpu(tgt_stats->stats.rx.key_cache_miss);
1104	stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err);
1105	stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame);
1106	stats->rx_ucast_rate =
1107	    ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate));
1108
1109	ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats;
1110
1111	stats->tkip_local_mic_fail +=
1112		le32_to_cpu(ccmp_stats->tkip_local_mic_fail);
1113	stats->tkip_cnter_measures_invoked +=
1114		le32_to_cpu(ccmp_stats->tkip_cnter_measures_invoked);
1115	stats->tkip_fmt_err += le32_to_cpu(ccmp_stats->tkip_fmt_err);
1116
1117	stats->ccmp_fmt_err += le32_to_cpu(ccmp_stats->ccmp_fmt_err);
1118	stats->ccmp_replays += le32_to_cpu(ccmp_stats->ccmp_replays);
1119
1120	stats->pwr_save_fail_cnt +=
1121		le32_to_cpu(tgt_stats->pm_stats.pwr_save_failure_cnt);
1122	stats->noise_floor_calib =
1123		a_sle32_to_cpu(tgt_stats->noise_floor_calib);
1124
1125	stats->cs_bmiss_cnt +=
1126		le32_to_cpu(tgt_stats->cserv_stats.cs_bmiss_cnt);
1127	stats->cs_low_rssi_cnt +=
1128		le32_to_cpu(tgt_stats->cserv_stats.cs_low_rssi_cnt);
1129	stats->cs_connect_cnt +=
1130		le16_to_cpu(tgt_stats->cserv_stats.cs_connect_cnt);
1131	stats->cs_discon_cnt +=
1132		le16_to_cpu(tgt_stats->cserv_stats.cs_discon_cnt);
1133
1134	stats->cs_ave_beacon_rssi =
1135		a_sle16_to_cpu(tgt_stats->cserv_stats.cs_ave_beacon_rssi);
1136
1137	stats->cs_last_roam_msec =
1138		tgt_stats->cserv_stats.cs_last_roam_msec;
1139	stats->cs_snr = tgt_stats->cserv_stats.cs_snr;
1140	stats->cs_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_rssi);
1141
1142	stats->lq_val = le32_to_cpu(tgt_stats->lq_val);
1143
1144	stats->wow_pkt_dropped +=
1145		le32_to_cpu(tgt_stats->wow_stats.wow_pkt_dropped);
1146	stats->wow_host_pkt_wakeups +=
1147		tgt_stats->wow_stats.wow_host_pkt_wakeups;
1148	stats->wow_host_evt_wakeups +=
1149		tgt_stats->wow_stats.wow_host_evt_wakeups;
1150	stats->wow_evt_discarded +=
1151		le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
1152
1153	if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
1154		clear_bit(STATS_UPDATE_PEND, &vif->flags);
1155		wake_up(&ar->event_wq);
1156	}
1157}
1158
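/* Accumulate a little-endian 32-bit counter in place. */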
1159static void ath6kl_add_le32(__le32 *var, __le32 val)
1160{
1161	*var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
1162}
1163
1164void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len)
1165{
1166	struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
1167	struct ath6kl *ar = vif->ar;
1168	struct wmi_ap_mode_stat *ap = &ar->ap_stats;
1169	struct wmi_per_sta_stat *st_ap, *st_p;
1170	u8 ac;
1171
1172	if (vif->nw_type == AP_NETWORK) {
1173		if (len < sizeof(*p))
1174			return;
1175
1176		for (ac = 0; ac < AP_MAX_NUM_STA; ac++) {
1177			st_ap = &ap->sta[ac];
1178			st_p = &p->sta[ac];
1179
1180			ath6kl_add_le32(&st_ap->tx_bytes, st_p->tx_bytes);
1181			ath6kl_add_le32(&st_ap->tx_pkts, st_p->tx_pkts);
1182			ath6kl_add_le32(&st_ap->tx_error, st_p->tx_error);
1183			ath6kl_add_le32(&st_ap->tx_discard, st_p->tx_discard);
1184			ath6kl_add_le32(&st_ap->rx_bytes, st_p->rx_bytes);
1185			ath6kl_add_le32(&st_ap->rx_pkts, st_p->rx_pkts);
1186			ath6kl_add_le32(&st_ap->rx_error, st_p->rx_error);
1187			ath6kl_add_le32(&st_ap->rx_discard, st_p->rx_discard);
1188		}
1189
1190	} else {
1191		ath6kl_update_target_stats(vif, ptr, len);
1192	}
1193}
1194
1195void ath6kl_wakeup_event(void *dev)
1196{
1197	struct ath6kl *ar = (struct ath6kl *) dev;
1198
1199	wake_up(&ar->event_wq);
1200}
1201
1202void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
1203{
1204	struct ath6kl *ar = (struct ath6kl *) devt;
1205
1206	ar->tx_pwr = tx_pwr;
1207	wake_up(&ar->event_wq);
1208}
1209
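/*
 * PS-Poll received from a station in power save: release one frame from
 * its PS queue, and clear the station's TIM bit (PVB) once the queue
 * drains.
 */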
1210void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
1211{
1212	struct ath6kl_sta *conn;
1213	struct sk_buff *skb;
1214	bool psq_empty = false;
1215	struct ath6kl *ar = vif->ar;
1216
1217	conn = ath6kl_find_sta_by_aid(ar, aid);
1218
1219	if (!conn)
1220		return;
1221	/*
1222	 * Send out a packet queued on ps queue. When the ps queue
1223	 * becomes empty update the PVB for this station.
1224	 */
1225	spin_lock_bh(&conn->psq_lock);
1226	psq_empty  = skb_queue_empty(&conn->psq);
1227	spin_unlock_bh(&conn->psq_lock);
1228
1229	if (psq_empty)
1230		/* TODO: Send out a NULL data frame */
1231		return;
1232
1233	spin_lock_bh(&conn->psq_lock);
1234	skb = skb_dequeue(&conn->psq);
1235	spin_unlock_bh(&conn->psq_lock);
1236
1237	conn->sta_flags |= STA_PS_POLLED;
1238	ath6kl_data_tx(skb, vif->ndev);
1239	conn->sta_flags &= ~STA_PS_POLLED;
1240
1241	spin_lock_bh(&conn->psq_lock);
1242	psq_empty  = skb_queue_empty(&conn->psq);
1243	spin_unlock_bh(&conn->psq_lock);
1244
1245	if (psq_empty)
1246		ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0);
1247}
1248
1249void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif)
1250{
1251	bool mcastq_empty = false;
1252	struct sk_buff *skb;
1253	struct ath6kl *ar = vif->ar;
1254
1255	/*
1256	 * If there are no associated STAs, ignore the DTIM expiry event.
1257	 * There can be potential race conditions where the last associated
1258	 * STA may disconnect & before the host could clear the 'Indicate
1259	 * DTIM' request to the firmware, the firmware would have just
1260	 * indicated a DTIM expiry event. The race is between 'clear DTIM
1261	 * expiry cmd' going from the host to the firmware & the DTIM
1262	 * expiry event happening from the firmware to the host.
1263	 */
1264	if (!ar->sta_list_index)
1265		return;
1266
1267	spin_lock_bh(&ar->mcastpsq_lock);
1268	mcastq_empty = skb_queue_empty(&ar->mcastpsq);
1269	spin_unlock_bh(&ar->mcastpsq_lock);
1270
1271	if (mcastq_empty)
1272		return;
1273
1274	/* set the STA flag to dtim_expired for the frame to go out */
1275	set_bit(DTIM_EXPIRED, &vif->flags);
1276
1277	spin_lock_bh(&ar->mcastpsq_lock);
1278	while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
1279		spin_unlock_bh(&ar->mcastpsq_lock);
1280
1281		ath6kl_data_tx(skb, vif->ndev);
1282
1283		spin_lock_bh(&ar->mcastpsq_lock);
1284	}
1285	spin_unlock_bh(&ar->mcastpsq_lock);
1286
1287	clear_bit(DTIM_EXPIRED, &vif->flags);
1288
1289	/* clear the LSB of the BitMapCtl field of the TIM IE */
1290	ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0);
1291}
1292
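/*
 * WMI disconnect event. In AP mode this reports a station leaving: remove
 * it from the station list and notify cfg80211. Otherwise it ends our own
 * association: notify cfg80211, reset aggregation state and clear the
 * connection bookkeeping.
 */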
1293void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
1294			     u8 assoc_resp_len, u8 *assoc_info,
1295			     u16 prot_reason_status)
1296{
1297	struct ath6kl *ar = vif->ar;
1298
1299	if (vif->nw_type == AP_NETWORK) {
1300		if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
1301			return;
1302
1303		/* if no more associated STAs, empty the mcast PS q */
1304		if (ar->sta_list_index == 0) {
1305			spin_lock_bh(&ar->mcastpsq_lock);
1306			skb_queue_purge(&ar->mcastpsq);
1307			spin_unlock_bh(&ar->mcastpsq_lock);
1308
1309			/* clear the LSB of the TIM IE's BitMapCtl field */
1310			if (test_bit(WMI_READY, &ar->flag))
1311				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
1312						       MCAST_AID, 0);
1313		}
1314
1315		if (!is_broadcast_ether_addr(bssid)) {
1316			/* send event to application */
1317			cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL);
1318		}
1319
1320		if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) {
1321			memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
1322			clear_bit(CONNECTED, &vif->flags);
1323		}
1324		return;
1325	}
1326
1327	ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
1328				       assoc_resp_len, assoc_info,
1329				       prot_reason_status);
1330
1331	aggr_reset_state(vif->aggr_cntxt);
1332
1333	del_timer(&vif->disconnect_timer);
1334
1335	ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT,
1336		   "disconnect reason is %d\n", reason);
1337
	/*
	 * If the event is due to a disconnect cmd from the host, only then
	 * will the target stop trying to connect. Under any other
	 * condition, the target keeps trying to connect.
	 */
1343	if (reason == DISCONNECT_CMD) {
1344		if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
1345			ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
1346						 NONE_BSS_FILTER, 0);
1347	} else {
1348		set_bit(CONNECT_PEND, &vif->flags);
1349		if (((reason == ASSOC_FAILED) &&
1350		    (prot_reason_status == 0x11)) ||
1351		    ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
1352		     && (vif->reconnect_flag == 1))) {
1353			set_bit(CONNECTED, &vif->flags);
1354			return;
1355		}
1356	}
1357
1358	/* update connect & link status atomically */
1359	spin_lock_bh(&vif->if_lock);
1360	clear_bit(CONNECTED, &vif->flags);
1361	netif_carrier_off(vif->ndev);
1362	spin_unlock_bh(&vif->if_lock);
1363
1364	if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1))
1365		vif->reconnect_flag = 0;
1366
1367	if (reason != CSERV_DISCONNECT)
1368		ar->user_key_ctrl = 0;
1369
1370	netif_stop_queue(vif->ndev);
1371	memset(vif->bssid, 0, sizeof(vif->bssid));
1372	vif->bss_ch = 0;
1373
1374	ath6kl_tx_data_cleanup(ar);
1375}
1376
1377struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar)
1378{
1379	struct ath6kl_vif *vif;
1380
1381	spin_lock(&ar->list_lock);
1382	if (list_empty(&ar->vif_list)) {
1383		spin_unlock(&ar->list_lock);
1384		return NULL;
1385	}
1386
1387	vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);
1388
1389	spin_unlock(&ar->list_lock);
1390
1391	return vif;
1392}
1393
1394static int ath6kl_open(struct net_device *dev)
1395{
1396	struct ath6kl_vif *vif = netdev_priv(dev);
1397
1398	set_bit(WLAN_ENABLED, &vif->flags);
1399
1400	if (test_bit(CONNECTED, &vif->flags)) {
1401		netif_carrier_on(dev);
1402		netif_wake_queue(dev);
1403	} else
1404		netif_carrier_off(dev);
1405
1406	return 0;
1407}
1408
1409static int ath6kl_close(struct net_device *dev)
1410{
1411	struct ath6kl *ar = ath6kl_priv(dev);
1412	struct ath6kl_vif *vif = netdev_priv(dev);
1413
1414	netif_stop_queue(dev);
1415
1416	ath6kl_disconnect(vif);
1417
1418	if (test_bit(WMI_READY, &ar->flag)) {
1419		if (ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0xFFFF,
1420					      0, 0, 0, 0, 0, 0, 0, 0, 0))
1421			return -EIO;
1422
1423		clear_bit(WLAN_ENABLED, &vif->flags);
1424	}
1425
1426	ath6kl_cfg80211_scan_complete_event(vif, -ECANCELED);
1427
1428	return 0;
1429}
1430
1431static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
1432{
1433	struct ath6kl_vif *vif = netdev_priv(dev);
1434
1435	return &vif->net_stats;
1436}
1437
static const struct net_device_ops ath6kl_netdev_ops = {
1439	.ndo_open               = ath6kl_open,
1440	.ndo_stop               = ath6kl_close,
1441	.ndo_start_xmit         = ath6kl_data_tx,
1442	.ndo_get_stats          = ath6kl_get_stats,
1443};
1444
1445void init_netdev(struct net_device *dev)
1446{
1447	dev->netdev_ops = &ath6kl_netdev_ops;
1448	dev->destructor = free_netdev;
1449	dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
1450
1451	dev->needed_headroom = ETH_HLEN;
1452	dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) +
1453				sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
1454				+ WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES;
1457}
1458