main.c revision 31024d99003486c90c793dea58b55f7920f0488b
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif-ops.h"
#include "cfg80211.h"
#include "target.h"
#include "debug.h"

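/*
 * Look up a connected station by MAC address. The station table is only
 * used in AP mode, so max_conn is zero (and NULL is returned) for any
 * other network type.
 */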
struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr)
{
	struct ath6kl_sta *conn = NULL;
	u8 i, max_conn;

	max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;

	for (i = 0; i < max_conn; i++) {
		if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
			conn = &ar->sta_list[i];
			break;
		}
	}

	return conn;
}

struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid)
{
	struct ath6kl_sta *conn = NULL;
	u8 ctr;

	for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
		if (ar->sta_list[ctr].aid == aid) {
			conn = &ar->sta_list[ctr];
			break;
		}
	}
	return conn;
}

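/*
 * Record a newly associated station. The station list is indexed by
 * (aid - 1), so the slot is derived directly from the association ID,
 * and the corresponding bit in sta_list_index marks the slot as in use.
 */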
static void ath6kl_add_new_sta(struct ath6kl *ar, u8 *mac, u16 aid, u8 *wpaie,
			u8 ielen, u8 keymgmt, u8 ucipher, u8 auth)
{
	struct ath6kl_sta *sta;
	u8 free_slot;

	free_slot = aid - 1;

	sta = &ar->sta_list[free_slot];
	memcpy(sta->mac, mac, ETH_ALEN);
	memcpy(sta->wpa_ie, wpaie, ielen);
	sta->aid = aid;
	sta->keymgmt = keymgmt;
	sta->ucipher = ucipher;
	sta->auth = auth;

	ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
	ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid);
}

static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
{
	struct ath6kl_sta *sta = &ar->sta_list[i];

	/* empty the queued pkts in the PS queue if any */
	spin_lock_bh(&sta->psq_lock);
	skb_queue_purge(&sta->psq);
	spin_unlock_bh(&sta->psq_lock);

	memset(&ar->ap_stats.sta[sta->aid - 1], 0,
	       sizeof(struct wmi_per_sta_stat));
	memset(sta->mac, 0, ETH_ALEN);
	memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
	sta->aid = 0;
	sta->sta_flags = 0;

	ar->sta_list_index = ar->sta_list_index & ~(1 << i);
}

static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason)
{
	u8 i, removed = 0;

	if (is_zero_ether_addr(mac))
		return removed;

	if (is_broadcast_ether_addr(mac)) {
		ath6kl_dbg(ATH6KL_DBG_TRC, "deleting all stations\n");

		for (i = 0; i < AP_MAX_NUM_STA; i++) {
			if (!is_zero_ether_addr(ar->sta_list[i].mac)) {
				ath6kl_sta_cleanup(ar, i);
				removed = 1;
			}
		}
	} else {
		for (i = 0; i < AP_MAX_NUM_STA; i++) {
			if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) {
				ath6kl_dbg(ATH6KL_DBG_TRC,
					   "deleting station %pM aid=%d reason=%d\n",
					   mac, ar->sta_list[i].aid, reason);
				ath6kl_sta_cleanup(ar, i);
				removed = 1;
				break;
			}
		}
	}

	return removed;
}

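/*
 * Map a WMM access category to the HTC endpoint that carries its traffic,
 * using the precomputed ac2ep_map table.
 */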
enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac)
{
	struct ath6kl *ar = devt;
	return ar->ac2ep_map[ac];
}

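/*
 * The cookie pool is a simple singly linked free list carved out of the
 * statically allocated cookie_mem array. ath6kl_alloc_cookie() pops the
 * head of the list and returns NULL when the pool is exhausted.
 */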
struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar)
{
	struct ath6kl_cookie *cookie;

	cookie = ar->cookie_list;
	if (cookie != NULL) {
		ar->cookie_list = cookie->arc_list_next;
		ar->cookie_count--;
	}

	return cookie;
}

void ath6kl_cookie_init(struct ath6kl *ar)
{
	u32 i;

	ar->cookie_list = NULL;
	ar->cookie_count = 0;

	memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem));

	for (i = 0; i < MAX_COOKIE_NUM; i++)
		ath6kl_free_cookie(ar, &ar->cookie_mem[i]);
}

void ath6kl_cookie_cleanup(struct ath6kl *ar)
{
	ar->cookie_list = NULL;
	ar->cookie_count = 0;
}

void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
{
	/* insert the cookie at the head of the free list */

	if (!ar || !cookie)
		return;

	cookie->arc_list_next = ar->cookie_list;
	ar->cookie_list = cookie;
	ar->cookie_count++;
}

/* set the window address register (using 4-byte register access). */
static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
{
	int status;
	u8 addr_val[4];
	s32 i;

	/*
	 * Write bytes 1,2,3 of the register to set the upper address bytes;
	 * the LSB is written last to initiate the access cycle.
	 */

	for (i = 1; i <= 3; i++) {
		/*
		 * Fill the buffer with the address byte value we want to
		 * hit 4 times.
		 */
		memset(addr_val, ((u8 *)&addr)[i], 4);

		/*
		 * Hit each byte of the register address with a 4-byte
		 * write operation to the same address; this is a harmless
		 * operation.
		 */
		status = hif_read_write_sync(ar, reg_addr + i, addr_val,
					     4, HIF_WR_SYNC_BYTE_FIX);
		if (status)
			break;
	}

	if (status) {
		ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n",
			   addr, reg_addr);
		return status;
	}

	/*
	 * Write the address register again, this time writing the whole
	 * 4-byte value. The effect here is that the LSB write causes the
	 * cycle to start; the extra write to bytes 1,2,3 has no effect
	 * since we are writing the same values again.
	 */
	status = hif_read_write_sync(ar, reg_addr, (u8 *)(&addr),
				     4, HIF_WR_SYNC_BYTE_INC);

	if (status) {
		ath6kl_err("failed to write 0x%x to window reg: 0x%X\n",
			   addr, reg_addr);
		return status;
	}

	return 0;
}

/*
 * Read from the ATH6KL through its diagnostic window. No cooperation from
 * the Target is required for this.
 */
int ath6kl_read_reg_diag(struct ath6kl *ar, u32 *address, u32 *data)
{
	int status;

	/* set window register to start read cycle */
	status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
					*address);

	if (status)
		return status;

	/* read the data */
	status = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *)data,
				     sizeof(u32), HIF_RD_SYNC_BYTE_INC);
	if (status) {
		ath6kl_err("failed to read from window data addr\n");
		return status;
	}

	return status;
}


/*
 * Write to the ATH6KL through its diagnostic window. No cooperation from
 * the Target is required for this.
 */
static int ath6kl_write_reg_diag(struct ath6kl *ar, u32 *address, u32 *data)
{
	int status;

	/* set write data */
	status = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *)data,
				     sizeof(u32), HIF_WR_SYNC_BYTE_INC);
	if (status) {
		ath6kl_err("failed to write 0x%x to window data addr\n", *data);
		return status;
	}

	/* set window register, which starts the write cycle */
	return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
				      *address);
}

int ath6kl_access_datadiag(struct ath6kl *ar, u32 address,
			   u8 *data, u32 length, bool read)
{
	u32 count;
	int status = 0;

	for (count = 0; count < length; count += 4, address += 4) {
		if (read) {
			status = ath6kl_read_reg_diag(ar, &address,
						      (u32 *) &data[count]);
			if (status)
				break;
		} else {
			status = ath6kl_write_reg_diag(ar, &address,
						       (u32 *) &data[count]);
			if (status)
				break;
		}
	}

	return status;
}

/* FIXME: move to a better place, target.h? */
#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
#define AR6004_RESET_CONTROL_ADDRESS 0x00004000

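/*
 * Reset the target by writing to its RESET_CONTROL register through the
 * diagnostic window: a cold reset writes RESET_CONTROL_COLD_RST, otherwise
 * RESET_CONTROL_MBOX_RST is requested. Only AR6003 and AR6004 targets are
 * handled here.
 */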
static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
				bool wait_for_compltn, bool cold_reset)
{
	int status = 0;
	u32 address;
	u32 data;

	if (target_type != TARGET_TYPE_AR6003 &&
		target_type != TARGET_TYPE_AR6004)
		return;

	data = cold_reset ? RESET_CONTROL_COLD_RST : RESET_CONTROL_MBOX_RST;

	switch (target_type) {
	case TARGET_TYPE_AR6003:
		address = AR6003_RESET_CONTROL_ADDRESS;
		break;
	case TARGET_TYPE_AR6004:
		address = AR6004_RESET_CONTROL_ADDRESS;
		break;
	default:
		address = AR6003_RESET_CONTROL_ADDRESS;
		break;
	}

	status = ath6kl_write_reg_diag(ar, &address, &data);

	if (status)
		ath6kl_err("failed to reset target\n");
}

void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
			  bool get_dbglogs)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	bool discon_issued;

	netif_stop_queue(dev);

	/* disable the target and the interrupts associated with it */
	if (test_bit(WMI_READY, &ar->flag)) {
		discon_issued = (test_bit(CONNECTED, &ar->flag) ||
				 test_bit(CONNECT_PEND, &ar->flag));
		ath6kl_disconnect(ar);
		if (!keep_profile)
			ath6kl_init_profile_info(ar);

		del_timer(&ar->disconnect_timer);

		clear_bit(WMI_READY, &ar->flag);
		ath6kl_wmi_shutdown(ar->wmi);
		clear_bit(WMI_ENABLED, &ar->flag);
		ar->wmi = NULL;

		/*
		 * After wmi_shutdown all WMI events will be dropped. We
		 * need to clean up the buffers allocated in AP mode and
		 * give a disconnect notification to the stack, which usually
		 * happens in the disconnect_event. Simulate the disconnect
		 * event by calling the function directly. Sometimes
		 * disconnect_event will be received when the debug logs
		 * are collected.
		 */
		if (discon_issued)
			ath6kl_disconnect_event(ar, DISCONNECT_CMD,
						(ar->nw_type & AP_NETWORK) ?
						bcast_mac : ar->bssid,
						0, NULL, 0);

		ar->user_key_ctrl = 0;

	} else {
		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "%s: wmi is not ready 0x%p 0x%p\n",
			   __func__, ar, ar->wmi);

		/* Shut down WMI if we have started it */
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_dbg(ATH6KL_DBG_TRC,
				   "%s: shut down wmi\n", __func__);
			ath6kl_wmi_shutdown(ar->wmi);
			clear_bit(WMI_ENABLED, &ar->flag);
			ar->wmi = NULL;
		}
	}

	if (ar->htc_target) {
		ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
		ath6kl_htc_stop(ar->htc_target);
	}

	/*
	 * Try to reset the device if we can. The driver may have been
	 * configured NOT to reset the target during a debug session.
	 */
	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "attempting to reset target on instance destroy\n");
	ath6kl_reset_device(ar, ar->target_type, true, true);
}

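/*
 * Push the statically configured WEP keys to the firmware. Every key with
 * a non-zero length is installed for group usage, and the default tx key
 * index additionally gets TX usage.
 */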
static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
{
	u8 index;
	u8 keyusage;

	for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
		if (ar->wep_key_list[index].key_len) {
			keyusage = GROUP_USAGE;
			if (index == ar->def_txkey_index)
				keyusage |= TX_USAGE;

			ath6kl_wmi_addkey_cmd(ar->wmi,
					      index,
					      WEP_CRYPT,
					      keyusage,
					      ar->wep_key_list[index].key_len,
					      NULL,
					      ar->wep_key_list[index].key,
					      KEY_OP_INIT_VAL, NULL,
					      NO_SYNC_WMIFLAG);
		}
	}
}

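/*
 * Note on the parameters below: in AP mode these fields are interpreted
 * differently, so 'channel' carries the new station's AID while
 * 'listen_int' and 'beacon_int' carry key management, cipher and auth
 * information (see how ath6kl_add_new_sta() is called at the end of this
 * function).
 */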
static void ath6kl_connect_ap_mode(struct ath6kl *ar, u16 channel, u8 *bssid,
				   u16 listen_int, u16 beacon_int,
				   u8 assoc_resp_len, u8 *assoc_info)
{
	struct net_device *dev = ar->net_dev;
	struct station_info sinfo;
	struct ath6kl_req_key *ik;
	enum crypto_type keyType = NONE_CRYPT;

	if (memcmp(dev->dev_addr, bssid, ETH_ALEN) == 0) {
		ik = &ar->ap_mode_bkey;

		switch (ar->auth_mode) {
		case NONE_AUTH:
			if (ar->prwise_crypto == WEP_CRYPT)
				ath6kl_install_static_wep_keys(ar);
			break;
		case WPA_PSK_AUTH:
		case WPA2_PSK_AUTH:
		case (WPA_PSK_AUTH|WPA2_PSK_AUTH):
			switch (ik->ik_type) {
			case ATH6KL_CIPHER_TKIP:
				keyType = TKIP_CRYPT;
				break;
			case ATH6KL_CIPHER_AES_CCM:
				keyType = AES_CRYPT;
				break;
			default:
				goto skip_key;
			}
			ath6kl_wmi_addkey_cmd(ar->wmi, ik->ik_keyix, keyType,
					      GROUP_USAGE, ik->ik_keylen,
					      (u8 *)&ik->ik_keyrsc,
					      ik->ik_keydata,
					      KEY_OP_INIT_VAL, ik->ik_macaddr,
					      SYNC_BOTH_WMIFLAG);
			break;
		}
skip_key:
		set_bit(CONNECTED, &ar->flag);
		return;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n",
		   bssid, channel);

	ath6kl_add_new_sta(ar, bssid, channel, assoc_info, assoc_resp_len,
			   listen_int & 0xFF, beacon_int,
			   (listen_int >> 8) & 0xFF);

	/* send event to application */
	memset(&sinfo, 0, sizeof(sinfo));

	/* TODO: sinfo.generation */
	/* TODO: need to deliver (Re)AssocReq IEs somehow.. change in
	 * cfg80211 needed, e.g., by adding those into sinfo
	 */
	cfg80211_new_sta(ar->net_dev, bssid, &sinfo, GFP_KERNEL);

	netif_wake_queue(ar->net_dev);

	return;
}

/* Functions for Tx credit handling */
void ath6k_credit_init(struct htc_credit_state_info *cred_info,
		       struct list_head *ep_list,
		       int tot_credits)
{
	struct htc_endpoint_credit_dist *cur_ep_dist;
	int count;

	cred_info->cur_free_credits = tot_credits;
	cred_info->total_avail_credits = tot_credits;

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;

		if (tot_credits > 4)
			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
				ath6kl_deposit_credit_to_ep(cred_info,
						cur_ep_dist,
						cur_ep_dist->cred_min);
				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
			}

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
			ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
						    cur_ep_dist->cred_min);
			/*
			 * Control service is always marked active, it
			 * never goes inactive EVER.
			 */
			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
		} else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
			/* this is the lowest priority data endpoint */
			cred_info->lowestpri_ep_dist = cur_ep_dist->list;

		/*
		 * Streams have to be created (explicitly or implicitly) for
		 * all kinds of traffic. BE endpoints are also inactive in
		 * the beginning. When BE traffic starts, it creates implicit
		 * streams that redistribute credits.
		 *
		 * Note: all other endpoints have minimums set but are
		 * initially given NO credits. Credits will be distributed
		 * as traffic activity demands.
		 */
	}

	WARN_ON(cred_info->cur_free_credits <= 0);

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
		else {
			/*
			 * For the remaining data endpoints, we assume that
			 * each cred_per_msg is the same. We use a simple
			 * calculation here: take the remaining credits,
			 * determine how many whole messages they can cover
			 * and then set each endpoint's normal value equal
			 * to 3/4 of this amount.
			 */
			count = (cred_info->cur_free_credits /
				 cur_ep_dist->cred_per_msg)
				* cur_ep_dist->cred_per_msg;
			count = (count * 3) >> 2;
			count = max(count, cur_ep_dist->cred_per_msg);
			cur_ep_dist->cred_norm = count;
		}
	}
}

/* initialize and setup credit distribution */
int ath6k_setup_credit_dist(void *htc_handle,
			    struct htc_credit_state_info *cred_info)
{
	u16 servicepriority[5];

	memset(cred_info, 0, sizeof(struct htc_credit_state_info));

	servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
	servicepriority[1] = WMI_DATA_VO_SVC;
	servicepriority[2] = WMI_DATA_VI_SVC;
	servicepriority[3] = WMI_DATA_BE_SVC;
	servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */

	/* set priority list */
	ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);

	return 0;
}

/* reduce an ep's credits back to a set limit */
static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
				 struct htc_endpoint_credit_dist *ep_dist,
				 int limit)
{
	int credits;

	ep_dist->cred_assngd = limit;

	if (ep_dist->credits <= limit)
		return;

	credits = ep_dist->credits - limit;
	ep_dist->credits -= credits;
	cred_info->cur_free_credits += credits;
}

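/*
 * Return completed send credits to their endpoints and trim any endpoint
 * that now holds more than its assigned or normal share; inactive
 * endpoints with an empty tx queue are reduced all the way to zero.
 */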
static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
				struct list_head *epdist_list)
{
	struct htc_endpoint_credit_dist *cur_dist_list;

	list_for_each_entry(cur_dist_list, epdist_list, list) {
		if (cur_dist_list->endpoint == ENDPOINT_0)
			continue;

		if (cur_dist_list->cred_to_dist > 0) {
			cur_dist_list->credits +=
					cur_dist_list->cred_to_dist;
			cur_dist_list->cred_to_dist = 0;
			if (cur_dist_list->credits >
			    cur_dist_list->cred_assngd)
				ath6k_reduce_credits(cred_info,
						cur_dist_list,
						cur_dist_list->cred_assngd);

			if (cur_dist_list->credits >
			    cur_dist_list->cred_norm)
				ath6k_reduce_credits(cred_info, cur_dist_list,
						     cur_dist_list->cred_norm);

			if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
				if (cur_dist_list->txq_depth == 0)
					ath6k_reduce_credits(cred_info,
							     cur_dist_list, 0);
			}
		}
	}
}

/*
 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
 * question.
 */
void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
			struct htc_endpoint_credit_dist *ep_dist)
{
	struct htc_endpoint_credit_dist *curdist_list;
	int credits = 0;
	int need;

	if (ep_dist->svc_id == WMI_CONTROL_SVC)
		goto out;

	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
		if (ep_dist->cred_assngd >= ep_dist->cred_norm)
			goto out;

	/*
	 * For all other services, we follow a simple algorithm of:
	 *
	 * 1. checking the free pool for credits
	 * 2. checking lower priority endpoints for credits to take
	 */

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

	if (credits >= ep_dist->seek_cred)
		goto out;

	/*
	 * We don't have enough in the free pool, so try taking away from
	 * lower priority services. The rules for taking away credits:
	 *
	 *   1. Only take from lower priority endpoints
	 *   2. Only take what is allocated above the minimum (never
	 *      starve an endpoint completely)
	 *   3. Only take what you need.
	 */

	list_for_each_entry_reverse(curdist_list,
				    &cred_info->lowestpri_ep_dist,
				    list) {
		if (curdist_list == ep_dist)
			break;

		need = ep_dist->seek_cred - cred_info->cur_free_credits;

		if ((curdist_list->cred_assngd - need) >=
		     curdist_list->cred_min) {
			/*
			 * The current one has been allocated more than
			 * its minimum and it has enough credits assigned
			 * above its minimum to fulfill our need. Try to
			 * take away just enough to fulfill our need.
			 */
			ath6k_reduce_credits(cred_info, curdist_list,
					curdist_list->cred_assngd - need);

			if (cred_info->cur_free_credits >=
			    ep_dist->seek_cred)
				break;
		}

		if (curdist_list->endpoint == ENDPOINT_0)
			break;
	}

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

out:
	/* did we find some credits? */
	if (credits)
		ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);

	ep_dist->seek_cred = 0;
}

/* redistribute credits based on activity change */
static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
				       struct list_head *ep_dist_list)
{
	struct htc_endpoint_credit_dist *curdist_list;

	list_for_each_entry(curdist_list, ep_dist_list, list) {
		if (curdist_list->endpoint == ENDPOINT_0)
			continue;

		if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
		    (curdist_list->svc_id == WMI_DATA_BE_SVC))
			curdist_list->dist_flags |= HTC_EP_ACTIVE;

		if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
		    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
			if (curdist_list->txq_depth == 0)
				ath6k_reduce_credits(info,
						curdist_list, 0);
			else
				ath6k_reduce_credits(info,
						curdist_list,
						curdist_list->cred_min);
		}
	}
}

/*
 * This function is invoked whenever endpoints require credit
 * distributions. A lock is held while this function is invoked, so this
 * function shall NOT block. The ep_dist_list is a list of distribution
 * structures in prioritized order as defined by the call to the
 * htc_set_credit_dist() api.
 */
void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
			     struct list_head *ep_dist_list,
			     enum htc_credit_dist_reason reason)
{
	switch (reason) {
	case HTC_CREDIT_DIST_SEND_COMPLETE:
		ath6k_credit_update(cred_info, ep_dist_list);
		break;
	case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
		ath6k_redistribute_credits(cred_info, ep_dist_list);
		break;
	default:
		break;
	}

	WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
	WARN_ON(cred_info->cur_free_credits < 0);
}

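/*
 * Fires when the disconnect timer expires: reset the stored profile and
 * issue a disconnect to the target.
 */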
void disconnect_timer_handler(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *)ptr;
	struct ath6kl *ar = ath6kl_priv(dev);

	ath6kl_init_profile_info(ar);
	ath6kl_disconnect(ar);
}

void ath6kl_disconnect(struct ath6kl *ar)
{
	if (test_bit(CONNECTED, &ar->flag) ||
	    test_bit(CONNECT_PEND, &ar->flag)) {
		ath6kl_wmi_disconnect_cmd(ar->wmi);
		/*
		 * Disconnect command is issued, clear the connect pending
		 * flag. The connected flag will be cleared in
		 * disconnect event notification.
		 */
		clear_bit(CONNECT_PEND, &ar->flag);
	}
}

/* WMI Event handlers */

static const char *get_hw_id_string(u32 id)
{
	switch (id) {
	case AR6003_REV1_VERSION:
		return "1.0";
	case AR6003_REV2_VERSION:
		return "2.0";
	case AR6003_REV3_VERSION:
		return "2.1.1";
	default:
		return "unknown";
	}
}

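/*
 * WMI "ready" event: the firmware reports its MAC address and version.
 * Record them, format the firmware version string for the wiphy and wake
 * up any thread waiting on the WMI_READY flag.
 */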
void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
{
	struct ath6kl *ar = devt;
	struct net_device *dev = ar->net_dev;

	memcpy(dev->dev_addr, datap, ETH_ALEN);
	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
		   __func__, dev->dev_addr);

	ar->version.wlan_ver = sw_ver;
	ar->version.abi_ver = abi_ver;

	snprintf(ar->wdev->wiphy->fw_version,
		 sizeof(ar->wdev->wiphy->fw_version),
		 "%u.%u.%u.%u",
		 (ar->version.wlan_ver & 0xf0000000) >> 28,
		 (ar->version.wlan_ver & 0x0f000000) >> 24,
		 (ar->version.wlan_ver & 0x00ff0000) >> 16,
		 (ar->version.wlan_ver & 0x0000ffff));

	/* indicate to the waiting thread that the ready event was received */
	set_bit(WMI_READY, &ar->flag);
	wake_up(&ar->event_wq);

	ath6kl_info("hw %s fw %s\n",
		    get_hw_id_string(ar->wdev->wiphy->hw_version),
		    ar->wdev->wiphy->fw_version);
}

void ath6kl_scan_complete_evt(struct ath6kl *ar, int status)
{
	ath6kl_cfg80211_scan_complete_event(ar, status);

	if (!ar->usr_bss_filter)
		ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);

	ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status);
}

void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid,
			  u16 listen_int, u16 beacon_int,
			  enum network_type net_type, u8 beacon_ie_len,
			  u8 assoc_req_len, u8 assoc_resp_len,
			  u8 *assoc_info)
{
	unsigned long flags;

	if (ar->nw_type == AP_NETWORK) {
		ath6kl_connect_ap_mode(ar, channel, bssid, listen_int,
				       beacon_int, assoc_resp_len,
				       assoc_info);
		return;
	}

	ath6kl_cfg80211_connect_event(ar, channel, bssid,
				      listen_int, beacon_int,
				      net_type, beacon_ie_len,
				      assoc_req_len, assoc_resp_len,
				      assoc_info);

	memcpy(ar->bssid, bssid, sizeof(ar->bssid));
	ar->bss_ch = channel;

	if (ar->nw_type == INFRA_NETWORK)
		ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t,
					      ar->listen_intvl_b);

	netif_wake_queue(ar->net_dev);

	/* Update connect & link status atomically */
	spin_lock_irqsave(&ar->lock, flags);
	set_bit(CONNECTED, &ar->flag);
	clear_bit(CONNECT_PEND, &ar->flag);
	netif_carrier_on(ar->net_dev);
	spin_unlock_irqrestore(&ar->lock, flags);

	aggr_reset_state(ar->aggr_cntxt);
	ar->reconnect_flag = 0;

	if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
		memset(ar->node_map, 0, sizeof(ar->node_map));
		ar->node_num = 0;
		ar->next_ep_id = ENDPOINT_2;
	}

	if (!ar->usr_bss_filter)
		ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
}

void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
{
	struct ath6kl_sta *sta;
	u8 tsc[6];

	/*
	 * In the AP case, keyid carries the aid of the STA that sent the
	 * packet with the MIC error. Use this aid to get the MAC address
	 * and report the failure to hostapd.
	 */
	if (ar->nw_type == AP_NETWORK) {
		sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
		if (!sta)
			return;

		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "ap tkip mic error received from aid=%d\n", keyid);

		memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
		cfg80211_michael_mic_failure(ar->net_dev, sta->mac,
					     NL80211_KEYTYPE_PAIRWISE, keyid,
					     tsc, GFP_KERNEL);
	} else
		ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
}

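/*
 * Fold a firmware statistics report into the host's running counters.
 * All firmware fields are little endian; counters are accumulated, while
 * instantaneous values such as RSSI, SNR and rates are simply copied into
 * ar->target_stats.
 */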
static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
{
	struct wmi_target_stats *tgt_stats =
		(struct wmi_target_stats *) ptr;
	struct target_stats *stats = &ar->target_stats;
	struct tkip_ccmp_stats *ccmp_stats;
	struct bss *conn_bss = NULL;
	struct cserv_stats *c_stats;
	u8 ac;

	if (len < sizeof(*tgt_stats))
		return;

	/* update the RSSI of the connected bss */
	if (test_bit(CONNECTED, &ar->flag)) {
		conn_bss = ath6kl_wmi_find_node(ar->wmi, ar->bssid);
		if (conn_bss) {
			c_stats = &tgt_stats->cserv_stats;
			conn_bss->ni_rssi =
				a_sle16_to_cpu(c_stats->cs_ave_beacon_rssi);
			conn_bss->ni_snr =
				tgt_stats->cserv_stats.cs_ave_beacon_snr;
			ath6kl_wmi_node_return(ar->wmi, conn_bss);
		}
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "updating target stats\n");

	stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt);
	stats->tx_byte += le32_to_cpu(tgt_stats->stats.tx.byte);
	stats->tx_ucast_pkt += le32_to_cpu(tgt_stats->stats.tx.ucast_pkt);
	stats->tx_ucast_byte += le32_to_cpu(tgt_stats->stats.tx.ucast_byte);
	stats->tx_mcast_pkt += le32_to_cpu(tgt_stats->stats.tx.mcast_pkt);
	stats->tx_mcast_byte += le32_to_cpu(tgt_stats->stats.tx.mcast_byte);
	stats->tx_bcast_pkt += le32_to_cpu(tgt_stats->stats.tx.bcast_pkt);
	stats->tx_bcast_byte += le32_to_cpu(tgt_stats->stats.tx.bcast_byte);
	stats->tx_rts_success_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.rts_success_cnt);

	for (ac = 0; ac < WMM_NUM_AC; ac++)
		stats->tx_pkt_per_ac[ac] +=
			le32_to_cpu(tgt_stats->stats.tx.pkt_per_ac[ac]);

	stats->tx_err += le32_to_cpu(tgt_stats->stats.tx.err);
	stats->tx_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.fail_cnt);
	stats->tx_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.retry_cnt);
	stats->tx_mult_retry_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt);
	stats->tx_rts_fail_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt);
	stats->tx_ucast_rate =
	    ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate));

	stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt);
	stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte);
	stats->rx_ucast_pkt += le32_to_cpu(tgt_stats->stats.rx.ucast_pkt);
	stats->rx_ucast_byte += le32_to_cpu(tgt_stats->stats.rx.ucast_byte);
	stats->rx_mcast_pkt += le32_to_cpu(tgt_stats->stats.rx.mcast_pkt);
	stats->rx_mcast_byte += le32_to_cpu(tgt_stats->stats.rx.mcast_byte);
	stats->rx_bcast_pkt += le32_to_cpu(tgt_stats->stats.rx.bcast_pkt);
	stats->rx_bcast_byte += le32_to_cpu(tgt_stats->stats.rx.bcast_byte);
	stats->rx_frgment_pkt += le32_to_cpu(tgt_stats->stats.rx.frgment_pkt);
	stats->rx_err += le32_to_cpu(tgt_stats->stats.rx.err);
	stats->rx_crc_err += le32_to_cpu(tgt_stats->stats.rx.crc_err);
	stats->rx_key_cache_miss +=
		le32_to_cpu(tgt_stats->stats.rx.key_cache_miss);
	stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err);
	stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame);
	stats->rx_ucast_rate =
	    ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate));

	ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats;

	stats->tkip_local_mic_fail +=
		le32_to_cpu(ccmp_stats->tkip_local_mic_fail);
	stats->tkip_cnter_measures_invoked +=
		le32_to_cpu(ccmp_stats->tkip_cnter_measures_invoked);
	stats->tkip_fmt_err += le32_to_cpu(ccmp_stats->tkip_fmt_err);

	stats->ccmp_fmt_err += le32_to_cpu(ccmp_stats->ccmp_fmt_err);
	stats->ccmp_replays += le32_to_cpu(ccmp_stats->ccmp_replays);

	stats->pwr_save_fail_cnt +=
		le32_to_cpu(tgt_stats->pm_stats.pwr_save_failure_cnt);
	stats->noise_floor_calib =
		a_sle32_to_cpu(tgt_stats->noise_floor_calib);

	stats->cs_bmiss_cnt +=
		le32_to_cpu(tgt_stats->cserv_stats.cs_bmiss_cnt);
	stats->cs_low_rssi_cnt +=
		le32_to_cpu(tgt_stats->cserv_stats.cs_low_rssi_cnt);
	stats->cs_connect_cnt +=
		le16_to_cpu(tgt_stats->cserv_stats.cs_connect_cnt);
	stats->cs_discon_cnt +=
		le16_to_cpu(tgt_stats->cserv_stats.cs_discon_cnt);

	stats->cs_ave_beacon_rssi =
		a_sle16_to_cpu(tgt_stats->cserv_stats.cs_ave_beacon_rssi);

	stats->cs_last_roam_msec =
		tgt_stats->cserv_stats.cs_last_roam_msec;
	stats->cs_snr = tgt_stats->cserv_stats.cs_snr;
	stats->cs_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_rssi);

	stats->lq_val = le32_to_cpu(tgt_stats->lq_val);

	stats->wow_pkt_dropped +=
		le32_to_cpu(tgt_stats->wow_stats.wow_pkt_dropped);
	stats->wow_host_pkt_wakeups +=
		tgt_stats->wow_stats.wow_host_pkt_wakeups;
	stats->wow_host_evt_wakeups +=
		tgt_stats->wow_stats.wow_host_evt_wakeups;
	stats->wow_evt_discarded +=
		le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);

	if (test_bit(STATS_UPDATE_PEND, &ar->flag)) {
		clear_bit(STATS_UPDATE_PEND, &ar->flag);
		wake_up(&ar->event_wq);
	}
}

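/* Add val to the little-endian counter *var, keeping it little endian. */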
static void ath6kl_add_le32(__le32 *var, __le32 val)
{
	*var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
}

void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
{
	struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
	struct wmi_ap_mode_stat *ap = &ar->ap_stats;
	struct wmi_per_sta_stat *st_ap, *st_p;
	u8 ac;

	if (ar->nw_type == AP_NETWORK) {
		if (len < sizeof(*p))
			return;

		for (ac = 0; ac < AP_MAX_NUM_STA; ac++) {
			st_ap = &ap->sta[ac];
			st_p = &p->sta[ac];

			ath6kl_add_le32(&st_ap->tx_bytes, st_p->tx_bytes);
			ath6kl_add_le32(&st_ap->tx_pkts, st_p->tx_pkts);
			ath6kl_add_le32(&st_ap->tx_error, st_p->tx_error);
			ath6kl_add_le32(&st_ap->tx_discard, st_p->tx_discard);
			ath6kl_add_le32(&st_ap->rx_bytes, st_p->rx_bytes);
			ath6kl_add_le32(&st_ap->rx_pkts, st_p->rx_pkts);
			ath6kl_add_le32(&st_ap->rx_error, st_p->rx_error);
			ath6kl_add_le32(&st_ap->rx_discard, st_p->rx_discard);
		}

	} else {
		ath6kl_update_target_stats(ar, ptr, len);
	}
}

void ath6kl_wakeup_event(void *dev)
{
	struct ath6kl *ar = (struct ath6kl *) dev;

	wake_up(&ar->event_wq);
}

void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
{
	struct ath6kl *ar = (struct ath6kl *) devt;

	ar->tx_pwr = tx_pwr;
	wake_up(&ar->event_wq);
}

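/*
 * A PS-Poll was received from a power-save station: deliver one frame
 * from that station's PS queue and, once the queue drains, clear the
 * station's bit in the partial virtual bitmap (PVB).
 */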
void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
{
	struct ath6kl_sta *conn;
	struct sk_buff *skb;
	bool psq_empty = false;

	conn = ath6kl_find_sta_by_aid(ar, aid);

	if (!conn)
		return;
	/*
	 * Send out a packet queued on the PS queue. When the PS queue
	 * becomes empty, update the PVB for this station.
	 */
	spin_lock_bh(&conn->psq_lock);
	psq_empty = skb_queue_empty(&conn->psq);
	spin_unlock_bh(&conn->psq_lock);

	if (psq_empty)
		/* TODO: Send out a NULL data frame */
		return;

	spin_lock_bh(&conn->psq_lock);
	skb = skb_dequeue(&conn->psq);
	spin_unlock_bh(&conn->psq_lock);

	conn->sta_flags |= STA_PS_POLLED;
	ath6kl_data_tx(skb, ar->net_dev);
	conn->sta_flags &= ~STA_PS_POLLED;

	spin_lock_bh(&conn->psq_lock);
	psq_empty = skb_queue_empty(&conn->psq);
	spin_unlock_bh(&conn->psq_lock);

	if (psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
}

void ath6kl_dtimexpiry_event(struct ath6kl *ar)
{
	bool mcastq_empty = false;
	struct sk_buff *skb;

	/*
	 * If there are no associated STAs, ignore the DTIM expiry event.
	 * There can be potential race conditions where the last associated
	 * STA may disconnect & before the host could clear the 'Indicate
	 * DTIM' request to the firmware, the firmware would have just
	 * indicated a DTIM expiry event. The race is between 'clear DTIM
	 * expiry cmd' going from the host to the firmware & the DTIM
	 * expiry event happening from the firmware to the host.
	 */
	if (!ar->sta_list_index)
		return;

	spin_lock_bh(&ar->mcastpsq_lock);
	mcastq_empty = skb_queue_empty(&ar->mcastpsq);
	spin_unlock_bh(&ar->mcastpsq_lock);

	if (mcastq_empty)
		return;

	/* set the STA flag to dtim_expired for the frame to go out */
	set_bit(DTIM_EXPIRED, &ar->flag);

	spin_lock_bh(&ar->mcastpsq_lock);
	while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
		spin_unlock_bh(&ar->mcastpsq_lock);

		ath6kl_data_tx(skb, ar->net_dev);

		spin_lock_bh(&ar->mcastpsq_lock);
	}
	spin_unlock_bh(&ar->mcastpsq_lock);

	clear_bit(DTIM_EXPIRED, &ar->flag);

	/* clear the LSB of the BitMapCtl field of the TIM IE */
	ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
}

void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
			     u8 assoc_resp_len, u8 *assoc_info,
			     u16 prot_reason_status)
{
	struct bss *wmi_ssid_node = NULL;
	unsigned long flags;

	if (ar->nw_type == AP_NETWORK) {
		if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
			return;

		/* if no more associated STAs, empty the mcast PS q */
		if (ar->sta_list_index == 0) {
			spin_lock_bh(&ar->mcastpsq_lock);
			skb_queue_purge(&ar->mcastpsq);
			spin_unlock_bh(&ar->mcastpsq_lock);

			/* clear the LSB of the TIM IE's BitMapCtl field */
			if (test_bit(WMI_READY, &ar->flag))
				ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
		}

		if (!is_broadcast_ether_addr(bssid)) {
			/* send event to application */
			cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL);
		}

		clear_bit(CONNECTED, &ar->flag);
		return;
	}

	ath6kl_cfg80211_disconnect_event(ar, reason, bssid,
				       assoc_resp_len, assoc_info,
				       prot_reason_status);

	aggr_reset_state(ar->aggr_cntxt);

	del_timer(&ar->disconnect_timer);

	ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT,
		   "disconnect reason is %d\n", reason);

	/*
	 * Only if the event is due to a disconnect cmd from the host will
	 * the target stop trying to connect. Under any other condition,
	 * the target keeps trying to connect.
	 */
	if (reason == DISCONNECT_CMD) {
		if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
			ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
	} else {
		set_bit(CONNECT_PEND, &ar->flag);
		if (((reason == ASSOC_FAILED) &&
		    (prot_reason_status == 0x11)) ||
		    ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
		     && (ar->reconnect_flag == 1))) {
			set_bit(CONNECTED, &ar->flag);
			return;
		}
	}

	if ((reason == NO_NETWORK_AVAIL) && test_bit(WMI_READY, &ar->flag)) {
		ath6kl_wmi_node_free(ar->wmi, bssid);

		/*
		 * In case any other nodes with the same SSID are present,
		 * remove them as well, since those nodes are also not
		 * available now.
		 */
		do {
			/*
			 * Find the nodes based on SSID and remove them.
			 *
			 * Note: This case will not work out for
			 * Hidden-SSID
			 */
			wmi_ssid_node = ath6kl_wmi_find_ssid_node(ar->wmi,
								  ar->ssid,
								  ar->ssid_len,
								  false,
								  true);

			if (wmi_ssid_node)
				ath6kl_wmi_node_free(ar->wmi,
						     wmi_ssid_node->ni_macaddr);

		} while (wmi_ssid_node);
	}

	/* update connect & link status atomically */
	spin_lock_irqsave(&ar->lock, flags);
	clear_bit(CONNECTED, &ar->flag);
	netif_carrier_off(ar->net_dev);
	spin_unlock_irqrestore(&ar->lock, flags);

	if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1))
		ar->reconnect_flag = 0;

	if (reason != CSERV_DISCONNECT)
		ar->user_key_ctrl = 0;

	netif_stop_queue(ar->net_dev);
	memset(ar->bssid, 0, sizeof(ar->bssid));
	ar->bss_ch = 0;

	ath6kl_tx_data_cleanup(ar);
}

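/*
 * net_device ndo_open handler: mark the WLAN interface enabled and bring
 * the carrier and queue state in line with the current connection state.
 */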
static int ath6kl_open(struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&ar->lock, flags);

	set_bit(WLAN_ENABLED, &ar->flag);

	if (test_bit(CONNECTED, &ar->flag)) {
		netif_carrier_on(dev);
		netif_wake_queue(dev);
	} else
		netif_carrier_off(dev);

	spin_unlock_irqrestore(&ar->lock, flags);

	return 0;
}

static int ath6kl_close(struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);

	netif_stop_queue(dev);

	ath6kl_disconnect(ar);

	if (test_bit(WMI_READY, &ar->flag)) {
		if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0,
					      0, 0, 0))
			return -EIO;

		clear_bit(WLAN_ENABLED, &ar->flag);
	}

	ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);

	return 0;
}

static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);

	return &ar->net_stats;
}

static struct net_device_ops ath6kl_netdev_ops = {
	.ndo_open               = ath6kl_open,
	.ndo_stop               = ath6kl_close,
	.ndo_start_xmit         = ath6kl_data_tx,
	.ndo_get_stats          = ath6kl_get_stats,
};

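/*
 * Attach the ath6kl netdev ops and reserve enough headroom for the
 * encapsulation added on transmit: LLC/SNAP header, WMI data header,
 * HTC header and the optional WMI tx meta data.
 */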
void init_netdev(struct net_device *dev)
{
	dev->netdev_ops = &ath6kl_netdev_ops;
	dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;

	dev->needed_headroom = ETH_HLEN;
	dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) +
				sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
				+ WMI_MAX_TX_META_SZ;

	return;
}
1354