tx.c revision a620865edf62ea2d024bbfe62162244473badfcb
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>

#include "wl12xx.h"
#include "io.h"
#include "reg.h"
#include "ps.h"
#include "tx.h"

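/* Select the default WEP TX key index in firmware (AP or STA variant). */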
static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
{
	int ret;
	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
	else
		ret = wl1271_cmd_set_sta_default_wep_key(wl, id);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}

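/*
 * Reserve a free TX descriptor id for the skb and remember the mapping.
 * Returns the id, or -EBUSY if all descriptors are in use.
 */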
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS);
	if (id >= ACX_TX_DESCRIPTORS)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}

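/* Release a previously allocated TX descriptor id and drop its skb mapping. */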
static void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}

static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	/*
	 * Add the station to the known list before transmitting the
	 * authentication response. This way it won't get de-authed by the FW
	 * when transmitting too soon.
	 */
	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (ieee80211_is_auth(hdr->frame_control))
		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
}

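/*
 * Throttle a station link: if the firmware reports the station in PS and
 * enough blocks are already queued for it, start high-level PS for the link.
 */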
static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
{
	bool fw_ps;
	u8 tx_blks;

	/* only regulate station links */
	if (hlid < WL1271_AP_STA_HLID_START)
		return;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	tx_blks = wl->links[hlid].allocated_blks;

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 */
	if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
		wl1271_ps_link_start(wl, hlid, true);
}

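/*
 * Map an skb to its host link id (HLID): use the station's HLID when a STA
 * entry exists, otherwise the global (mgmt) or broadcast link.
 */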
u8 wl1271_tx_get_hlid(struct sk_buff *skb)
{
	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);

	if (control->control.sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)
				control->control.sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (ieee80211_is_mgmt(hdr->frame_control))
			return WL1271_AP_GLOBAL_HLID;
		else
			return WL1271_AP_BROADCAST_HLID;
	}
}

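/*
 * Allocate firmware memory blocks and a descriptor id for the frame.
 * Returns 0 on success, -EAGAIN if the aggregation buffer is full, or
 * -EBUSY if no descriptor id or firmware blocks are available.
 */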
static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
				u32 buf_offset, u8 hlid)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY;

	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
		return -EAGAIN;

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	/* approximate the number of blocks required for this packet
	   in the firmware */
	total_blocks = total_len + TX_HW_BLOCK_SIZE - 1;
	total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE;
	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		desc->extra_mem_blocks = TX_HW_BLOCK_SPARE;
		desc->total_mem_blocks = total_blocks;
		desc->id = id;

		wl->tx_blocks_available -= total_blocks;

		if (wl->bss_type == BSS_TYPE_AP_BSS)
			wl->links[hlid].allocated_blks += total_blocks;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}

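/*
 * Fill in the hardware TX descriptor that precedes the frame: packet life
 * time, rate policy, queue/tid, padding and session/link attributes.
 */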
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
			      u32 extra, struct ieee80211_tx_info *control,
			      u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int pad, ac, rate_idx;
	s64 hosttime;
	u16 tx_attr;

	desc = (struct wl1271_tx_hw_descr *) skb->data;

	/* relocate space for security header */
	if (extra) {
		void *framestart = skb->data + sizeof(*desc);
		u16 fc = *(u16 *)(framestart + extra);
		int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
		memmove(framestart, framestart + extra, hdrlen);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	if (wl->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* configure the tx attributes */
	tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;

	/* queue (we use the same identifiers for TIDs and ACs) */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = ac;

	if (wl->bss_type != BSS_TYPE_AP_BSS) {
		desc->aid = hlid;

		/* if the packets are destined for AP (have a STA entry)
		   send them with AP rate policies, otherwise use default
		   basic rates */
		if (control->control.sta)
			rate_idx = ACX_TX_AP_FULL_RATE;
		else
			rate_idx = ACX_TX_BASIC_RATE;
	} else {
		desc->hlid = hlid;
		switch (hlid) {
		case WL1271_AP_GLOBAL_HLID:
			rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
			break;
		case WL1271_AP_BROADCAST_HLID:
			rate_idx = ACX_TX_AP_MODE_BCST_RATE;
			break;
		default:
			rate_idx = ac;
			break;
		}
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
	desc->reserved = 0;

	/* align the length (and store in terms of words) */
	pad = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
	desc->length = cpu_to_le16(pad >> 2);

	/* calculate number of padding bytes */
	pad = pad - skb->len;
	tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
		"tx_attr: 0x%x len: %d life: %d mem: %d", pad, desc->hlid,
		le16_to_cpu(desc->tx_attr), le16_to_cpu(desc->length),
		le16_to_cpu(desc->life_time), desc->total_mem_blocks);
}

/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
							u32 buf_offset)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	u8 hlid;

	if (!skb)
		return -EINVAL;

	info = IEEE80211_SKB_CB(skb);

	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_TKIP_IV_SPACE;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (unlikely(is_wep && wl->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, idx);
			if (ret < 0)
				return ret;
			wl->default_key = idx;
		}
	}

	if (wl->bss_type == BSS_TYPE_AP_BSS)
		hlid = wl1271_tx_get_hlid(skb);
	else
		hlid = TX_HW_DEFAULT_AID;

	ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
	if (ret < 0)
		return ret;

	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, skb);
		wl1271_tx_regulate_link(wl, hlid);
	}

	wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);

	/*
	 * The length of each packet is stored in terms of words. Thus, we must
	 * pad the skb data to make sure its length is aligned.
	 * The number of padding bytes is computed and set in wl1271_tx_fill_hdr
	 */
	total_len = ALIGN(skb->len, WL1271_TX_ALIGN_TO);
	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	return total_len;
}

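/*
 * Translate a mac80211 rate-set bitmap into the hardware rate bitmap,
 * including MCS rates when CONFIG_WL12XX_HT is enabled.
 */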
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[wl->band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

#ifdef CONFIG_WL12XX_HT
	/* MCS rates are indicated on bits 16 - 23 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 8; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}
#endif

	return enabled_rates;
}

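/*
 * Wake the mac80211 queues once the number of queued frames drops back
 * below the low watermark after a stop.
 */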
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	unsigned long flags;

	if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
	    wl->tx_queue_count <= WL1271_TX_QUEUE_LOW_WATERMARK) {
		/* firmware buffer has space, restart queues */
		spin_lock_irqsave(&wl->wl_lock, flags);
		ieee80211_wake_queues(wl->hw);
		clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}
}

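/* Dequeue the next frame in strict AC priority order (VO, VI, BE, BK). */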
static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
{
	struct sk_buff *skb = NULL;
	unsigned long flags;

	skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VO]);
	if (skb)
		goto out;
	skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VI]);
	if (skb)
		goto out;
	skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BE]);
	if (skb)
		goto out;
	skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BK]);

out:
	if (skb) {
		spin_lock_irqsave(&wl->wl_lock, flags);
		wl->tx_queue_count--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

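/*
 * AP-mode dequeue: round-robin over the links starting after the last served
 * one, taking the highest-priority AC that has a frame on each link in turn.
 */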
static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
{
	struct sk_buff *skb = NULL;
	unsigned long flags;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < AP_MAX_LINKS; i++) {
		h = (start_hlid + i) % AP_MAX_LINKS;

		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
		if (skb)
			goto out;
		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
		if (skb)
			goto out;
		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
		if (skb)
			goto out;
		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
		if (skb)
			goto out;
	}

out:
	if (skb) {
		wl->last_tx_hlid = h;
		spin_lock_irqsave(&wl->wl_lock, flags);
		wl->tx_queue_count--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	} else {
		wl->last_tx_hlid = 0;
	}

	return skb;
}

static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
{
	if (wl->bss_type == BSS_TYPE_AP_BSS)
		return wl1271_ap_skb_dequeue(wl);

	return wl1271_sta_skb_dequeue(wl);
}

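/*
 * Return an undequeued frame to the head of its queue so it is the next one
 * transmitted (used when the aggregation or firmware buffer is full).
 */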
static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		u8 hlid = wl1271_tx_get_hlid(skb);
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
	} else {
		skb_queue_head(&wl->tx_queue[q], skb);
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

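/*
 * Main TX path: dequeue frames, prepare them into the aggregation buffer and
 * flush the buffer to the device, waking the chip from ELP as needed.
 */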
void wl1271_tx_work_locked(struct wl1271 *wl)
{
	struct sk_buff *skb;
	bool woken_up = false;
	u32 buf_offset = 0;
	bool sent_packets = false;
	int ret;

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	while ((skb = wl1271_skb_dequeue(wl))) {
		if (!woken_up) {
			ret = wl1271_ps_elp_wakeup(wl);
			if (ret < 0)
				goto out_ack;
			woken_up = true;
		}

		ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, skb);
			wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				     buf_offset, true);
			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, skb);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			dev_kfree_skb(skb);
			goto out_ack;
		}
		buf_offset += ret;
		wl->tx_packets_count++;
	}

out_ack:
	if (buf_offset) {
		wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				buf_offset, true);
		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
			wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
				       wl->tx_packets_count);

		wl1271_handle_tx_low_watermark(wl);
	}

out:
	if (woken_up)
		wl1271_ps_elp_sleep(wl);
}

void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);

	mutex_lock(&wl->mutex);
	wl1271_tx_work_locked(wl);
	mutex_unlock(&wl->mutex);
}

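/*
 * Handle a single TX result: fill in the mac80211 TX status, strip the
 * private descriptor (and TKIP space), and queue the skb for deferred
 * release to the stack.
 */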
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = 0;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/* update security sequence number */
	wl->tx_security_seq += (result->lsb_security_sequence_number -
				wl->tx_security_last_seq);
	wl->tx_security_last_seq = result->lsb_security_sequence_number;

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
		skb_pull(skb, WL1271_TKIP_IV_SPACE);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	ieee80211_queue_work(wl->hw, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}

/* Called upon reception of a TX complete interrupt */
void wl1271_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap =
		(struct wl1271_acx_mem_map *)wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;

	/* read the tx results from the chipset */
	wl1271_read(wl, le32_to_cpu(memmap->tx_result),
		    wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
		       offsetof(struct wl1271_tx_hw_res_if,
				tx_result_host_counter), fw_counter);

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}
}

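/* Drop all frames still queued for one link and report them as not acked. */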
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i, total = 0;
	unsigned long flags;
	struct ieee80211_tx_info *info;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
			info = IEEE80211_SKB_CB(skb);
			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;
			ieee80211_tx_status(wl->hw, skb);
			total++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count -= total;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

/* caller must hold wl->mutex */
void wl1271_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* TX failure */
	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		for (i = 0; i < AP_MAX_LINKS; i++) {
			wl1271_tx_reset_link_queues(wl, i);
			wl->links[i].allocated_blks = 0;
			wl->links[i].prev_freed_blks = 0;
		}

		wl->last_tx_hlid = 0;
	} else {
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
				wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
					     skb);
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status(wl->hw, skb);
			}
		}
	}

	wl->tx_queue_count = 0;

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 */
	wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		/* Remove private headers before passing the skb to mac80211 */
		info = IEEE80211_SKB_CB(skb);
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
		if (info->control.hw_key &&
		    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
			memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data,
				hdrlen);
			skb_pull(skb, WL1271_TKIP_IV_SPACE);
		}

		info->status.rates[0].idx = -1;
		info->status.rates[0].count = 0;

		ieee80211_tx_status(wl->hw, skb);
	}
}

#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout;
	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	while (!time_after(jiffies, timeout)) {
		mutex_lock(&wl->mutex);
		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt, wl->tx_queue_count);
		if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
			mutex_unlock(&wl->mutex);
			return;
		}
		mutex_unlock(&wl->mutex);
		msleep(1);
	}

	wl1271_warning("Unable to flush all TX buffers, timed out.");
}

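/* Return the lowest rate enabled in the current basic rate set. */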
u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
{
	int i;
	u32 rate = 0;

	if (!wl->basic_rate_set) {
		WARN_ON(1);
		wl->basic_rate_set = wl->conf.tx.basic_rate;
	}

	for (i = 0; !rate; i++) {
		if ((wl->basic_rate_set >> i) & 0x1)
			rate = 1 << i;
	}

	return rate;
}