tx.c revision 79b122dc51797b650201f21360481a0450e9b7e4
1e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio/*
2e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * This file is part of wl1271
3e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio *
4e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * Copyright (C) 2009 Nokia Corporation
5e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio *
6e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio *
8e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * This program is free software; you can redistribute it and/or
9e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * modify it under the terms of the GNU General Public License
10e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * version 2 as published by the Free Software Foundation.
11e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio *
12e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * This program is distributed in the hope that it will be useful, but
13e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * WITHOUT ANY WARRANTY; without even the implied warranty of
14e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * General Public License for more details.
16e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio *
17e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * You should have received a copy of the GNU General Public License
18e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * along with this program; if not, write to the Free Software
19e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio * 02110-1301 USA
21e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio *
22e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio */
23e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio
24e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio#include <linux/kernel.h>
25e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio#include <linux/module.h>
26e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio#include <linux/etherdevice.h>
27e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio
28e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio#include "wl12xx.h"
29e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio#include "io.h"
30e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio#include "reg.h"
31e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio#include "ps.h"
32e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio#include "tx.h"
33e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio
34e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipiostatic int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
35e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio{
36e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio	int ret;
37e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
38e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio
39e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio	if (is_ap)
40e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio		ret = wl12xx_cmd_set_default_wep_key(wl, id,
41e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio						     WL1271_AP_BROADCAST_HLID);
42e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio	else
43e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio		ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
44e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio
45e14fa82439d33cef67eaafc1a48960bbfa610c8eRiku Voipio	if (ret < 0)
46		return ret;
47
48	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
49	return 0;
50}
51
52static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
53{
54	int id;
55
56	id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS);
57	if (id >= ACX_TX_DESCRIPTORS)
58		return -EBUSY;
59
60	__set_bit(id, wl->tx_frames_map);
61	wl->tx_frames[id] = skb;
62	wl->tx_frames_cnt++;
63	return id;
64}
65
66static void wl1271_free_tx_id(struct wl1271 *wl, int id)
67{
68	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
69		if (unlikely(wl->tx_frames_cnt == ACX_TX_DESCRIPTORS))
70			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
71
72		wl->tx_frames[id] = NULL;
73		wl->tx_frames_cnt--;
74	}
75}
76
/*
 * Hook invoked before transmitting a frame in STA mode.
 *
 * Historically this disabled BSSID-based RX filtering before sending
 * authentication requests, so that authentication responses from
 * not-yet-known BSSIDs (e.g. while roaming) would not be dropped by
 * the hardware.  The filter reconfiguration has since been removed,
 * which left the auth-frame check as dead code: both branches of the
 * old conditional returned 0 unconditionally.  Keep the entry point
 * (callers still invoke it) but drop the no-op header parse and
 * conditional.
 *
 * Always returns 0.
 */
static int wl1271_tx_update_filters(struct wl1271 *wl,
						 struct sk_buff *skb)
{
	return 0;
}
96
97static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
98						 struct sk_buff *skb)
99{
100	struct ieee80211_hdr *hdr;
101
102	/*
103	 * add the station to the known list before transmitting the
104	 * authentication response. this way it won't get de-authed by FW
105	 * when transmitting too soon.
106	 */
107	hdr = (struct ieee80211_hdr *)(skb->data +
108				       sizeof(struct wl1271_tx_hw_descr));
109	if (ieee80211_is_auth(hdr->frame_control))
110		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
111}
112
#if 0
/*
 * Throttle an AP-mode station link: when the peer is in firmware
 * power-save and enough TX blocks have accumulated for it, move the
 * link into high-level PS and drain its queues.
 * NOTE: currently compiled out (#if 0) — not built into the driver.
 */
static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
{
	bool fw_ps;
	u8 tx_blks;

	/* only regulate station links */
	if (hlid < WL1271_AP_STA_HLID_START)
		return;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	tx_blks = wl->links[hlid].allocated_blks;

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 */
	if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS)
		wl1271_ps_link_start(wl, hlid, true);
}
#endif
134
135u8 wl1271_tx_get_hlid(struct sk_buff *skb)
136{
137	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
138
139	if (control->control.sta) {
140		struct wl1271_station *wl_sta;
141
142		wl_sta = (struct wl1271_station *)
143				control->control.sta->drv_priv;
144		return wl_sta->hlid;
145	} else {
146		struct ieee80211_hdr *hdr;
147
148		hdr = (struct ieee80211_hdr *)skb->data;
149		if (ieee80211_is_mgmt(hdr->frame_control))
150			return WL1271_AP_GLOBAL_HLID;
151		else
152			return WL1271_AP_BROADCAST_HLID;
153	}
154}
155
156static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
157						unsigned int packet_length)
158{
159	if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
160		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
161	else
162		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
163}
164
/*
 * Reserve firmware memory blocks and a descriptor id for @skb, and push
 * descriptor space onto the skb head.
 *
 * @extra:      extra head bytes reserved for the security header
 * @buf_offset: current fill level of the host aggregation buffer
 * @hlid:       destination link, used for per-link accounting in AP mode
 *
 * Returns 0 on success, -EAGAIN when the aggregation buffer cannot hold
 * this frame (caller flushes and retries), or -EBUSY when no descriptor
 * id / not enough firmware blocks are free.
 */
static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
				u32 buf_offset, u8 hlid)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 len;
	u32 total_blocks;
	int id, ret = -EBUSY;

	/* we use 1 spare block */
	u32 spare_blocks = 1;

	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
		return -EAGAIN;

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	/* approximate the number of blocks required for this packet
	   in the firmware */
	len = wl12xx_calc_packet_alignment(wl, total_len);

	total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
		spare_blocks;

	if (total_blocks <= wl->tx_blocks_available) {
		/* make room for the HW descriptor in front of the frame */
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		/* HW descriptor fields change between wl127x and wl128x */
		if (wl->chip.id == CHIP_ID_1283_PG20) {
			desc->wl128x_mem.total_mem_blocks = total_blocks;
		} else {
			desc->wl127x_mem.extra_blocks = spare_blocks;
			desc->wl127x_mem.total_mem_blocks = total_blocks;
		}

		desc->id = id;

		/* account the blocks against the shared pool */
		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/* AP mode additionally tracks blocks per link */
		if (wl->bss_type == BSS_TYPE_AP_BSS)
			wl->links[hlid].allocated_blks += total_blocks;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		/* not enough FW blocks: release the reserved id */
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}
223
224static bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
225{
226	return wl->dummy_packet == skb;
227}
228
/*
 * Fill the hardware TX descriptor that wl1271_tx_allocate() pushed in
 * front of the frame: lifetime, queue/tid, session and rate-policy
 * attributes, and the chip-specific length/padding fields.
 *
 * @extra:   bytes of security-header space to collapse out of the frame
 * @control: mac80211 TX info for this skb
 * @hlid:    destination link id, stored into the descriptor
 */
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
			      u32 extra, struct ieee80211_tx_info *control,
			      u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int aligned_len, ac, rate_idx;
	s64 hosttime;
	u16 tx_attr;

	desc = (struct wl1271_tx_hw_descr *) skb->data;

	/* relocate space for security header */
	if (extra) {
		void *framestart = skb->data + sizeof(*desc);
		u16 fc = *(u16 *)(framestart + extra);
		int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
		/* move the 802.11 header down over the reserved IV space */
		memmove(framestart, framestart + extra, hdrlen);
	}

	/* configure packet life time; start_time is host time in ~us
	 * (ns >> 10), relative to the firmware's time offset */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	if (wl->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (wl12xx_is_dummy_packet(wl, skb)) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different than the one set in the join
		 */
		tx_attr = ((~wl->session_counter) <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else {
		/* configure the tx attributes */
		tx_attr =
			wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;

	if (wl->bss_type != BSS_TYPE_AP_BSS) {
		/* if the packets are destined for AP (have a STA entry)
		   send them with AP rate policies, otherwise use default
		   basic rates */
		if (control->control.sta)
			rate_idx = ACX_TX_AP_FULL_RATE;
		else
			rate_idx = ACX_TX_BASIC_RATE;
	} else {
		/* AP mode: rate policy keyed by destination link */
		switch (hlid) {
		case WL1271_AP_GLOBAL_HLID:
			rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
			break;
		case WL1271_AP_BROADCAST_HLID:
			rate_idx = ACX_TX_AP_MODE_BCST_RATE;
			break;
		default:
			/* per-station links use the AC-indexed policy */
			rate_idx = ac;
			break;
		}
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
	desc->reserved = 0;

	aligned_len = wl12xx_calc_packet_alignment(wl, skb->len);

	/* length/padding descriptor fields differ between chip families */
	if (wl->chip.id == CHIP_ID_1283_PG20) {
		desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
		desc->length = cpu_to_le16(aligned_len >> 2);

		wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
			     "tx_attr: 0x%x len: %d life: %d mem: %d",
			     desc->hlid, tx_attr,
			     le16_to_cpu(desc->length),
			     le16_to_cpu(desc->life_time),
			     desc->wl128x_mem.total_mem_blocks);
	} else {
		int pad;

		/* Store the aligned length in terms of words */
		desc->length = cpu_to_le16(aligned_len >> 2);

		/* calculate number of padding bytes */
		pad = aligned_len - skb->len;
		tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;

		wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
			     "tx_attr: 0x%x len: %d life: %d mem: %d", pad,
			     desc->hlid, tx_attr,
			     le16_to_cpu(desc->length),
			     le16_to_cpu(desc->life_time),
			     desc->wl127x_mem.total_mem_blocks);
	}

	desc->tx_attr = cpu_to_le16(tx_attr);
}
338
/*
 * Prepare one frame for transmission: allocate descriptor/blocks, fill
 * the HW descriptor, and copy the (padded) frame into the aggregation
 * buffer at @buf_offset.
 *
 * Returns the number of bytes consumed in the aggregation buffer, or a
 * negative error (-EAGAIN: aggregation buffer full, -EBUSY: firmware
 * full, -EINVAL: no skb / invalid link).
 *
 * caller must hold wl->mutex
 */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
							u32 buf_offset)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	u8 hlid;

	if (!skb)
		return -EINVAL;

	info = IEEE80211_SKB_CB(skb);

	/* TKIP needs extra head room for the IV, relocated in fill_hdr */
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_TKIP_IV_SPACE;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		/* lazily switch the FW default WEP key when it changes */
		if (unlikely(is_wep && wl->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, idx);
			if (ret < 0)
				return ret;
			wl->default_key = idx;
		}
	}

	/* pick the destination link: per-frame in AP mode, otherwise the
	 * station link when associated or the device link before that */
	if (wl->bss_type == BSS_TYPE_AP_BSS)
		hlid = wl1271_tx_get_hlid(skb);
	else
		if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
			hlid = wl->sta_hlid;
		else
			hlid = wl->dev_hlid;

	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);

	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, skb);
#if 0
		wl1271_tx_regulate_link(wl, hlid);
#endif
	} else {
		wl1271_tx_update_filters(wl, skb);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned.  The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (eg. for wl128x with SDIO we align to 256).
	 */
	total_len = wl12xx_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (wl12xx_is_dummy_packet(wl, skb))
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}
421
422u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
423{
424	struct ieee80211_supported_band *band;
425	u32 enabled_rates = 0;
426	int bit;
427
428	band = wl->hw->wiphy->bands[wl->band];
429	for (bit = 0; bit < band->n_bitrates; bit++) {
430		if (rate_set & 0x1)
431			enabled_rates |= band->bitrates[bit].hw_value;
432		rate_set >>= 1;
433	}
434
435#ifdef CONFIG_WL12XX_HT
436	/* MCS rates indication are on bits 16 - 23 */
437	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
438
439	for (bit = 0; bit < 8; bit++) {
440		if (rate_set & 0x1)
441			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
442		rate_set >>= 1;
443	}
444#endif
445
446	return enabled_rates;
447}
448
449void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
450{
451	unsigned long flags;
452	int i;
453
454	for (i = 0; i < NUM_TX_QUEUES; i++) {
455		if (test_bit(i, &wl->stopped_queues_map) &&
456		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
457			/* firmware buffer has space, restart queues */
458			spin_lock_irqsave(&wl->wl_lock, flags);
459			ieee80211_wake_queue(wl->hw,
460					     wl1271_tx_get_mac80211_queue(i));
461			clear_bit(i, &wl->stopped_queues_map);
462			spin_unlock_irqrestore(&wl->wl_lock, flags);
463		}
464	}
465}
466
467static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
468{
469	struct sk_buff *skb = NULL;
470	unsigned long flags;
471
472	skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VO]);
473	if (skb)
474		goto out;
475	skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VI]);
476	if (skb)
477		goto out;
478	skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BE]);
479	if (skb)
480		goto out;
481	skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BK]);
482
483out:
484	if (skb) {
485		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
486		spin_lock_irqsave(&wl->wl_lock, flags);
487		wl->tx_queue_count[q]--;
488		spin_unlock_irqrestore(&wl->wl_lock, flags);
489	}
490
491	return skb;
492}
493
/*
 * Dequeue the next frame to transmit in AP mode: per-AC priority within
 * each link, round-robin across links starting after the last-served
 * one (wl->last_tx_hlid).  Updates last_tx_hlid to the link actually
 * served, or resets it to 0 when everything is empty.
 * Returns NULL when no link has pending frames.
 */
static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
{
	struct sk_buff *skb = NULL;
	unsigned long flags;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < AP_MAX_LINKS; i++) {
		h = (start_hlid + i) % AP_MAX_LINKS;

		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
		if (skb)
			goto out;
		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
		if (skb)
			goto out;
		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
		if (skb)
			goto out;
		skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
		if (skb)
			goto out;
	}

out:
	if (skb) {
		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		/* h is the link the goto left us on; remember it for RR */
		wl->last_tx_hlid = h;
		spin_lock_irqsave(&wl->wl_lock, flags);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	} else {
		wl->last_tx_hlid = 0;
	}

	return skb;
}
534
535static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
536{
537	unsigned long flags;
538	struct sk_buff *skb = NULL;
539
540	if (wl->bss_type == BSS_TYPE_AP_BSS)
541		skb = wl1271_ap_skb_dequeue(wl);
542	else
543		skb = wl1271_sta_skb_dequeue(wl);
544
545	if (!skb &&
546	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
547		int q;
548
549		skb = wl->dummy_packet;
550		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
551		spin_lock_irqsave(&wl->wl_lock, flags);
552		wl->tx_queue_count[q]--;
553		spin_unlock_irqrestore(&wl->wl_lock, flags);
554	}
555
556	return skb;
557}
558
/*
 * Requeue @skb at the head of its TX queue (used when a prepared frame
 * could not be handed to the firmware).  Dummy packets are tracked via
 * a flag rather than a queue; AP frames go back on their link's queue
 * and last_tx_hlid is rewound so the same frame is dequeued next.
 */
static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else if (wl->bss_type == BSS_TYPE_AP_BSS) {
		u8 hlid = wl1271_tx_get_hlid(skb);
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
	} else {
		skb_queue_head(&wl->tx_queue[q], skb);
	}

	/* the frame is pending again; restore its queue count */
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
580
581static bool wl1271_tx_is_data_present(struct sk_buff *skb)
582{
583	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
584
585	return ieee80211_is_data_present(hdr->frame_control);
586}
587
/*
 * Main TX path: drain the TX queues into the aggregation buffer and
 * flush it to the chip, possibly multiple times.  On -EAGAIN the buffer
 * is flushed and the frame retried; on -EBUSY the firmware is full, so
 * the frame is requeued and FW_TX_BUSY throttles further work.
 * Finally, (re)arm RX streaming in STA mode when data was transmitted.
 *
 * Caller must hold wl->mutex (see wl1271_tx_work).
 */
void wl1271_tx_work_locked(struct wl1271 *wl)
{
	struct sk_buff *skb;
	u32 buf_offset = 0;
	bool sent_packets = false;
	bool had_data = false;
	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	if (unlikely(wl->state == WL1271_STATE_OFF))
		return;

	while ((skb = wl1271_skb_dequeue(wl))) {
		if (wl1271_tx_is_data_present(skb))
			had_data = true;

		ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, skb);
			wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				     buf_offset, true);
			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, skb);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			/* unrecoverable per-frame error: drop the skb */
			dev_kfree_skb(skb);
			goto out_ack;
		}
		buf_offset += ret;
		wl->tx_packets_count++;
	}

out_ack:
	/* flush whatever accumulated since the last write */
	if (buf_offset) {
		wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				buf_offset, true);
		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
			wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
				       wl->tx_packets_count);

		wl1271_handle_tx_low_watermark(wl);
	}
	/* STA mode only: kick or extend the RX-streaming window */
	if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
		u32 timeout = wl->conf.rx_streaming.duration;

		/* enable rx streaming */
		if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
			ieee80211_queue_work(wl->hw,
					     &wl->rx_streaming_enable_work);

		mod_timer(&wl->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}
664
665void wl1271_tx_work(struct work_struct *work)
666{
667	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
668	int ret;
669
670	mutex_lock(&wl->mutex);
671	ret = wl1271_ps_elp_wakeup(wl);
672	if (ret < 0)
673		goto out;
674
675	wl1271_tx_work_locked(wl);
676
677	wl1271_ps_elp_sleep(wl);
678out:
679	mutex_unlock(&wl->mutex);
680}
681
/*
 * Process one firmware TX result: translate it into mac80211 TX status,
 * maintain the security sequence counter, strip the driver-private
 * headers, and hand the skb to the deferred netstack queue.
 */
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* dummy packets are driver-internal; just release the id */
	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = 0;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/*
	 * update sequence number only when relevant, i.e. only in
	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
	 */
	if (info->control.hw_key &&
	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
		u8 fw_lsb = result->tx_security_sequence_number_lsb;
		u8 cur_lsb = wl->tx_security_last_seq_lsb;

		/*
		 * update security sequence number, taking care of potential
		 * wrap-around
		 */
		wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
		wl->tx_security_last_seq_lsb = fw_lsb;
	}

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
		skb_pull(skb, WL1271_TKIP_IV_SPACE);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}
763
/* Called upon reception of a TX complete interrupt */
/*
 * Read the firmware's TX result ring, acknowledge the results by
 * writing the host counter back, and process each new entry via
 * wl1271_tx_complete_packet().
 */
void wl1271_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap =
		(struct wl1271_acx_mem_map *)wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;

	/* read the tx results from the chipset */
	wl1271_read(wl, le32_to_cpu(memmap->tx_result),
		    wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
		       offsetof(struct wl1271_tx_hw_res_if,
				tx_result_host_counter), fw_counter);

	/* number of new results since the last run */
	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		/* ring index derived from the running host counter */
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result =  &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}
}
801
802void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
803{
804	struct sk_buff *skb;
805	int i;
806	unsigned long flags;
807	struct ieee80211_tx_info *info;
808	int total[NUM_TX_QUEUES];
809
810	for (i = 0; i < NUM_TX_QUEUES; i++) {
811		total[i] = 0;
812		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
813			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
814			info = IEEE80211_SKB_CB(skb);
815			info->status.rates[0].idx = -1;
816			info->status.rates[0].count = 0;
817			ieee80211_tx_status_ni(wl->hw, skb);
818			total[i]++;
819		}
820	}
821
822	spin_lock_irqsave(&wl->wl_lock, flags);
823	for (i = 0; i < NUM_TX_QUEUES; i++)
824		wl->tx_queue_count[i] -= total[i];
825	spin_unlock_irqrestore(&wl->wl_lock, flags);
826
827	wl1271_handle_tx_low_watermark(wl);
828}
829
/* caller must hold wl->mutex and TX must be stopped */
/*
 * Flush all driver TX state after a firmware/interface reset: drop all
 * queued frames (per-link in AP mode, per-AC in STA mode), optionally
 * wake stopped queues, and complete every in-flight descriptor back to
 * mac80211 after stripping the driver-private headers.
 *
 * @reset_tx_queues: when true, also wake mac80211 queues via the low
 *                   watermark handler.
 */
void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* TX failure */
	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		for (i = 0; i < AP_MAX_LINKS; i++) {
			wl1271_tx_reset_link_queues(wl, i);
			wl->links[i].allocated_blks = 0;
			wl->links[i].prev_freed_blks = 0;
		}

		wl->last_tx_hlid = 0;
	} else {
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
				wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
					     skb);

				/* the dummy packet belongs to the driver,
				 * never to mac80211 */
				if (!wl12xx_is_dummy_packet(wl, skb)) {
					info = IEEE80211_SKB_CB(skb);
					info->status.rates[0].idx = -1;
					info->status.rates[0].count = 0;
					ieee80211_tx_status_ni(wl->hw, skb);
				}
			}
			wl->tx_queue_count[i] = 0;
		}
	}

	wl->stopped_queues_map = 0;

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	if (reset_tx_queues)
		wl1271_handle_tx_low_watermark(wl);

	/* complete all frames still owned by the firmware */
	for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if (info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_TKIP_IV_SPACE,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_TKIP_IV_SPACE);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}
904
905#define WL1271_TX_FLUSH_TIMEOUT 500000
906
907/* caller must *NOT* hold wl->mutex */
908void wl1271_tx_flush(struct wl1271 *wl)
909{
910	unsigned long timeout;
911	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
912
913	while (!time_after(jiffies, timeout)) {
914		mutex_lock(&wl->mutex);
915		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
916			     wl->tx_frames_cnt,
917			     wl1271_tx_total_queue_count(wl));
918		if ((wl->tx_frames_cnt == 0) &&
919		    (wl1271_tx_total_queue_count(wl) == 0)) {
920			mutex_unlock(&wl->mutex);
921			return;
922		}
923		mutex_unlock(&wl->mutex);
924		msleep(1);
925	}
926
927	wl1271_warning("Unable to flush all TX buffers, timed out.");
928}
929
/*
 * Return the lowest enabled rate from the basic rate set, isolated as a
 * single CONF_HW_BIT_RATE_* bit.
 *
 * If the basic rate set was never configured (should not happen), warn
 * and fall back to the configured default before scanning.
 * NOTE(review): the bit-scan loop assumes at least one bit is set even
 * after the fallback; an all-zero set would spin and shift past bit 31
 * (undefined behavior) — verify wl->conf.tx.basic_rate is never 0.
 */
u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
{
	int i;
	u32 rate = 0;

	if (!wl->basic_rate_set) {
		WARN_ON(1);
		wl->basic_rate_set = wl->conf.tx.basic_rate;
	}

	/* scan upward from bit 0; first set bit is the minimum rate */
	for (i = 0; !rate; i++) {
		if ((wl->basic_rate_set >> i) & 0x1)
			rate = 1 << i;
	}

	return rate;
}
947