tx.c revision 31627dc59b4a87c4198b4245a7de1b8ccf4424fa
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include "wl1271.h"
#include "wl1271_io.h"
#include "wl1271_reg.h"
#include "wl1271_ps.h"
#include "wl1271_tx.h"

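/*
 * Find a free entry in wl->tx_frames, reserve it for @skb and return
 * its index.  The index doubles as the descriptor id handed to the
 * firmware.  Returns -EBUSY when all ACX_TX_DESCRIPTORS entries are
 * in use.
 */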
static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int i;
	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
		if (wl->tx_frames[i] == NULL) {
			wl->tx_frames[i] = skb;
			return i;
		}

	return -EBUSY;
}

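/*
 * Reserve firmware memory blocks for the frame.  The estimate is
 * DIV_ROUND_UP(descriptor + extra + frame, TX_HW_BLOCK_SIZE) plus
 * TX_HW_BLOCK_SPARE spare blocks.  On success, room for the hardware
 * descriptor is pushed in front of the skb data and the block budget
 * in wl->tx_blocks_available is reduced; otherwise the reserved id is
 * released again and -EBUSY is returned.
 */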
static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY;

	/* allocate free identifier for the packet */
	id = wl1271_tx_id(wl, skb);
	if (id < 0)
		return id;

	/* approximate the number of blocks required for this packet
	   in the firmware */
	total_blocks = total_len + TX_HW_BLOCK_SIZE - 1;
	total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE;
	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		desc->extra_mem_blocks = TX_HW_BLOCK_SPARE;
		desc->total_mem_blocks = total_blocks;
		desc->id = id;

		wl->tx_blocks_available -= total_blocks;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else
		wl->tx_frames[id] = NULL;

	return ret;
}

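/*
 * Fill the hardware descriptor at the head of the skb.  When @extra is
 * non-zero (TKIP), the 802.11 header is moved up against the
 * descriptor, leaving @extra bytes between header and payload,
 * presumably so the firmware can insert the IV there.  The descriptor
 * carries the packet start time and lifetime, the TID derived from the
 * AC queue and the frame length rounded up to a 4-byte boundary in
 * 32-bit words; the pad byte count, the session counter and the rate
 * policy (AP rates when a STA entry exists, basic rates otherwise) go
 * into the tx attributes.
 */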
static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
			      u32 extra, struct ieee80211_tx_info *control)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int pad, ac;
	s64 hosttime;
	u16 tx_attr;

	desc = (struct wl1271_tx_hw_descr *) skb->data;

	/* relocate space for security header */
	if (extra) {
		void *framestart = skb->data + sizeof(*desc);
		u16 fc = *(u16 *)(framestart + extra);
		int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
		memmove(framestart, framestart + extra, hdrlen);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
	desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);

	/* configure the tx attributes */
	tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = wl1271_tx_ac_to_tid(ac);

	desc->aid = TX_HW_DEFAULT_AID;
	desc->reserved = 0;

	/* align the length (and store in terms of words) */
	pad = WL1271_TX_ALIGN(skb->len);
	desc->length = cpu_to_le16(pad >> 2);

	/* calculate number of padding bytes */
	pad = pad - skb->len;
	tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;

	/* if the packets are destined for AP (have a STA entry) send them
	   with AP rate policies, otherwise use default basic rates */
	if (control->control.sta)
		tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
	return 0;
}

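/*
 * Copy the frame to the firmware with a fixed-address block write.
 * The DMA needs the data pointer 4-byte aligned, so unaligned frames
 * (e.g. EAPOL frames from user space) are first shifted inside the skb
 * when it is not cloned and has enough tailroom.  The transfer length
 * is the frame length rounded up to a 4-byte boundary, and
 * wl->tx_packets_count is incremented so the caller can later notify
 * the firmware through the write access register.
 */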
static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
				 struct ieee80211_tx_info *control)
{

	struct wl1271_tx_hw_descr *desc;
	int len;

	/* FIXME: This is a workaround for getting non-aligned packets.
	   This happens at least with EAPOL packets from the user space.
	   Our DMA requires packets to be aligned on a 4-byte boundary.
	*/
	if (unlikely((long)skb->data & 0x03)) {
		int offset = (4 - (long)skb->data) & 0x03;
		wl1271_debug(DEBUG_TX, "skb offset %d", offset);

		/* check whether the current skb can be used */
		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
			unsigned char *src = skb->data;

			/* align the buffer on a 4-byte boundary */
			skb_reserve(skb, offset);
			memmove(skb->data, src, skb->len);
		} else {
			wl1271_info("unable to align tx skb, dropping it");
			return -EINVAL;
		}
	}

	len = WL1271_TX_ALIGN(skb->len);

	/* perform a fixed address block write with the packet */
	wl1271_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);

	/* count the packet; the new counter value is written to the
	   write access register by wl1271_tx_work */
	wl->tx_packets_count++;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
		     desc->id, skb, len, desc->length);

	return 0;
}

/* caller must hold wl->mutex */
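/*
 * Transmit a single frame: make room for the TKIP IV if needed, make
 * sure the hw_key index is the current default WEP key, then reserve a
 * descriptor id and firmware blocks, fill the descriptor and push the
 * frame to the firmware.  Returns -EBUSY when the firmware has no
 * blocks left, so the caller can requeue the skb.
 */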
static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u8 idx;

	if (!skb)
		return -EINVAL;

	info = IEEE80211_SKB_CB(skb);

	if (info->control.hw_key &&
	    info->control.hw_key->alg == ALG_TKIP)
		extra = WL1271_TKIP_IV_SPACE;

	if (info->control.hw_key) {
		idx = info->control.hw_key->hw_key_idx;

		/* FIXME: do we have to do this if we're not using WEP? */
		if (unlikely(wl->default_key != idx)) {
			ret = wl1271_cmd_set_default_wep_key(wl, idx);
			if (ret < 0)
				return ret;
			wl->default_key = idx;
		}
	}

	ret = wl1271_tx_allocate(wl, skb, extra);
	if (ret < 0)
		return ret;

	ret = wl1271_tx_fill_hdr(wl, skb, extra, info);
	if (ret < 0)
		return ret;

	ret = wl1271_tx_send_packet(wl, skb, info);
	if (ret < 0)
		return ret;

	return ret;
}

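/*
 * Translate a mac80211 rate bitmap for the current band (bit n set
 * means the n-th bitrate is usable) into the firmware rate mask built
 * from the matching hw_value fields.
 */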
static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[wl->band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

	return enabled_rates;
}

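/*
 * Deferred transmission work.  Picks up a pending AP rate-set change,
 * wakes the chip from ELP before the first frame and drains
 * wl->tx_queue until it is empty or the firmware runs out of memory
 * blocks (-EBUSY), in which case the frame is put back at the head of
 * the queue.  Finally, if anything was queued to the hardware, the new
 * packet counter is written to WL1271_HOST_WR_ACCESS to let the
 * firmware know about the frames.
 */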
void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	struct sk_buff *skb;
	bool woken_up = false;
	u32 sta_rates = 0;
	u32 prev_tx_packets_count;
	int ret;

	/* check if the rates supported by the AP have changed */
	if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
					&wl->flags))) {
		unsigned long flags;
		spin_lock_irqsave(&wl->wl_lock, flags);
		sta_rates = wl->sta_rate_set;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	prev_tx_packets_count = wl->tx_packets_count;

	/* if rates have changed, re-configure the rate policy */
	if (unlikely(sta_rates)) {
		wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
		wl1271_acx_rate_policies(wl);
	}

	while ((skb = skb_dequeue(&wl->tx_queue))) {
		if (!woken_up) {
			ret = wl1271_ps_elp_wakeup(wl, false);
			if (ret < 0)
				goto out_ack;
			woken_up = true;
		}

		ret = wl1271_tx_frame(wl, skb);
		if (ret == -EBUSY) {
			/* firmware buffer is full, let's stop transmitting. */
			skb_queue_head(&wl->tx_queue, skb);
			goto out_ack;
		} else if (ret < 0) {
			dev_kfree_skb(skb);
			goto out_ack;
		}
	}

out_ack:
	/* interrupt the firmware with the new packets */
	if (prev_tx_packets_count != wl->tx_packets_count)
		wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);

out:
	if (woken_up)
		wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
}

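/*
 * Handle a single TX result descriptor: translate the firmware status
 * and rate into the mac80211 tx_info, account retries, advance the
 * driver's security sequence counter by the delta reported by the
 * firmware, strip the private hardware descriptor (and the TKIP IV
 * space, if any) and hand the skb back to mac80211.
 */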
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wl1271_rate_to_idx(wl, result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = 0;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/* update security sequence number */
	wl->tx_security_seq += (result->lsb_security_sequence_number -
				wl->tx_security_last_seq);
	wl->tx_security_last_seq = result->lsb_security_sequence_number;

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if (info->control.hw_key &&
	    info->control.hw_key->alg == ALG_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
		skb_pull(skb, WL1271_TKIP_IV_SPACE);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	ieee80211_tx_status(wl->hw, skb);
	wl->tx_frames[result->id] = NULL;
}

/* Called upon reception of a TX complete interrupt */
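/*
 * Read the result interface block from the chipset, ack it by writing
 * the firmware counter back as the host counter, then walk the
 * fw_counter - tx_results_count new entries of the circular result
 * queue (TX_HW_RESULT_QUEUE_LEN entries).  When the driver queue has
 * drained below WL1271_TX_QUEUE_LOW_WATERMARK while stopped, the
 * mac80211 queues are woken and tx_work is rescheduled.
 */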
void wl1271_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap =
		(struct wl1271_acx_mem_map *)wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;

	/* read the tx results from the chipset */
	wl1271_read(wl, le32_to_cpu(memmap->tx_result),
		    wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
		       offsetof(struct wl1271_tx_hw_res_if,
				tx_result_host_counter), fw_counter);

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result =  &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}

	if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
	    skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
		unsigned long flags;

		/* firmware buffer has space, restart queues */
		wl1271_debug(DEBUG_TX, "tx_complete: waking queues");
		spin_lock_irqsave(&wl->wl_lock, flags);
		ieee80211_wake_queues(wl->hw);
		clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	}
}

/* caller must hold wl->mutex */
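/*
 * Drop everything still queued in the driver and everything still held
 * in tx_frames; frames for which mac80211 requested a TX status are
 * returned through ieee80211_tx_status(), the rest are simply dropped.
 */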
void wl1271_tx_flush(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* TX failure */
/* 	control->flags = 0; FIXME */

	while ((skb = skb_dequeue(&wl->tx_queue))) {
		info = IEEE80211_SKB_CB(skb);

		wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb);

		if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
			/* nobody waits for the status, just free the skb */
			dev_kfree_skb(skb);
			continue;
		}

		ieee80211_tx_status(wl->hw, skb);
	}

	for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
		if (wl->tx_frames[i] != NULL) {
			skb = wl->tx_frames[i];
			info = IEEE80211_SKB_CB(skb);

			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				dev_kfree_skb(skb);
				wl->tx_frames[i] = NULL;
				continue;
			}

			ieee80211_tx_status(wl->hw, skb);
			wl->tx_frames[i] = NULL;
		}
}
447