/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"

/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"

static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}

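/*
 * Reserve a free Tx descriptor id for the given skb by scanning the
 * tx_frames_map bitmap. Illustrative example: with ids 0 and 1 already
 * in use, find_first_zero_bit() returns 2, the bit is claimed and the
 * skb is remembered in tx_frames[2]; once all num_tx_desc ids are
 * taken, -EBUSY is returned and the caller must back off.
 */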
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}

void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}
EXPORT_SYMBOL(wl1271_free_tx_id);

static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct wl12xx_vif *wlvif,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (!ieee80211_is_auth(hdr->frame_control))
		return;

	/*
	 * add the station to the known list before transmitting the
	 * authentication response. this way it won't get de-authed by FW
	 * when transmitting too soon.
	 */
	wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);

	/*
	 * ROC for 1 second on the AP channel for completing the connection.
	 * Note the ROC will be continued by the update_sta_state callbacks
	 * once the station reaches the associated state.
	 */
	wlcore_update_inconn_sta(wl, wlvif, NULL, true);
	wlvif->pending_auth_reply_time = jiffies;
	cancel_delayed_work(&wlvif->pending_auth_complete_work);
	ieee80211_queue_delayed_work(wl->hw,
				&wlvif->pending_auth_complete_work,
				msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}

static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps;
	u8 tx_pkts;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;

	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 2*ap_count + 1 active links,
	 * since we must account for the global and broadcast AP links
	 * for each AP. The "fw_ps" check assures us the other link is a STA
	 * connected to the AP. Otherwise the FW would not set the PSM bit.
	 */
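	/*
	 * Worked example (illustrative): with a single AP interface
	 * (ap_count == 1), the AP's global and broadcast links plus one
	 * connected STA give active_link_count == 3, which is not greater
	 * than 2*1 + 1, so the only STA is never forced into high-level
	 * PS. A second STA raises the count to 4, making both eligible
	 * once they reach the packet threshold below.
	 */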
	if (wl->active_link_count > (wl->ap_count * 2 + 1) && fw_ps &&
	    tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}

bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}
EXPORT_SYMBOL(wl12xx_is_dummy_packet);

static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct sk_buff *skb, struct ieee80211_sta *sta)
{
	if (sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
			return wlvif->ap.bcast_hlid;
		else
			return wlvif->ap.global_hlid;
	}
}

u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *control;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);

	control = IEEE80211_SKB_CB(skb);
	if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		wl1271_debug(DEBUG_TX, "tx offchannel");
		return wlvif->dev_hlid;
	}

	return wlvif->sta.hlid;
}

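/*
 * Pad a packet length for the transfer bus. Example (illustrative,
 * assuming the usual 256-byte WL12XX_BUS_BLOCK_SIZE and 4-byte
 * WL1271_TX_ALIGN_TO): with WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN set and no
 * last-frame padding quirk, a 300-byte frame is rounded up to 512
 * bytes; otherwise it is only aligned to the 4-byte boundary and
 * stays 300.
 */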
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{
	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
	else
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);

static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	if (buf_offset + total_len > wl->aggr_buf_size)
		return -EAGAIN;

	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/*
		 * If the FW was empty before, arm the Tx watchdog. Also do
		 * this on the first Tx after resume, as we always cancel the
		 * watchdog on suspend.
		 */
		if (wl->tx_allocated_blocks == total_blocks ||
		    test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
			wl12xx_rearm_tx_watchdog_locked(wl);

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (test_bit(hlid, wl->links_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}

static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);
	}
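
	/*
	 * Buffer layout after the relocation above (sketch):
	 *
	 *   | tx_hw_descr | 802.11 header | extra bytes | payload |
	 *
	 * The 802.11 header is moved towards the descriptor so that a
	 * gap of 'extra' bytes opens up between it and the payload,
	 * where the firmware can later write the security header.
	 */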

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different from the one set in the
		 * join.
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		u8 session_id = wl->session_ids[hlid];

		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
		    (wlvif->bss_type == BSS_TYPE_AP_BSS))
			session_id = 0;

		/* configure the tx attributes */
		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
		 * if the packets are data packets
		 * send them with AP rate policies (EAPOLs are an exception),
		 * otherwise use default basic rates
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
			 !ieee80211_is_data(frame_control))
			/*
			 * send non-data, bcast and EAPOLs using the
			 * min basic rate
			 */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	/* send EAPOL frames as voice */
	if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
		tx_attr |= TX_HW_ATTR_EAPOL_FRAME;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}

/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	bool is_dummy;
	bool is_gem = false;

	if (!skb) {
		wl1271_error("discarding null skb");
		return -EINVAL;
	}

	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	info = IEEE80211_SKB_CB(skb);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}

		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
				 is_gem);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned.  The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (eg. for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}

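/*
 * Translate a rate_set bitmap into CONF_HW_BIT_RATE_* flags for the
 * firmware. Illustrative example: if bits 0 and 2 are set and the
 * band's bitrates[] lists 1, 2, 5.5 and 11 Mbps, the hw_value flags of
 * the 1 and 5.5 Mbps entries are OR'ed in; bits at HW_HT_RATES_OFFSET
 * and above map to CONF_HW_BIT_RATE_MCS_0 onwards.
 */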
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum ieee80211_band rate_band)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

	/* MCS rate indications are on bits 16 - 31 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 16; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}

	return enabled_rates;
}

void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	int i;
	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif(wl, wlvif) {
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
			    wlvif->tx_queue_count[i] <=
					WL1271_TX_QUEUE_LOW_WATERMARK)
				/* firmware buffer has space, restart queues */
				wlcore_wake_queue(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK);
		}
	}
}

static int wlcore_select_ac(struct wl1271 *wl)
{
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty ac where:
	 * 1. There are packets to transmit
	 * 2. The FW has the least allocated packets
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
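	/*
	 * Worked example (illustrative): with frames queued on VI, BE
	 * and BK and 3, 3 and 5 packets respectively already allocated
	 * in the FW, VI is selected: it ties with BE on allocations but
	 * is visited first, and the strict '<' below keeps the first,
	 * highest-priority AC on ties.
	 */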
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (wl->tx_queue_count[ac] &&
		    wl->tx_allocated_pkts[ac] < min_pkts) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}

	return q;
}

static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
					  struct wl1271_link *lnk, u8 q)
{
	struct sk_buff *skb;
	unsigned long flags;

	skb = skb_dequeue(&lnk->tx_queue[q]);
	if (skb) {
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		if (lnk->wlvif) {
			WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
			lnk->wlvif->tx_queue_count[q]--;
		}
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
						    u8 hlid, u8 ac,
						    u8 *low_prio_hlid)
{
	struct wl1271_link *lnk = &wl->links[hlid];

	if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
		if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
		    !skb_queue_empty(&lnk->tx_queue[ac]) &&
		    wlcore_hw_lnk_low_prio(wl, hlid, lnk))
			/* we found the first non-empty low priority queue */
			*low_prio_hlid = hlid;

		return NULL;
	}

	return wlcore_lnk_dequeue(wl, lnk, ac);
}

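/*
 * Round-robin dequeue across the vif's links for the given AC.
 * Illustrative example: with num_links == 16 and last_tx_hlid == 5,
 * the links are probed in the order 6, 7, ..., 15, 0, ..., 5, and
 * last_tx_hlid is left pointing at the link that actually yielded an
 * skb so the next pass resumes after it.
 */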
static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
						    struct wl12xx_vif *wlvif,
						    u8 ac, u8 *hlid,
						    u8 *low_prio_hlid)
{
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < wl->num_links; i++) {
		h = (start_hlid + i) % wl->num_links;

		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))
			continue;

		skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
						   low_prio_hlid);
		if (!skb)
			continue;

		wlvif->last_tx_hlid = h;
		break;
	}

	if (!skb)
		wlvif->last_tx_hlid = 0;

	*hlid = wlvif->last_tx_hlid;
	return skb;
}

static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;
	int ac;
	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;

	ac = wlcore_select_ac(wl);
	if (ac < 0)
		goto out;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				continue;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (!skb)
				continue;

			wl->last_wlvif = wlvif;
			break;
		}
	}

	/* dequeue from the system HLID before restarting the wlvif list */
	if (!skb) {
		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
						   ac, &low_prio_hlid);
		if (skb) {
			*hlid = wl->system_hlid;
			wl->last_wlvif = NULL;
		}
	}

	/* Do a new pass over the wlvif list. But no need to continue
	 * after last_wlvif. The previous pass should have found it. */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				goto next;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

next:
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	/* no high priority skbs found - but maybe a low priority one? */
	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
		skb = wlcore_lnk_dequeue(wl, lnk, ac);

		WARN_ON(!skb); /* we checked this before */
		*hlid = low_prio_hlid;

		/* ensure proper round robin in the vif/link levels */
		wl->last_wlvif = lnk->wlvif;
		if (lnk->wlvif)
			lnk->wlvif->last_tx_hlid = low_prio_hlid;

	}

out:
	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		*hlid = wl->system_hlid;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb, u8 hlid)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
				      wl->num_links;
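		/*
		 * e.g. (illustrative) with num_links == 16, requeueing
		 * on hlid 0 rewinds last_tx_hlid to 15, so the round
		 * robin in wlcore_vif_dequeue_high_prio starts at hlid
		 * 0 again and picks this packet first.
		 */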
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	if (wlvif)
		wlvif->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
}

void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	u8 hlid;

	if (!wl->conf.rx_streaming.interval)
		return;

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;
		for_each_set_bit(hlid, active_hlids, wl->num_links) {
			if (test_bit(hlid, wlvif->links_map)) {
				found = true;
				break;
			}
		}

		if (!found)
			continue;

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}

/*
 * Returns failure values only in case of failed bus ops within this function.
 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
 * triggering recovery by higher layers when not necessary.
 * In case a FW command fails within wl1271_prepare_tx_frame, a recovery
 * will already have been queued by wl1271_cmd_send. -EAGAIN/-EBUSY from
 * prepare_tx_frame can occur and are legitimate, so don't propagate them.
 * -EINVAL will emit a WARNING within the prepare_tx_frame code, but there's
 * nothing we should do about those either.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
	int ret = 0;
	int bus_ret = 0;
	u8 hlid;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		return 0;

	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb))
			wlvif = wl12xx_vif_to_data(info->control.vif);
		else
			hlid = wl->system_hlid;

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
					      hlid);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);

			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
							    last_len);
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
					     wl->aggr_buf, buf_offset, true);
			if (bus_ret < 0)
				goto out;

			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		last_len = ret;
		buf_offset += last_len;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	if (buf_offset) {
		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					     buf_offset, true);
		if (bus_ret < 0)
			goto out;

		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
					     wl->tx_packets_count);
			if (bus_ret < 0)
				goto out;
		}

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	return bus_ret;
}

void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_tx_work_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
	u8 flags = 0;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index <= 8)
		flags |= IEEE80211_TX_RC_MCS;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index == 0)
		flags |= IEEE80211_TX_RC_SHORT_GI;

	return flags;
}

static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}

/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;
	int ret;

	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	if (ret < 0)
		goto out;

	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);
	if (ret < 0)
		goto out;

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
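	/*
	 * Example (illustrative): with tx_results_count == 14 and a
	 * fw_counter of 17, three results are pending; the loop below
	 * reads ring slots 14, 15 and 16, each masked with
	 * TX_HW_RESULT_QUEUE_LEN_MASK.
	 */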

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}

out:
	return ret;
}
EXPORT_SYMBOL(wlcore_tx_complete);

void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];
	struct wl1271_link *lnk = &wl->links[hlid];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_queue_count[i] -= total[i];
		if (lnk->wlvif)
			lnk->wlvif->tx_queue_count[i] -= total[i];
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	/* TX failure */
	for_each_set_bit(i, wlvif->links_map, wl->num_links) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
			/* this calls wl12xx_free_link */
			wl1271_free_sta(wl, wlvif, i);
		} else {
			u8 hlid = i;
			wl12xx_free_link(wl, wlvif, &hlid);
		}
	}
	wlvif->last_tx_hlid = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlvif->tx_queue_count[i] = 0;
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (wl1271_tx_total_queue_count(wl) != 0) {
		for (i = 0; i < wl->num_links; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}

#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout, start_time;
	int i;
	start_time = jiffies;
	timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	/* only one flush should be in progress, for consistent queue state */
	mutex_lock(&wl->flush_mutex);

	mutex_lock(&wl->mutex);
	if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
		mutex_unlock(&wl->mutex);
		goto out;
	}

	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);

	while (!time_after(jiffies, timeout)) {
		wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));

		/* force Tx and give the driver some time to flush data */
		mutex_unlock(&wl->mutex);
		if (wl1271_tx_total_queue_count(wl))
			wl1271_tx_work(&wl->tx_work);
		msleep(20);
		mutex_lock(&wl->mutex);

		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
				     jiffies_to_msecs(jiffies - start_time));
			goto out_wake;
		}
	}
1185	wl1271_warning("Unable to flush all TX buffers, "
1186		       "timed out (timeout %d ms",
1187		       WL1271_TX_FLUSH_TIMEOUT / 1000);

	/* forcibly flush all Tx buffers on our queues */
	for (i = 0; i < wl->num_links; i++)
		wl1271_tx_reset_link_queues(wl, i);

out_wake:
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
	mutex_unlock(&wl->mutex);
out:
	mutex_unlock(&wl->flush_mutex);
}
EXPORT_SYMBOL_GPL(wl1271_tx_flush);

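/*
 * Return the lowest enabled rate in the set as a single-bit mask,
 * e.g. (illustrative) a rate_set of 0b0110 yields BIT(1) == 0b0010.
 */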
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
	if (WARN_ON(!rate_set))
		return 0;

	return BIT(__ffs(rate_set));
}
EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);

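/*
 * Queue stop state is tracked per mac80211 hw queue as a bitmask of
 * wlcore_queue_stop_reason bits. Illustrative example: a queue stopped
 * for both FLUSH and WATERMARK stays stopped until both reasons are
 * cleared; only the first reason set actually calls
 * ieee80211_stop_queue(), and only clearing the last one wakes it.
 */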
void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      u8 queue, enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
	bool stopped = !!wl->queue_stop_reasons[hwq];

	/* queue should not be stopped for this reason */
	WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));

	if (stopped)
		return;

	ieee80211_stop_queue(wl->hw, hwq);
}

void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	wlcore_stop_queue_locked(wl, wlvif, queue, reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue should not be clear for this reason */
	WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));

	if (wl->queue_stop_reasons[hwq])
		goto out;

	ieee80211_wake_queue(wl->hw, hwq);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as stopped */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(test_and_set_bit(reason,
					      &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 that we
	 * don't know about are stopped.
	 */
	ieee80211_stop_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as awake */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(!test_and_clear_bit(reason,
						 &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 that we
	 * don't know about are woken up.
	 */
	ieee80211_wake_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif, u8 queue,
				       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	bool stopped;

	spin_lock_irqsave(&wl->wl_lock, flags);
	stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
							   reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	return stopped;
}

bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif, u8 queue,
				       enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return test_bit(reason, &wl->queue_stop_reasons[hwq]);
}

bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				    u8 queue)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return !!wl->queue_stop_reasons[hwq];
}