recv.c revision 7a37081e2e25e58701b17c41579fd06bc353b392
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath9k.h"
18#include "ar9003_mac.h"
19
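/*
 * Each RX skb's cb area stashes a pointer back to its owning ath_buf, so
 * the EDMA completion path can recover which buffer a dequeued skb
 * belongs to.
 */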
20#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
21
22static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
23					       int mindelta, int main_rssi_avg,
24					       int alt_rssi_avg, int pkt_count)
25{
26	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
27		(alt_rssi_avg > main_rssi_avg + maxdelta)) ||
28		(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
29}
30
31static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
32{
33	return sc->ps_enabled &&
34	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
35}
36
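/*
 * Map a received frame to the virtual wiphy it was addressed to: if the
 * receiver address (addr1) matches a secondary wiphy's permanent MAC
 * address, that wiphy's hw is returned; otherwise fall back to the
 * primary wiphy.
 */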
37static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
38					     struct ieee80211_hdr *hdr)
39{
40	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
41	int i;
42
43	spin_lock_bh(&sc->wiphy_lock);
44	for (i = 0; i < sc->num_sec_wiphy; i++) {
45		struct ath_wiphy *aphy = sc->sec_wiphy[i];
46		if (aphy == NULL)
47			continue;
48		if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
49		    == 0) {
50			hw = aphy->hw;
51			break;
52		}
53	}
54	spin_unlock_bh(&sc->wiphy_lock);
55	return hw;
56}
57
58/*
59 * Setup and link descriptors.
60 *
61 * 11N: we can no longer afford to self link the last descriptor.
62 * MAC acknowledges BA status as long as it copies frames to host
63 * buffer (or rx fifo). This can incorrectly acknowledge packets
64 * to a sender if last desc is self-linked.
65 */
66static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
67{
68	struct ath_hw *ah = sc->sc_ah;
69	struct ath_common *common = ath9k_hw_common(ah);
70	struct ath_desc *ds;
71	struct sk_buff *skb;
72
73	ATH_RXBUF_RESET(bf);
74
75	ds = bf->bf_desc;
76	ds->ds_link = 0; /* link to null */
77	ds->ds_data = bf->bf_buf_addr;
78
79	/* virtual addr of the beginning of the buffer. */
80	skb = bf->bf_mpdu;
81	BUG_ON(skb == NULL);
82	ds->ds_vdata = skb->data;
83
84	/*
85	 * Set up the rx descriptor. The rx_bufsize here tells the hardware
86	 * how much data it can DMA to us and that we are prepared
87	 * to process.
88	 */
89	ath9k_hw_setuprxdesc(ah, ds,
90			     common->rx_bufsize,
91			     0);
92
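	/*
	 * Hand the first buffer's DMA address straight to the hardware, or
	 * chain it onto the previous descriptor's link field; rxlink is then
	 * left pointing at this descriptor's link so the next buffer can be
	 * appended the same way.
	 */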
93	if (sc->rx.rxlink == NULL)
94		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
95	else
96		*sc->rx.rxlink = bf->bf_daddr;
97
98	sc->rx.rxlink = &ds->ds_link;
99	ath9k_hw_rxena(ah);
100}
101
102static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
103{
104	/* XXX block beacon interrupts */
105	ath9k_hw_setantenna(sc->sc_ah, antenna);
106	sc->rx.defant = antenna;
107	sc->rx.rxotherant = 0;
108}
109
110static void ath_opmode_init(struct ath_softc *sc)
111{
112	struct ath_hw *ah = sc->sc_ah;
113	struct ath_common *common = ath9k_hw_common(ah);
114
115	u32 rfilt, mfilt[2];
116
117	/* configure rx filter */
118	rfilt = ath_calcrxfilter(sc);
119	ath9k_hw_setrxfilter(ah, rfilt);
120
121	/* configure bssid mask */
122	ath_hw_setbssidmask(common);
123
124	/* configure operational mode */
125	ath9k_hw_setopmode(ah);
126
127	/* calculate and install multicast filter */
128	mfilt[0] = mfilt[1] = ~0;
129	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
130}
131
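/*
 * Move one buffer from the free list onto the hardware RX FIFO for the
 * given EDMA queue: clear the self-describing status area, sync it back
 * to the device and remember the skb so completion processing can find
 * its ath_buf again. Returns false once the FIFO is at its hardware depth.
 */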
132static bool ath_rx_edma_buf_link(struct ath_softc *sc,
133				 enum ath9k_rx_qtype qtype)
134{
135	struct ath_hw *ah = sc->sc_ah;
136	struct ath_rx_edma *rx_edma;
137	struct sk_buff *skb;
138	struct ath_buf *bf;
139
140	rx_edma = &sc->rx.rx_edma[qtype];
141	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
142		return false;
143
144	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
145	list_del_init(&bf->list);
146
147	skb = bf->bf_mpdu;
148
149	ATH_RXBUF_RESET(bf);
150	memset(skb->data, 0, ah->caps.rx_status_len);
151	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
152				ah->caps.rx_status_len, DMA_TO_DEVICE);
153
154	SKB_CB_ATHBUF(skb) = bf;
155	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
156	skb_queue_tail(&rx_edma->rx_fifo, skb);
157
158	return true;
159}
160
161static void ath_rx_addbuffer_edma(struct ath_softc *sc,
162				  enum ath9k_rx_qtype qtype, int size)
163{
164	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
165	u32 nbuf = 0;
166
167	if (list_empty(&sc->rx.rxbuf)) {
168		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
169		return;
170	}
171
172	while (!list_empty(&sc->rx.rxbuf)) {
173		nbuf++;
174
175		if (!ath_rx_edma_buf_link(sc, qtype))
176			break;
177
178		if (nbuf >= size)
179			break;
180	}
181}
182
183static void ath_rx_remove_buffer(struct ath_softc *sc,
184				 enum ath9k_rx_qtype qtype)
185{
186	struct ath_buf *bf;
187	struct ath_rx_edma *rx_edma;
188	struct sk_buff *skb;
189
190	rx_edma = &sc->rx.rx_edma[qtype];
191
192	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
193		bf = SKB_CB_ATHBUF(skb);
194		BUG_ON(!bf);
195		list_add_tail(&bf->list, &sc->rx.rxbuf);
196	}
197}
198
199static void ath_rx_edma_cleanup(struct ath_softc *sc)
200{
201	struct ath_buf *bf;
202
203	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
204	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
205
206	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
207		if (bf->bf_mpdu)
208			dev_kfree_skb_any(bf->bf_mpdu);
209	}
210
211	INIT_LIST_HEAD(&sc->rx.rxbuf);
212
213	kfree(sc->rx.rx_bufptr);
214	sc->rx.rx_bufptr = NULL;
215}
216
217static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
218{
219	skb_queue_head_init(&rx_edma->rx_fifo);
220	skb_queue_head_init(&rx_edma->rx_buffers);
221	rx_edma->rx_fifo_hwsize = size;
222}
223
224static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
225{
226	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
227	struct ath_hw *ah = sc->sc_ah;
228	struct sk_buff *skb;
229	struct ath_buf *bf;
230	int error = 0, i;
231	u32 size;
232
233
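	/*
	 * Each RX buffer must hold a maximum-length MPDU plus the EDMA RX
	 * status words; the size is rounded up to a multiple of the cache
	 * line size (capped at 64 bytes).
	 */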
234	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
235				     ah->caps.rx_status_len,
236				     min(common->cachelsz, (u16)64));
237
238	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
239				    ah->caps.rx_status_len);
240
241	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
242			       ah->caps.rx_lp_qdepth);
243	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
244			       ah->caps.rx_hp_qdepth);
245
246	size = sizeof(struct ath_buf) * nbufs;
247	bf = kzalloc(size, GFP_KERNEL);
248	if (!bf)
249		return -ENOMEM;
250
251	INIT_LIST_HEAD(&sc->rx.rxbuf);
252	sc->rx.rx_bufptr = bf;
253
254	for (i = 0; i < nbufs; i++, bf++) {
255		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
256		if (!skb) {
257			error = -ENOMEM;
258			goto rx_init_fail;
259		}
260
261		memset(skb->data, 0, common->rx_bufsize);
262		bf->bf_mpdu = skb;
263
264		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
265						 common->rx_bufsize,
266						 DMA_BIDIRECTIONAL);
267		if (unlikely(dma_mapping_error(sc->dev,
268						bf->bf_buf_addr))) {
269				dev_kfree_skb_any(skb);
270				bf->bf_mpdu = NULL;
271				ath_print(common, ATH_DBG_FATAL,
272					"dma_mapping_error() on RX init\n");
273				error = -ENOMEM;
274				goto rx_init_fail;
275		}
276
277		list_add_tail(&bf->list, &sc->rx.rxbuf);
278	}
279
280	return 0;
281
282rx_init_fail:
283	ath_rx_edma_cleanup(sc);
284	return error;
285}
286
287static void ath_edma_start_recv(struct ath_softc *sc)
288{
289	spin_lock_bh(&sc->rx.rxbuflock);
290
291	ath9k_hw_rxena(sc->sc_ah);
292
293	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
294			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
295
296	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
297			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
298
299	spin_unlock_bh(&sc->rx.rxbuflock);
300
301	ath_opmode_init(sc);
302
303	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
304}
305
306static void ath_edma_stop_recv(struct ath_softc *sc)
307{
308	spin_lock_bh(&sc->rx.rxbuflock);
309	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
310	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
311	spin_unlock_bh(&sc->rx.rxbuflock);
312}
313
314int ath_rx_init(struct ath_softc *sc, int nbufs)
315{
316	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
317	struct sk_buff *skb;
318	struct ath_buf *bf;
319	int error = 0;
320
321	spin_lock_init(&sc->rx.rxflushlock);
322	sc->sc_flags &= ~SC_OP_RXFLUSH;
323	spin_lock_init(&sc->rx.rxbuflock);
324
325	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
326		return ath_rx_edma_init(sc, nbufs);
327	} else {
328		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
329				min(common->cachelsz, (u16)64));
330
331		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
332				common->cachelsz, common->rx_bufsize);
333
334		/* Initialize rx descriptors */
335
336		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
337				"rx", nbufs, 1, 0);
338		if (error != 0) {
339			ath_print(common, ATH_DBG_FATAL,
340				  "failed to allocate rx descriptors: %d\n",
341				  error);
342			goto err;
343		}
344
345		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
346			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
347					      GFP_KERNEL);
348			if (skb == NULL) {
349				error = -ENOMEM;
350				goto err;
351			}
352
353			bf->bf_mpdu = skb;
354			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
355					common->rx_bufsize,
356					DMA_FROM_DEVICE);
357			if (unlikely(dma_mapping_error(sc->dev,
358							bf->bf_buf_addr))) {
359				dev_kfree_skb_any(skb);
360				bf->bf_mpdu = NULL;
361				ath_print(common, ATH_DBG_FATAL,
362					  "dma_mapping_error() on RX init\n");
363				error = -ENOMEM;
364				goto err;
365			}
366			bf->bf_dmacontext = bf->bf_buf_addr;
367		}
368		sc->rx.rxlink = NULL;
369	}
370
371err:
372	if (error)
373		ath_rx_cleanup(sc);
374
375	return error;
376}
377
378void ath_rx_cleanup(struct ath_softc *sc)
379{
380	struct ath_hw *ah = sc->sc_ah;
381	struct ath_common *common = ath9k_hw_common(ah);
382	struct sk_buff *skb;
383	struct ath_buf *bf;
384
385	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
386		ath_rx_edma_cleanup(sc);
387		return;
388	} else {
389		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
390			skb = bf->bf_mpdu;
391			if (skb) {
392				dma_unmap_single(sc->dev, bf->bf_buf_addr,
393						common->rx_bufsize,
394						DMA_FROM_DEVICE);
395				dev_kfree_skb(skb);
396			}
397		}
398
399		if (sc->rx.rxdma.dd_desc_len != 0)
400			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
401	}
402}
403
404/*
405 * Calculate the receive filter according to the
406 * operating mode and state:
407 *
408 * o always accept unicast, broadcast, and multicast traffic
409 * o maintain current state of phy error reception (the hal
410 *   may enable phy error frames for noise immunity work)
411 * o probe request frames are accepted only when operating in
412 *   hostap, adhoc, or monitor modes
413 * o enable promiscuous mode according to the interface state
414 * o accept beacons:
415 *   - when operating in adhoc mode so the 802.11 layer creates
416 *     node table entries for peers,
417 *   - when operating in station mode for collecting rssi data when
418 *     the station is otherwise quiet, or
419 *   - when operating as a repeater so we see repeater-sta beacons
420 *   - when scanning
421 */
422
423u32 ath_calcrxfilter(struct ath_softc *sc)
424{
425#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
426
427	u32 rfilt;
428
429	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
430		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
431		| ATH9K_RX_FILTER_MCAST;
432
433	/* If not a STA, enable processing of Probe Requests */
434	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
435		rfilt |= ATH9K_RX_FILTER_PROBEREQ;
436
437	/*
438	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
439	 * mode interface or when in monitor mode. AP mode does not need this
440	 * since it receives all in-BSS frames anyway.
441	 */
442	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
443	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
444	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
445		rfilt |= ATH9K_RX_FILTER_PROM;
446
447	if (sc->rx.rxfilter & FIF_CONTROL)
448		rfilt |= ATH9K_RX_FILTER_CONTROL;
449
450	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
451	    (sc->nvifs <= 1) &&
452	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
453		rfilt |= ATH9K_RX_FILTER_MYBEACON;
454	else
455		rfilt |= ATH9K_RX_FILTER_BEACON;
456
457	if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) ||
458	    AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
459	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
460	    (sc->rx.rxfilter & FIF_PSPOLL))
461		rfilt |= ATH9K_RX_FILTER_PSPOLL;
462
463	if (conf_is_ht(&sc->hw->conf))
464		rfilt |= ATH9K_RX_FILTER_COMP_BAR;
465
466	if (sc->sec_wiphy || (sc->nvifs > 1) ||
467	    (sc->rx.rxfilter & FIF_OTHER_BSS)) {
468		/* The following may also be needed for other older chips */
469		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
470			rfilt |= ATH9K_RX_FILTER_PROM;
471		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
472	}
473
474	return rfilt;
475
476#undef RX_FILTER_PRESERVE
477}
478
479int ath_startrecv(struct ath_softc *sc)
480{
481	struct ath_hw *ah = sc->sc_ah;
482	struct ath_buf *bf, *tbf;
483
484	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
485		ath_edma_start_recv(sc);
486		return 0;
487	}
488
489	spin_lock_bh(&sc->rx.rxbuflock);
490	if (list_empty(&sc->rx.rxbuf))
491		goto start_recv;
492
493	sc->rx.rxlink = NULL;
494	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
495		ath_rx_buf_link(sc, bf);
496	}
497
498	/* We could have deleted elements so the list may be empty now */
499	if (list_empty(&sc->rx.rxbuf))
500		goto start_recv;
501
502	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
503	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
504	ath9k_hw_rxena(ah);
505
506start_recv:
507	spin_unlock_bh(&sc->rx.rxbuflock);
508	ath_opmode_init(sc);
509	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
510
511	return 0;
512}
513
514bool ath_stoprecv(struct ath_softc *sc)
515{
516	struct ath_hw *ah = sc->sc_ah;
517	bool stopped;
518
519	ath9k_hw_stoppcurecv(ah);
520	ath9k_hw_setrxfilter(ah, 0);
521	stopped = ath9k_hw_stopdmarecv(ah);
522
523	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
524		ath_edma_stop_recv(sc);
525	else
526		sc->rx.rxlink = NULL;
527
528	return stopped;
529}
530
531void ath_flushrecv(struct ath_softc *sc)
532{
533	spin_lock_bh(&sc->rx.rxflushlock);
534	sc->sc_flags |= SC_OP_RXFLUSH;
535	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
536		ath_rx_tasklet(sc, 1, true);
537	ath_rx_tasklet(sc, 1, false);
538	sc->sc_flags &= ~SC_OP_RXFLUSH;
539	spin_unlock_bh(&sc->rx.rxflushlock);
540}
541
542static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
543{
544	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
545	struct ieee80211_mgmt *mgmt;
546	u8 *pos, *end, id, elen;
547	struct ieee80211_tim_ie *tim;
548
549	mgmt = (struct ieee80211_mgmt *)skb->data;
550	pos = mgmt->u.beacon.variable;
551	end = skb->data + skb->len;
552
553	while (pos + 2 < end) {
554		id = *pos++;
555		elen = *pos++;
556		if (pos + elen > end)
557			break;
558
559		if (id == WLAN_EID_TIM) {
560			if (elen < sizeof(*tim))
561				break;
562			tim = (struct ieee80211_tim_ie *) pos;
563			if (tim->dtim_count != 0)
564				break;
565			return tim->bitmap_ctrl & 0x01;
566		}
567
568		pos += elen;
569	}
570
571	return false;
572}
573
574static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
575{
576	struct ieee80211_mgmt *mgmt;
577	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
578
579	if (skb->len < 24 + 8 + 2 + 2)
580		return;
581
582	mgmt = (struct ieee80211_mgmt *)skb->data;
583	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
584		return; /* not from our current AP */
585
586	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
587
588	if (sc->ps_flags & PS_BEACON_SYNC) {
589		sc->ps_flags &= ~PS_BEACON_SYNC;
590		ath_print(common, ATH_DBG_PS,
591			  "Reconfigure Beacon timers based on "
592			  "timestamp from the AP\n");
593		ath_beacon_config(sc, NULL);
594	}
595
596	if (ath_beacon_dtim_pending_cab(skb)) {
597		/*
598		 * Remain awake waiting for buffered broadcast/multicast
599		 * frames. If the last broadcast/multicast frame is not
600		 * received properly, the next beacon frame will work as
601		 * a backup trigger for returning into NETWORK SLEEP state,
602		 * so we are waiting for it as well.
603		 */
604		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
605			  "buffered broadcast/multicast frame(s)\n");
606		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
607		return;
608	}
609
610	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
611		/*
612		 * This can happen if a broadcast frame is dropped or the AP
613		 * fails to send a frame indicating that all CAB frames have
614		 * been delivered.
615		 */
616		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
617		ath_print(common, ATH_DBG_PS,
618			  "PS wait for CAB frames timed out\n");
619	}
620}
621
622static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
623{
624	struct ieee80211_hdr *hdr;
625	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
626
627	hdr = (struct ieee80211_hdr *)skb->data;
628
629	/* Process Beacon and CAB receive in PS state */
630	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
631	    && ieee80211_is_beacon(hdr->frame_control))
632		ath_rx_ps_beacon(sc, skb);
633	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
634		 (ieee80211_is_data(hdr->frame_control) ||
635		  ieee80211_is_action(hdr->frame_control)) &&
636		 is_multicast_ether_addr(hdr->addr1) &&
637		 !ieee80211_has_moredata(hdr->frame_control)) {
638		/*
639		 * No more broadcast/multicast frames to be received at this
640		 * point.
641		 */
642		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
643		ath_print(common, ATH_DBG_PS,
644			  "All PS CAB frames received, back to sleep\n");
645	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
646		   !is_multicast_ether_addr(hdr->addr1) &&
647		   !ieee80211_has_morefrags(hdr->frame_control)) {
648		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
649		ath_print(common, ATH_DBG_PS,
650			  "Going back to sleep after having received "
651			  "PS-Poll data (0x%lx)\n",
652			sc->ps_flags & (PS_WAIT_FOR_BEACON |
653					PS_WAIT_FOR_CAB |
654					PS_WAIT_FOR_PSPOLL_DATA |
655					PS_WAIT_FOR_TX_ACK));
656	}
657}
658
659static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
660				    struct ath_softc *sc, struct sk_buff *skb,
661				    struct ieee80211_rx_status *rxs)
662{
663	struct ieee80211_hdr *hdr;
664
665	hdr = (struct ieee80211_hdr *)skb->data;
666
667	/* Send the frame to mac80211 */
668	if (is_multicast_ether_addr(hdr->addr1)) {
669		int i;
670		/*
671		 * Deliver broadcast/multicast frames to all suitable
672		 * virtual wiphys.
673		 */
674		/* TODO: filter based on channel configuration */
675		for (i = 0; i < sc->num_sec_wiphy; i++) {
676			struct ath_wiphy *aphy = sc->sec_wiphy[i];
677			struct sk_buff *nskb;
678			if (aphy == NULL)
679				continue;
680			nskb = skb_copy(skb, GFP_ATOMIC);
681			if (!nskb)
682				continue;
683			ieee80211_rx(aphy->hw, nskb);
684		}
685		ieee80211_rx(sc->hw, skb);
686	} else
687		/* Deliver unicast frames based on receiver address */
688		ieee80211_rx(hw, skb);
689}
690
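/*
 * Pull completed frames off the RX FIFO into rx_buffers, stopping at the
 * first descriptor the hardware still owns (-EINPROGRESS). A corrupt
 * descriptor (-EINVAL) causes this buffer and the one following it to be
 * recycled back onto the hardware queue.
 */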
691static bool ath_edma_get_buffers(struct ath_softc *sc,
692				 enum ath9k_rx_qtype qtype)
693{
694	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
695	struct ath_hw *ah = sc->sc_ah;
696	struct ath_common *common = ath9k_hw_common(ah);
697	struct sk_buff *skb;
698	struct ath_buf *bf;
699	int ret;
700
701	skb = skb_peek(&rx_edma->rx_fifo);
702	if (!skb)
703		return false;
704
705	bf = SKB_CB_ATHBUF(skb);
706	BUG_ON(!bf);
707
708	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
709				common->rx_bufsize, DMA_FROM_DEVICE);
710
711	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
712	if (ret == -EINPROGRESS) {
713		/* let the device regain ownership of the buffer */
714		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
715				common->rx_bufsize, DMA_FROM_DEVICE);
716		return false;
717	}
718
719	__skb_unlink(skb, &rx_edma->rx_fifo);
720	if (ret == -EINVAL) {
721		/* corrupt descriptor, skip this one and the following one */
722		list_add_tail(&bf->list, &sc->rx.rxbuf);
723		ath_rx_edma_buf_link(sc, qtype);
724		skb = skb_peek(&rx_edma->rx_fifo);
725		if (!skb)
726			return true;
727
728		bf = SKB_CB_ATHBUF(skb);
729		BUG_ON(!bf);
730
731		__skb_unlink(skb, &rx_edma->rx_fifo);
732		list_add_tail(&bf->list, &sc->rx.rxbuf);
733		ath_rx_edma_buf_link(sc, qtype);
734		return true;
735	}
736	skb_queue_tail(&rx_edma->rx_buffers, skb);
737
738	return true;
739}
740
741static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
742						struct ath_rx_status *rs,
743						enum ath9k_rx_qtype qtype)
744{
745	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
746	struct sk_buff *skb;
747	struct ath_buf *bf;
748
749	while (ath_edma_get_buffers(sc, qtype));
750	skb = __skb_dequeue(&rx_edma->rx_buffers);
751	if (!skb)
752		return NULL;
753
754	bf = SKB_CB_ATHBUF(skb);
755	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
756	return bf;
757}
758
759static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
760					   struct ath_rx_status *rs)
761{
762	struct ath_hw *ah = sc->sc_ah;
763	struct ath_common *common = ath9k_hw_common(ah);
764	struct ath_desc *ds;
765	struct ath_buf *bf;
766	int ret;
767
768	if (list_empty(&sc->rx.rxbuf)) {
769		sc->rx.rxlink = NULL;
770		return NULL;
771	}
772
773	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
774	ds = bf->bf_desc;
775
776	/*
777	 * Must provide the virtual address of the current
778	 * descriptor, the physical address, and the virtual
779	 * address of the next descriptor in the h/w chain.
780	 * This allows the HAL to look ahead to see if the
781	 * hardware is done with a descriptor by checking the
782	 * done bit in the following descriptor and the address
783	 * of the current descriptor the DMA engine is working
784	 * on.  All this is necessary because of our use of
785	 * a self-linked list to avoid rx overruns.
786	 */
787	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
788	if (ret == -EINPROGRESS) {
789		struct ath_rx_status trs;
790		struct ath_buf *tbf;
791		struct ath_desc *tds;
792
793		memset(&trs, 0, sizeof(trs));
794		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
795			sc->rx.rxlink = NULL;
796			return NULL;
797		}
798
799		tbf = list_entry(bf->list.next, struct ath_buf, list);
800
801		/*
802		 * On some hardware the descriptor status words could
803		 * get corrupted, including the done bit. Because of
804		 * this, check if the next descriptor's done bit is
805		 * set or not.
806		 *
807		 * If the next descriptor's done bit is set, the current
808		 * descriptor has been corrupted. Force s/w to discard
809		 * this descriptor and continue...
810		 */
811
812		tds = tbf->bf_desc;
813		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
814		if (ret == -EINPROGRESS)
815			return NULL;
816	}
817
818	if (!bf->bf_mpdu)
819		return bf;
820
821	/*
822	 * Synchronize the DMA transfer with CPU before
823	 * 1. accessing the frame
824	 * 2. requeueing the same buffer to h/w
825	 */
826	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
827			common->rx_bufsize,
828			DMA_FROM_DEVICE);
829
830	return bf;
831}
832
833/* Assumes you've already done the endian to CPU conversion */
834static bool ath9k_rx_accept(struct ath_common *common,
835			    struct ieee80211_hdr *hdr,
836			    struct ieee80211_rx_status *rxs,
837			    struct ath_rx_status *rx_stats,
838			    bool *decrypt_error)
839{
840	struct ath_hw *ah = common->ah;
841	__le16 fc;
842	u8 rx_status_len = ah->caps.rx_status_len;
843
844	fc = hdr->frame_control;
845
846	if (!rx_stats->rs_datalen)
847		return false;
848	/*
849	 * rs_status follows rs_datalen, so if rs_datalen is too large we can
850	 * take it as a hint that the hardware corrupted it and ignore those
851	 * frames.
852	 */
853	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
854		return false;
855
856	/*
857	 * rs_more indicates chained descriptors which can be used
858	 * to link buffers together for a sort of scatter-gather
859	 * operation. Reject the frame, since we don't support
860	 * scatter-gather yet and the frame is probably corrupt
861	 * anyway.
862	 */
863	if (rx_stats->rs_more)
864		return false;
865
866	/*
867	 * The rx_stats->rs_status will not be set until the end of the
868	 * chained descriptors so it can be ignored if rs_more is set. The
869	 * rs_more will be false at the last element of the chained
870	 * descriptors.
871	 */
872	if (rx_stats->rs_status != 0) {
873		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
874			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
875		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
876			return false;
877
878		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
879			*decrypt_error = true;
880		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
881			/*
882			 * The MIC error bit is only valid if the frame
883			 * is not a control frame or fragment, and it was
884			 * decrypted using a valid TKIP key.
885			 */
886			if (!ieee80211_is_ctl(fc) &&
887			    !ieee80211_has_morefrags(fc) &&
888			    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
889			    test_bit(rx_stats->rs_keyix, common->tkip_keymap))
890				rxs->flag |= RX_FLAG_MMIC_ERROR;
891			else
892				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
893		}
894		/*
895		 * Reject error frames with the exception of
896		 * decryption and MIC failures. For monitor mode,
897		 * we also ignore the CRC error.
898		 */
899		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
900			if (rx_stats->rs_status &
901			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
902			      ATH9K_RXERR_CRC))
903				return false;
904		} else {
905			if (rx_stats->rs_status &
906			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
907				return false;
908			}
909		}
910	}
911	return true;
912}
913
914static int ath9k_process_rate(struct ath_common *common,
915			      struct ieee80211_hw *hw,
916			      struct ath_rx_status *rx_stats,
917			      struct ieee80211_rx_status *rxs)
918{
919	struct ieee80211_supported_band *sband;
920	enum ieee80211_band band;
921	unsigned int i = 0;
922
923	band = hw->conf.channel->band;
924	sband = hw->wiphy->bands[band];
925
926	if (rx_stats->rs_rate & 0x80) {
927		/* HT rate */
928		rxs->flag |= RX_FLAG_HT;
929		if (rx_stats->rs_flags & ATH9K_RX_2040)
930			rxs->flag |= RX_FLAG_40MHZ;
931		if (rx_stats->rs_flags & ATH9K_RX_GI)
932			rxs->flag |= RX_FLAG_SHORT_GI;
933		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
934		return 0;
935	}
936
937	for (i = 0; i < sband->n_bitrates; i++) {
938		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
939			rxs->rate_idx = i;
940			return 0;
941		}
942		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
943			rxs->flag |= RX_FLAG_SHORTPRE;
944			rxs->rate_idx = i;
945			return 0;
946		}
947	}
948
949	/*
950	 * No valid hardware bitrate found -- we should not get here
951	 * because hardware has already validated this frame as OK.
952	 */
953	ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
954		  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);
955
956	return -EINVAL;
957}
958
959static void ath9k_process_rssi(struct ath_common *common,
960			       struct ieee80211_hw *hw,
961			       struct ieee80211_hdr *hdr,
962			       struct ath_rx_status *rx_stats)
963{
964	struct ath_hw *ah = common->ah;
965	struct ieee80211_sta *sta;
966	struct ath_node *an;
967	int last_rssi = ATH_RSSI_DUMMY_MARKER;
968	__le16 fc;
969
970	fc = hdr->frame_control;
971
972	rcu_read_lock();
973	/*
974	 * XXX: use ieee80211_find_sta! This requires quite a bit of work
975	 * under the current ath9k virtual wiphy implementation as we have
976	 * no way of tying a vif to wiphy. Typically vifs are attached to
977	 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
978	 * wiphy you'd have to iterate over every wiphy and each sdata.
979	 */
980	sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
981	if (sta) {
982		an = (struct ath_node *) sta->drv_priv;
983		if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
984		   !rx_stats->rs_moreaggr)
985			ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
986		last_rssi = an->last_rssi;
987	}
988	rcu_read_unlock();
989
990	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
991		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
992					      ATH_RSSI_EP_MULTIPLIER);
993	if (rx_stats->rs_rssi < 0)
994		rx_stats->rs_rssi = 0;
995
996	/* Update Beacon RSSI, this is used by ANI. */
997	if (ieee80211_is_beacon(fc))
998		ah->stats.avgbrssi = rx_stats->rs_rssi;
999}
1000
1001/*
1002 * For Decrypt or Demic errors, we only mark the packet status here and always
1003 * push the frame up to let mac80211 handle the actual error case, be it a
1004 * missing decryption key or a real decryption error. This lets us keep statistics there.
1005 */
1006static int ath9k_rx_skb_preprocess(struct ath_common *common,
1007				   struct ieee80211_hw *hw,
1008				   struct ieee80211_hdr *hdr,
1009				   struct ath_rx_status *rx_stats,
1010				   struct ieee80211_rx_status *rx_status,
1011				   bool *decrypt_error)
1012{
1013	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
1014
1015	/*
1016	 * everything but the rate is checked here, the rate check is done
1017	 * separately to avoid doing two lookups for a rate for each frame.
1018	 */
1019	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
1020		return -EINVAL;
1021
1022	ath9k_process_rssi(common, hw, hdr, rx_stats);
1023
1024	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
1025		return -EINVAL;
1026
1027	rx_status->band = hw->conf.channel->band;
1028	rx_status->freq = hw->conf.channel->center_freq;
1029	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
1030	rx_status->antenna = rx_stats->rs_antenna;
1031	rx_status->flag |= RX_FLAG_TSFT;
1032
1033	return 0;
1034}
1035
1036static void ath9k_rx_skb_postprocess(struct ath_common *common,
1037				     struct sk_buff *skb,
1038				     struct ath_rx_status *rx_stats,
1039				     struct ieee80211_rx_status *rxs,
1040				     bool decrypt_error)
1041{
1042	struct ath_hw *ah = common->ah;
1043	struct ieee80211_hdr *hdr;
1044	int hdrlen, padpos, padsize;
1045	u8 keyix;
1046	__le16 fc;
1047
1048	/* see if any padding is done by the hw and remove it */
1049	hdr = (struct ieee80211_hdr *) skb->data;
1050	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1051	fc = hdr->frame_control;
1052	padpos = ath9k_cmn_padpos(hdr->frame_control);
1053
1054	/* The MAC header is padded to have 32-bit boundary if the
1055	 * packet payload is non-zero. The general calculation for
1056	 * padsize would take into account odd header lengths:
1057	 * padsize = (4 - padpos % 4) % 4; However, since only
1058	 * even-length headers are used, padding can only be 0 or 2
1059	 * bytes and we can optimize this a bit. In addition, we must
1060	 * not try to remove padding from short control frames that do
1061	 * not have payload. */
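	/*
	 * For example, a QoS data header (padpos = 26) yields padsize = 2,
	 * so the header is shifted forward by two bytes before the frame is
	 * handed up.
	 */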
1062	padsize = padpos & 3;
1063	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
1064		memmove(skb->data + padsize, skb->data, padpos);
1065		skb_pull(skb, padsize);
1066	}
1067
1068	keyix = rx_stats->rs_keyix;
1069
1070	if ((keyix != ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
1071	    ieee80211_has_protected(fc)) {
1072		rxs->flag |= RX_FLAG_DECRYPTED;
1073	} else if (ieee80211_has_protected(fc)
1074		   && !decrypt_error && skb->len >= hdrlen + 4) {
1075		keyix = skb->data[hdrlen + 3] >> 6;
1076
1077		if (test_bit(keyix, common->keymap))
1078			rxs->flag |= RX_FLAG_DECRYPTED;
1079	}
1080	if (ah->sw_mgmt_crypto &&
1081	    (rxs->flag & RX_FLAG_DECRYPTED) &&
1082	    ieee80211_is_mgmt(fc))
1083		/* Use software decrypt for management frames. */
1084		rxs->flag &= ~RX_FLAG_DECRYPTED;
1085}
1086
1087static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
1088				      struct ath_hw_antcomb_conf ant_conf,
1089				      int main_rssi_avg)
1090{
1091	antcomb->quick_scan_cnt = 0;
1092
1093	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
1094		antcomb->rssi_lna2 = main_rssi_avg;
1095	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
1096		antcomb->rssi_lna1 = main_rssi_avg;
1097
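	/*
	 * Each case value packs (main_lna_conf << 4) | alt_lna_conf, so the
	 * case comments read "<main> <alt>": e.g. 0x21 is main on LNA1 with
	 * the alternate chain on LNA2.
	 */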
1098	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
1099	case (0x10): /* LNA2 A-B */
1100		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1101		antcomb->first_quick_scan_conf =
1102			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1103		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1104		break;
1105	case (0x20): /* LNA1 A-B */
1106		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1107		antcomb->first_quick_scan_conf =
1108			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1109		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1110		break;
1111	case (0x21): /* LNA1 LNA2 */
1112		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
1113		antcomb->first_quick_scan_conf =
1114			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1115		antcomb->second_quick_scan_conf =
1116			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1117		break;
1118	case (0x12): /* LNA2 LNA1 */
1119		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
1120		antcomb->first_quick_scan_conf =
1121			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1122		antcomb->second_quick_scan_conf =
1123			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1124		break;
1125	case (0x13): /* LNA2 A+B */
1126		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1127		antcomb->first_quick_scan_conf =
1128			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1129		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1130		break;
1131	case (0x23): /* LNA1 A+B */
1132		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1133		antcomb->first_quick_scan_conf =
1134			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1135		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1136		break;
1137	default:
1138		break;
1139	}
1140}
1141
1142static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
1143				struct ath_hw_antcomb_conf *div_ant_conf,
1144				int main_rssi_avg, int alt_rssi_avg,
1145				int alt_ratio)
1146{
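	/*
	 * quick_scan_cnt tracks the step of the quick scan: step 0 programs
	 * the first candidate alt configuration, step 1 records its result
	 * and programs the second candidate, and step 2 compares the two
	 * against the current main setting and picks the final main/alt
	 * LNA configuration.
	 */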
1147	/* alt_good */
1148	switch (antcomb->quick_scan_cnt) {
1149	case 0:
1150		/* set main to main_conf and alt to the first quick-scan conf */
1151		div_ant_conf->main_lna_conf = antcomb->main_conf;
1152		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
1153		break;
1154	case 1:
1155		/* set main to main_conf and alt to the second quick-scan conf */
1156		div_ant_conf->main_lna_conf = antcomb->main_conf;
1157		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
1158		antcomb->rssi_first = main_rssi_avg;
1159		antcomb->rssi_second = alt_rssi_avg;
1160
1161		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1162			/* main is LNA1 */
1163			if (ath_is_alt_ant_ratio_better(alt_ratio,
1164						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1165						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1166						main_rssi_avg, alt_rssi_avg,
1167						antcomb->total_pkt_count))
1168				antcomb->first_ratio = true;
1169			else
1170				antcomb->first_ratio = false;
1171		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1172			if (ath_is_alt_ant_ratio_better(alt_ratio,
1173						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1174						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1175						main_rssi_avg, alt_rssi_avg,
1176						antcomb->total_pkt_count))
1177				antcomb->first_ratio = true;
1178			else
1179				antcomb->first_ratio = false;
1180		} else {
1181			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1182			    (alt_rssi_avg > main_rssi_avg +
1183			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1184			    (alt_rssi_avg > main_rssi_avg)) &&
1185			    (antcomb->total_pkt_count > 50))
1186				antcomb->first_ratio = true;
1187			else
1188				antcomb->first_ratio = false;
1189		}
1190		break;
1191	case 2:
1192		antcomb->alt_good = false;
1193		antcomb->scan_not_start = false;
1194		antcomb->scan = false;
1195		antcomb->rssi_first = main_rssi_avg;
1196		antcomb->rssi_third = alt_rssi_avg;
1197
1198		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
1199			antcomb->rssi_lna1 = alt_rssi_avg;
1200		else if (antcomb->second_quick_scan_conf ==
1201			 ATH_ANT_DIV_COMB_LNA2)
1202			antcomb->rssi_lna2 = alt_rssi_avg;
1203		else if (antcomb->second_quick_scan_conf ==
1204			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
1205			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
1206				antcomb->rssi_lna2 = main_rssi_avg;
1207			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
1208				antcomb->rssi_lna1 = main_rssi_avg;
1209		}
1210
1211		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
1212		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
1213			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1214		else
1215			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
1216
1217		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1218			if (ath_is_alt_ant_ratio_better(alt_ratio,
1219						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1220						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1221						main_rssi_avg, alt_rssi_avg,
1222						antcomb->total_pkt_count))
1223				antcomb->second_ratio = true;
1224			else
1225				antcomb->second_ratio = false;
1226		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1227			if (ath_is_alt_ant_ratio_better(alt_ratio,
1228						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1229						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1230						main_rssi_avg, alt_rssi_avg,
1231						antcomb->total_pkt_count))
1232				antcomb->second_ratio = true;
1233			else
1234				antcomb->second_ratio = false;
1235		} else {
1236			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1237			    (alt_rssi_avg > main_rssi_avg +
1238			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1239			    (alt_rssi_avg > main_rssi_avg)) &&
1240			    (antcomb->total_pkt_count > 50))
1241				antcomb->second_ratio = true;
1242			else
1243				antcomb->second_ratio = false;
1244		}
1245
1246		/* set alt to the conf with maximum ratio */
1247		if (antcomb->first_ratio && antcomb->second_ratio) {
1248			if (antcomb->rssi_second > antcomb->rssi_third) {
1249				/* first alt*/
1250				if ((antcomb->first_quick_scan_conf ==
1251				    ATH_ANT_DIV_COMB_LNA1) ||
1252				    (antcomb->first_quick_scan_conf ==
1253				    ATH_ANT_DIV_COMB_LNA2))
1254					/* Set alt LNA1 or LNA2*/
1255					if (div_ant_conf->main_lna_conf ==
1256					    ATH_ANT_DIV_COMB_LNA2)
1257						div_ant_conf->alt_lna_conf =
1258							ATH_ANT_DIV_COMB_LNA1;
1259					else
1260						div_ant_conf->alt_lna_conf =
1261							ATH_ANT_DIV_COMB_LNA2;
1262				else
1263					/* Set alt to A+B or A-B */
1264					div_ant_conf->alt_lna_conf =
1265						antcomb->first_quick_scan_conf;
1266			} else if ((antcomb->second_quick_scan_conf ==
1267				   ATH_ANT_DIV_COMB_LNA1) ||
1268				   (antcomb->second_quick_scan_conf ==
1269				   ATH_ANT_DIV_COMB_LNA2)) {
1270				/* Set alt LNA1 or LNA2 */
1271				if (div_ant_conf->main_lna_conf ==
1272				    ATH_ANT_DIV_COMB_LNA2)
1273					div_ant_conf->alt_lna_conf =
1274						ATH_ANT_DIV_COMB_LNA1;
1275				else
1276					div_ant_conf->alt_lna_conf =
1277						ATH_ANT_DIV_COMB_LNA2;
1278			} else {
1279				/* Set alt to A+B or A-B */
1280				div_ant_conf->alt_lna_conf =
1281					antcomb->second_quick_scan_conf;
1282			}
1283		} else if (antcomb->first_ratio) {
1284			/* first alt */
1285			if ((antcomb->first_quick_scan_conf ==
1286			    ATH_ANT_DIV_COMB_LNA1) ||
1287			    (antcomb->first_quick_scan_conf ==
1288			    ATH_ANT_DIV_COMB_LNA2))
1289					/* Set alt LNA1 or LNA2 */
1290				if (div_ant_conf->main_lna_conf ==
1291				    ATH_ANT_DIV_COMB_LNA2)
1292					div_ant_conf->alt_lna_conf =
1293							ATH_ANT_DIV_COMB_LNA1;
1294				else
1295					div_ant_conf->alt_lna_conf =
1296							ATH_ANT_DIV_COMB_LNA2;
1297			else
1298				/* Set alt to A+B or A-B */
1299				div_ant_conf->alt_lna_conf =
1300						antcomb->first_quick_scan_conf;
1301		} else if (antcomb->second_ratio) {
1302				/* second alt */
1303			if ((antcomb->second_quick_scan_conf ==
1304			    ATH_ANT_DIV_COMB_LNA1) ||
1305			    (antcomb->second_quick_scan_conf ==
1306			    ATH_ANT_DIV_COMB_LNA2))
1307				/* Set alt LNA1 or LNA2 */
1308				if (div_ant_conf->main_lna_conf ==
1309				    ATH_ANT_DIV_COMB_LNA2)
1310					div_ant_conf->alt_lna_conf =
1311						ATH_ANT_DIV_COMB_LNA1;
1312				else
1313					div_ant_conf->alt_lna_conf =
1314						ATH_ANT_DIV_COMB_LNA2;
1315			else
1316				/* Set alt to A+B or A-B */
1317				div_ant_conf->alt_lna_conf =
1318						antcomb->second_quick_scan_conf;
1319		} else {
1320			/* main is largest */
1321			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
1322			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
1323				/* Set alt LNA1 or LNA2 */
1324				if (div_ant_conf->main_lna_conf ==
1325				    ATH_ANT_DIV_COMB_LNA2)
1326					div_ant_conf->alt_lna_conf =
1327							ATH_ANT_DIV_COMB_LNA1;
1328				else
1329					div_ant_conf->alt_lna_conf =
1330							ATH_ANT_DIV_COMB_LNA2;
1331			else
1332				/* Set alt to A+B or A-B */
1333				div_ant_conf->alt_lna_conf = antcomb->main_conf;
1334		}
1335		break;
1336	default:
1337		break;
1338	}
1339}
1340
1341static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
1342{
1343	/* Adjust the fast_div_bias based on main and alt lna conf */
1344	switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
1345	case (0x01): /* A-B LNA2 */
1346		ant_conf->fast_div_bias = 0x3b;
1347		break;
1348	case (0x02): /* A-B LNA1 */
1349		ant_conf->fast_div_bias = 0x3d;
1350		break;
1351	case (0x03): /* A-B A+B */
1352		ant_conf->fast_div_bias = 0x1;
1353		break;
1354	case (0x10): /* LNA2 A-B */
1355		ant_conf->fast_div_bias = 0x7;
1356		break;
1357	case (0x12): /* LNA2 LNA1 */
1358		ant_conf->fast_div_bias = 0x2;
1359		break;
1360	case (0x13): /* LNA2 A+B */
1361		ant_conf->fast_div_bias = 0x7;
1362		break;
1363	case (0x20): /* LNA1 A-B */
1364		ant_conf->fast_div_bias = 0x6;
1365		break;
1366	case (0x21): /* LNA1 LNA2 */
1367		ant_conf->fast_div_bias = 0x0;
1368		break;
1369	case (0x23): /* LNA1 A+B */
1370		ant_conf->fast_div_bias = 0x6;
1371		break;
1372	case (0x30): /* A+B A-B */
1373		ant_conf->fast_div_bias = 0x1;
1374		break;
1375	case (0x31): /* A+B LNA2 */
1376		ant_conf->fast_div_bias = 0x3b;
1377		break;
1378	case (0x32): /* A+B LNA1 */
1379		ant_conf->fast_div_bias = 0x3d;
1380		break;
1381	default:
1382		break;
1383	}
1384}
1385
1386/* Antenna diversity and combining */
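/*
 * Accumulate per-frame diversity statistics (RSSI on the main and alt
 * chains, which antenna actually received the frame) and, once enough
 * packets have been counted or a short scan times out, re-evaluate the
 * main/alt LNA configuration and program it via
 * ath9k_hw_antdiv_comb_conf_set().
 */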
1387static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1388{
1389	struct ath_hw_antcomb_conf div_ant_conf;
1390	struct ath_ant_comb *antcomb = &sc->ant_comb;
1391	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
1392	int curr_main_set, curr_bias;
1393	int main_rssi = rs->rs_rssi_ctl0;
1394	int alt_rssi = rs->rs_rssi_ctl1;
1395	int rx_ant_conf,  main_ant_conf;
1396	int rx_ant_conf, main_ant_conf;
1397
1398	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
1399		       ATH_ANT_RX_MASK;
1400	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
1401			 ATH_ANT_RX_MASK;
1402
1403	/* Record packet only when alt_rssi is positive */
1404	if (alt_rssi > 0) {
1405		antcomb->total_pkt_count++;
1406		antcomb->main_total_rssi += main_rssi;
1407		antcomb->alt_total_rssi  += alt_rssi;
1408		if (main_ant_conf == rx_ant_conf)
1409			antcomb->main_recv_cnt++;
1410		else
1411			antcomb->alt_recv_cnt++;
1412	}
1413
1414	/* Short scan check */
1415	if (antcomb->scan && antcomb->alt_good) {
1416		if (time_after(jiffies, antcomb->scan_start_time +
1417		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
1418			short_scan = true;
1419		else
1420			if (antcomb->total_pkt_count ==
1421			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
1422				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1423					    antcomb->total_pkt_count);
1424				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
1425					short_scan = true;
1426			}
1427	}
1428
1429	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
1430	    rs->rs_moreaggr) && !short_scan)
1431		return;
1432
1433	if (antcomb->total_pkt_count) {
1434		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1435			     antcomb->total_pkt_count);
1436		main_rssi_avg = (antcomb->main_total_rssi /
1437				 antcomb->total_pkt_count);
1438		alt_rssi_avg = (antcomb->alt_total_rssi /
1439				 antcomb->total_pkt_count);
1440	}
1441
1442
1443	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
1444	curr_alt_set = div_ant_conf.alt_lna_conf;
1445	curr_main_set = div_ant_conf.main_lna_conf;
1446	curr_bias = div_ant_conf.fast_div_bias;
1447
1448	antcomb->count++;
1449
1450	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
1451		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
1452			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
1453						  main_rssi_avg);
1454			antcomb->alt_good = true;
1455		} else {
1456			antcomb->alt_good = false;
1457		}
1458
1459		antcomb->count = 0;
1460		antcomb->scan = true;
1461		antcomb->scan_not_start = true;
1462	}
1463
1464	if (!antcomb->scan) {
1465		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
1466			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
1467				/* Switch main and alt LNA */
1468				div_ant_conf.main_lna_conf =
1469						ATH_ANT_DIV_COMB_LNA2;
1470				div_ant_conf.alt_lna_conf  =
1471						ATH_ANT_DIV_COMB_LNA1;
1472			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
1473				div_ant_conf.main_lna_conf =
1474						ATH_ANT_DIV_COMB_LNA1;
1475				div_ant_conf.alt_lna_conf  =
1476						ATH_ANT_DIV_COMB_LNA2;
1477			}
1478
1479			goto div_comb_done;
1480		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
1481			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
1482			/* Set alt to another LNA */
1483			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
1484				div_ant_conf.alt_lna_conf =
1485						ATH_ANT_DIV_COMB_LNA1;
1486			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
1487				div_ant_conf.alt_lna_conf =
1488						ATH_ANT_DIV_COMB_LNA2;
1489
1490			goto div_comb_done;
1491		}
1492
1493		if ((alt_rssi_avg < (main_rssi_avg +
1494		    ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA)))
1495			goto div_comb_done;
1496	}
1497
1498	if (!antcomb->scan_not_start) {
1499		switch (curr_alt_set) {
1500		case ATH_ANT_DIV_COMB_LNA2:
1501			antcomb->rssi_lna2 = alt_rssi_avg;
1502			antcomb->rssi_lna1 = main_rssi_avg;
1503			antcomb->scan = true;
1504			/* set to A+B */
1505			div_ant_conf.main_lna_conf =
1506				ATH_ANT_DIV_COMB_LNA1;
1507			div_ant_conf.alt_lna_conf  =
1508				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1509			break;
1510		case ATH_ANT_DIV_COMB_LNA1:
1511			antcomb->rssi_lna1 = alt_rssi_avg;
1512			antcomb->rssi_lna2 = main_rssi_avg;
1513			antcomb->scan = true;
1514			/* set to A+B */
1515			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1516			div_ant_conf.alt_lna_conf  =
1517				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1518			break;
1519		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
1520			antcomb->rssi_add = alt_rssi_avg;
1521			antcomb->scan = true;
1522			/* set to A-B */
1523			div_ant_conf.alt_lna_conf =
1524				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1525			break;
1526		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
1527			antcomb->rssi_sub = alt_rssi_avg;
1528			antcomb->scan = false;
1529			if (antcomb->rssi_lna2 >
1530			    (antcomb->rssi_lna1 +
1531			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
1532				/* use LNA2 as main LNA */
1533				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
1534				    (antcomb->rssi_add > antcomb->rssi_sub)) {
1535					/* set to A+B */
1536					div_ant_conf.main_lna_conf =
1537						ATH_ANT_DIV_COMB_LNA2;
1538					div_ant_conf.alt_lna_conf  =
1539						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1540				} else if (antcomb->rssi_sub >
1541					   antcomb->rssi_lna1) {
1542					/* set to A-B */
1543					div_ant_conf.main_lna_conf =
1544						ATH_ANT_DIV_COMB_LNA2;
1545					div_ant_conf.alt_lna_conf =
1546						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1547				} else {
1548					/* set to LNA1 */
1549					div_ant_conf.main_lna_conf =
1550						ATH_ANT_DIV_COMB_LNA2;
1551					div_ant_conf.alt_lna_conf =
1552						ATH_ANT_DIV_COMB_LNA1;
1553				}
1554			} else {
1555				/* use LNA1 as main LNA */
1556				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
1557				    (antcomb->rssi_add > antcomb->rssi_sub)) {
1558					/* set to A+B */
1559					div_ant_conf.main_lna_conf =
1560						ATH_ANT_DIV_COMB_LNA1;
1561					div_ant_conf.alt_lna_conf  =
1562						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1563				} else if (antcomb->rssi_sub >
1564					   antcomb->rssi_lna1) {
1565					/* set to A-B */
1566					div_ant_conf.main_lna_conf =
1567						ATH_ANT_DIV_COMB_LNA1;
1568					div_ant_conf.alt_lna_conf =
1569						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1570				} else {
1571					/* set to LNA2 */
1572					div_ant_conf.main_lna_conf =
1573						ATH_ANT_DIV_COMB_LNA1;
1574					div_ant_conf.alt_lna_conf =
1575						ATH_ANT_DIV_COMB_LNA2;
1576				}
1577			}
1578			break;
1579		default:
1580			break;
1581		}
1582	} else {
1583		if (!antcomb->alt_good) {
1584			antcomb->scan_not_start = false;
1585			/* Set alt to another LNA */
1586			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
1587				div_ant_conf.main_lna_conf =
1588						ATH_ANT_DIV_COMB_LNA2;
1589				div_ant_conf.alt_lna_conf =
1590						ATH_ANT_DIV_COMB_LNA1;
1591			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
1592				div_ant_conf.main_lna_conf =
1593						ATH_ANT_DIV_COMB_LNA1;
1594				div_ant_conf.alt_lna_conf =
1595						ATH_ANT_DIV_COMB_LNA2;
1596			}
1597			goto div_comb_done;
1598		}
1599	}
1600
1601	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
1602					   main_rssi_avg, alt_rssi_avg,
1603					   alt_ratio);
1604
1605	antcomb->quick_scan_cnt++;
1606
1607div_comb_done:
1608	ath_ant_div_conf_fast_divbias(&div_ant_conf);
1609
1610	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
1611
1612	antcomb->scan_start_time = jiffies;
1613	antcomb->total_pkt_count = 0;
1614	antcomb->main_total_rssi = 0;
1615	antcomb->alt_total_rssi = 0;
1616	antcomb->main_recv_cnt = 0;
1617	antcomb->alt_recv_cnt = 0;
1618}
1619
1620int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1621{
1622	struct ath_buf *bf;
1623	struct sk_buff *skb = NULL, *requeue_skb;
1624	struct ieee80211_rx_status *rxs;
1625	struct ath_hw *ah = sc->sc_ah;
1626	struct ath_common *common = ath9k_hw_common(ah);
1627	/*
1628	 * The hw can technically differ from common->hw when using ath9k
1629	 * virtual wiphys, so to account for that we iterate over the active
1630	 * wiphys and find the appropriate wiphy, and therefore the right hw.
1631	 */
1632	struct ieee80211_hw *hw = NULL;
1633	struct ieee80211_hdr *hdr;
1634	int retval;
1635	bool decrypt_error = false;
1636	struct ath_rx_status rs;
1637	enum ath9k_rx_qtype qtype;
1638	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
1639	int dma_type;
1640	u8 rx_status_len = ah->caps.rx_status_len;
1641	u64 tsf = 0;
1642	u32 tsf_lower = 0;
1643	unsigned long flags;
1644
1645	if (edma)
1646		dma_type = DMA_BIDIRECTIONAL;
1647	else
1648		dma_type = DMA_FROM_DEVICE;
1649
1650	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
1651	spin_lock_bh(&sc->rx.rxbuflock);
1652
1653	tsf = ath9k_hw_gettsf64(ah);
1654	tsf_lower = tsf & 0xffffffff;
1655
1656	do {
1657		/* If handling rx interrupt and flush is in progress => exit */
1658		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
1659			break;
1660
1661		memset(&rs, 0, sizeof(rs));
1662		if (edma)
1663			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
1664		else
1665			bf = ath_get_next_rx_buf(sc, &rs);
1666
1667		if (!bf)
1668			break;
1669
1670		skb = bf->bf_mpdu;
1671		if (!skb)
1672			continue;
1673
1674		hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
1675		rxs =  IEEE80211_SKB_RXCB(skb);
1676
1677		hw = ath_get_virt_hw(sc, hdr);
1678
1679		ath_debug_stat_rx(sc, &rs);
1680
1681		/*
1682		 * If we're asked to flush the receive queue, chain the
1683		 * buffer straight back onto the queue without processing it.
1684		 */
1685		if (flush)
1686			goto requeue;
1687
1688		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
1689						 rxs, &decrypt_error);
1690		if (retval)
1691			goto requeue;
1692
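		/*
		 * rs_tstamp holds only the low 32 bits of the TSF; splice it
		 * onto the upper bits sampled before the loop and correct
		 * for the counter having wrapped in either direction since
		 * then.
		 */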
1693		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
1694		if (rs.rs_tstamp > tsf_lower &&
1695		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
1696			rxs->mactime -= 0x100000000ULL;
1697
1698		if (rs.rs_tstamp < tsf_lower &&
1699		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
1700			rxs->mactime += 0x100000000ULL;
1701
1702		/* Ensure we always have an skb to requeue once we are done
1703		 * processing the current buffer's skb */
1704		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
1705
1706		/* If there is no memory we ignore the current RX'd frame,
1707		 * tell hardware it can give us a new frame using the old
1708		 * skb and put it at the tail of the sc->rx.rxbuf list for
1709		 * processing. */
1710		if (!requeue_skb)
1711			goto requeue;
1712
1713		/* Unmap the frame */
1714		dma_unmap_single(sc->dev, bf->bf_buf_addr,
1715				 common->rx_bufsize,
1716				 dma_type);
1717
1718		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
1719		if (ah->caps.rx_status_len)
1720			skb_pull(skb, ah->caps.rx_status_len);
1721
1722		ath9k_rx_skb_postprocess(common, skb, &rs,
1723					 rxs, decrypt_error);
1724
1725		/* We will now give hardware our shiny new allocated skb */
1726		bf->bf_mpdu = requeue_skb;
1727		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
1728						 common->rx_bufsize,
1729						 dma_type);
1730		if (unlikely(dma_mapping_error(sc->dev,
1731			  bf->bf_buf_addr))) {
1732			dev_kfree_skb_any(requeue_skb);
1733			bf->bf_mpdu = NULL;
1734			ath_print(common, ATH_DBG_FATAL,
1735				  "dma_mapping_error() on RX\n");
1736			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
1737			break;
1738		}
1739		bf->bf_dmacontext = bf->bf_buf_addr;
1740
1741		/*
1742		 * change the default rx antenna if rx diversity chooses the
1743		 * other antenna 3 times in a row.
1744		 */
1745		if (sc->rx.defant != rs.rs_antenna) {
1746			if (++sc->rx.rxotherant >= 3)
1747				ath_setdefantenna(sc, rs.rs_antenna);
1748		} else {
1749			sc->rx.rxotherant = 0;
1750		}
1751
1752		spin_lock_irqsave(&sc->sc_pm_lock, flags);
1753		if (unlikely(ath9k_check_auto_sleep(sc) ||
1754			     (sc->ps_flags & (PS_WAIT_FOR_BEACON |
1755					      PS_WAIT_FOR_CAB |
1756					      PS_WAIT_FOR_PSPOLL_DATA))))
1757			ath_rx_ps(sc, skb);
1758		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1759
1760		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
1761			ath_ant_comb_scan(sc, &rs);
1762
1763		ath_rx_send_to_mac80211(hw, sc, skb, rxs);
1764
1765requeue:
1766		if (edma) {
1767			list_add_tail(&bf->list, &sc->rx.rxbuf);
1768			ath_rx_edma_buf_link(sc, qtype);
1769		} else {
1770			list_move_tail(&bf->list, &sc->rx.rxbuf);
1771			ath_rx_buf_link(sc, bf);
1772		}
1773	} while (1);
1774
1775	spin_unlock_bh(&sc->rx.rxbuflock);
1776
1777	return 0;
1778}
1779