/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

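/*
 * The EDMA RX path stores the owning ath_buf pointer in the skb control
 * buffer so it can be recovered when the skb is dequeued from the RX FIFO.
 */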
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		(alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
}

static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
					int curr_main_set, int curr_alt_set,
					int alt_rssi_avg, int main_rssi_avg)
{
	bool result = false;

	switch (div_group) {
	case 0:
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
			result = true;
		break;
	case 1:
	case 2:
		if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
			(curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
				(alt_rssi_avg >= (main_rssi_avg - 5))) ||
			((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
			(curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
				(alt_rssi_avg >= (main_rssi_avg - 2)))) &&
							(alt_rssi_avg >= 4))
			result = true;
		else
			result = false;
		break;
	}

	return result;
}

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * The MAC acknowledges BA status as long as it copies frames to the host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

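	/*
	 * Chain this descriptor into the RX list: program the hardware RX
	 * buffer pointer for the first buffer, otherwise point the previous
	 * descriptor's link field at this one.
	 */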
	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

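	/*
	 * Zero the RX status area at the head of the buffer so the
	 * descriptor "done" check cannot see stale status words, and hand
	 * those bytes back to the device before queueing the buffer.
	 */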
	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					common->rx_bufsize,
					DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

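	/*
	 * The RX buffer size covers half of the maximum MPDU length plus
	 * room for the per-frame status bytes that EDMA hardware prepends
	 * to each frame (rx_status_len is zero on non-EDMA hardware).
	 */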
	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				"rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					common->rx_bufsize,
					DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					common->rx_bufsize,
					DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	if (sc->rx.rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
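	/*
	 * Stop order matters: abort frame reception in the PCU and clear
	 * the RX filter first, then wait for the RX DMA engine to go idle.
	 */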
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

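	/* Walk the IEs; each needs a 2-byte header (id, length) plus body. */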
	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon)
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let the device own the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
			common->rx_bufsize,
			DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		!(rx_stats->rs_status &
		(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
		 ATH9K_RXERR_KEYMISS));
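	/*
	 * strip_mic: the frame used a valid TKIP key and passed all hardware
	 * error checks, so its Michael MIC will be trimmed off later and
	 * mac80211 told via RX_FLAG_MMIC_STRIPPED not to expect it.
	 */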

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	if (!rx_stats->rs_datalen)
		return false;

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

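	/*
	 * Legacy rate: match the hardware rate code against the band's
	 * bitrate table, checking both long and short preamble encodings.
	 */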
	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);

	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	if (!rx_stats->is_mybeacon ||
	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
		return;

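	/*
	 * Low-pass filter the beacon RSSI across received beacons so a
	 * single noisy sample does not swing the average used by ANI.
	 */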
	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
	if (rssi < 0)
		rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rssi;
}

/*
 * For decrypt or demic errors, we only mark the packet status here and
 * always push the frame up to let mac80211 handle the actual error case,
 * be it a missing decryption key or a real decryption error. This lets us
 * keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

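	/*
	 * Mark the frame decrypted if the hardware reported a valid key
	 * index. If it did not, fall back to reading the key index from
	 * the frame's own IV (byte 3, top two bits) and checking it
	 * against the key map.
	 */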
	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}

static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

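	/*
	 * The switch below is keyed on (main_lna_conf << 4) | alt_lna_conf:
	 * the upper nibble encodes the main LNA config and the lower nibble
	 * the alternate one (see the per-case comments).
	 */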
	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case 0x10: /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x20: /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case 0x21: /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x12: /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case 0x13: /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case 0x23: /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}

static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set alt to main, and alt to first conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set alt to main, and alt to second conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
						antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}

static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
		struct ath_ant_comb *antcomb, int alt_ratio)
{
	if (ant_conf->div_group == 0) {
		/* Adjust the fast_div_bias based on main and alt lna conf */
		switch ((ant_conf->main_lna_conf << 4) |
				ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x10: /* LNA2 A-B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x2;
			break;
		case 0x13: /* LNA2 A+B */
			ant_conf->fast_div_bias = 0x7;
			break;
		case 0x20: /* LNA1 A-B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x0;
			break;
		case 0x23: /* LNA1 A+B */
			ant_conf->fast_div_bias = 0x6;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x3b;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x3d;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 1) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
			ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
			    (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x3f;
			else
				ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	} else if (ant_conf->div_group == 2) {
		/* Adjust the fast_div_bias based on main and alt_lna_conf */
		switch ((ant_conf->main_lna_conf << 4) |
				ant_conf->alt_lna_conf) {
		case 0x01: /* A-B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x02: /* A-B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x03: /* A-B A+B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x10: /* LNA2 A-B */
			if (!(antcomb->scan) &&
				(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x12: /* LNA2 LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x13: /* LNA2 A+B */
			if (!(antcomb->scan) &&
				(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x20: /* LNA1 A-B */
			if (!(antcomb->scan) &&
				(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x21: /* LNA1 LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x23: /* LNA1 A+B */
			if (!(antcomb->scan) &&
				(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
				ant_conf->fast_div_bias = 0x1;
			else
				ant_conf->fast_div_bias = 0x2;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x30: /* A+B A-B */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x31: /* A+B LNA2 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		case 0x32: /* A+B LNA1 */
			ant_conf->fast_div_bias = 0x1;
			ant_conf->main_gaintb = 0;
			ant_conf->alt_gaintb = 0;
			break;
		default:
			break;
		}
	}
}

/*
 * Antenna diversity and combining: accumulate per-antenna RSSI and receive
 * counts over a window of packets, then either swap the main/alt LNA
 * configuration directly or run a quick scan over the candidate LNA
 * combinations and program the best one into the hardware.
 */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		       ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			 ATH_ANT_RX_MASK;

	/* Record packet only when both main_rssi and alt_rssi are positive */
	if (main_rssi > 0 && alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi  += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					    antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	    rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				 antcomb->total_pkt_count);
	}

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

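	/*
	 * Outside of a scan: swap main and alt if the alt antenna is
	 * clearly better, keep the current configuration if it is not,
	 * and only fall through into the quick-scan state machine below
	 * when neither case applies.
	 */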
	if (!antcomb->scan) {
		if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
					alt_ratio, curr_main_set, curr_alt_set,
					alt_rssi_avg, main_rssi_avg)) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
						div_ant_conf.lna1_lna2_delta)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf  =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf  =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf  =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		if (ieee80211_is_beacon(hdr->frame_control) &&
		    !is_zero_ether_addr(common->curbssid) &&
		    !compare_ether_addr(hdr->addr3, common->curbssid))
			rs.is_mybeacon = true;
		else
			rs.is_mybeacon = false;

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (sc->sc_flags & SC_OP_RXFLUSH)
			goto requeue_drop_frag;

		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

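		/*
		 * rs_tstamp is only 32 bits wide: extend it to 64 bits using
		 * the current TSF, then correct for wraparound in either
		 * direction between the sample and the TSF read.
		 */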
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue_drop_frag;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

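		/*
		 * This is the last buffer of a chained frame: grow the
		 * accumulated header skb and append this fragment's data
		 * before handing the full frame up.
		 */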
		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
			/*
			 * change the default rx antenna if rx diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
			if (!flush)
				ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

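	/*
	 * Re-enable RXEOL/RXORN in case they were masked out elsewhere
	 * (e.g. while the RX ring was starved) now that buffers have been
	 * processed and requeued.
	 */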
	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}