recv.c revision 8afbcc8bfb549a522298fa4a31ee5155c2b5f7a0
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"
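/*
 * Each RX skb's control block (skb->cb) is used to stash a pointer back
 * to the ath_buf that owns it, so the buffer can be recovered when the
 * skb is later dequeued from an EDMA FIFO.
 */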
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
}

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
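/*
 * Push one free buffer from sc->rx.rxbuf into the hardware's EDMA RX
 * FIFO: the status area at the head of the buffer is cleared and synced
 * back to the device, the owning ath_buf is stashed in the skb's control
 * block, and the skb is queued on the matching software FIFO so it can
 * be matched up again on completion.
 */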
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}
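/*
 * Common RX setup entry point. Chips that advertise ATH9K_HW_CAP_EDMA
 * (the AR9003 family) get per-FIFO buffer lists with the RX status
 * written into the buffer itself, hence the DMA_BIDIRECTIONAL mapping
 * above; older chips use a conventional self-managed descriptor ring.
 */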
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
}
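/*
 * TIM element layout, for reference (IEEE 802.11): after the element ID
 * and length come dtim_count, dtim_period, bitmap_ctrl and the partial
 * virtual bitmap. dtim_count == 0 means this beacon is a DTIM beacon,
 * and bit 0 of bitmap_ctrl advertises buffered broadcast/multicast
 * traffic that will follow as CAB frames.
 */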
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return; /* not from our current AP */
	}

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, ATH_DBG_PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath_set_beacon(sc);
		sc->ps_flags &= ~PS_TSFOOR_SYNC;
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, ATH_DBG_PS,
			"PS wait for CAB frames timed out\n");
	}
}
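/*
 * Power-save bookkeeping for received frames. PS_WAIT_FOR_BEACON,
 * PS_WAIT_FOR_CAB and PS_WAIT_FOR_PSPOLL_DATA each keep the chip awake
 * until the corresponding frame arrives; once every condition clears,
 * the power-save core is free to drop back into network sleep.
 */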
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, ATH_DBG_PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, ATH_DBG_PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}
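/*
 * Poll one completed buffer off an EDMA RX FIFO. The status words live
 * at the head of the buffer itself: -EINPROGRESS means the hardware
 * still owns it (so it is handed straight back to the device), while
 * -EINVAL marks a corrupt descriptor, in which case this entry and the
 * one after it are recycled without being processed.
 */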
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS) {
		/* let device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype));
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}
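/*
 * Frame acceptance rules, summarized: zero-length or oversized frames
 * are rejected outright, intermediate fragments of a chained frame are
 * accepted as-is (error bits are only valid in the last fragment), and
 * of the error conditions only decryption/MIC failures (plus CRC errors
 * in monitor mode) are allowed through so mac80211 can account for them.
 */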
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
#define is_mc_or_valid_tkip_keyix ((is_mc ||			\
		(rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
		test_bit(rx_stats->rs_keyix, common->tkip_keymap))))

	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			bool is_mc;
			/*
			 * The MIC error bit is only valid if the frame
			 * is not a control frame or fragment, and it was
			 * decrypted using a valid TKIP key.
			 */
			is_mc = !!is_multicast_ether_addr(hdr->addr1);

			if (!ieee80211_is_ctl(fc) &&
			    !ieee80211_has_morefrags(fc) &&
			    !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
			    is_mc_or_valid_tkip_keyix)
				rxs->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->is_monitoring) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}
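/*
 * Rate decoding: bit 7 of rs_rate flags an HT (MCS) rate, with the MCS
 * index in the low seven bits (e.g. 0x87 is MCS 7); legacy rates are
 * matched against the band's bitrate table via hw_value/hw_value_short,
 * the latter marking a short-preamble CCK rate.
 */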
static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ATH_DBG_XMIT,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);

	return -EINVAL;
}
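/*
 * Beacon RSSI from the current BSS is fed through a low-pass filter
 * (ATH_RSSI_LPF) to smooth out per-frame noise; the resulting average
 * is stored in ah->stats.avgbrssi, which the ANI code uses when tuning
 * noise immunity.
 */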
static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	__le16 fc;

	if ((ah->opmode != NL80211_IFTYPE_STATION) &&
	    (ah->opmode != NL80211_IFTYPE_ADHOC))
		return;

	fc = hdr->frame_control;
	if (!ieee80211_is_beacon(fc) ||
	    compare_ether_addr(hdr->addr3, common->curbssid)) {
		/* TODO: This doesn't work well if you have stations
		 * associated to two different APs because curbssid
		 * is just the last AP that any of the stations associated
		 * with.
		 */
		return;
	}

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rx_stats->rs_rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always
 * push the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics
 * there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_MPDU;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
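/*
 * The diversity code packs the main and alt LNA configurations into one
 * nibble each, so (main_lna_conf << 4) | alt_lna_conf gives a compact
 * switch key: e.g. 0x21 means main on LNA1 and alt on LNA2, while 0x13
 * means main on LNA2 with alt combining A+B.
 */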
static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
				      struct ath_hw_antcomb_conf ant_conf,
				      int main_rssi_avg)
{
	antcomb->quick_scan_cnt = 0;

	if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
		antcomb->rssi_lna2 = main_rssi_avg;
	else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
		antcomb->rssi_lna1 = main_rssi_avg;

	switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
	case (0x10): /* LNA2 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x20): /* LNA1 A-B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	case (0x21): /* LNA1 LNA2 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x12): /* LNA2 LNA1 */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		break;
	case (0x13): /* LNA2 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
		break;
	case (0x23): /* LNA1 A+B */
		antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
		antcomb->first_quick_scan_conf =
			ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
		antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
		break;
	default:
		break;
	}
}
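/*
 * Quick scan: over three calls (quick_scan_cnt 0, 1, 2) the alt path is
 * pointed at the first and then the second candidate configuration, the
 * resulting RSSI averages and ratios are recorded, and the final pass
 * picks whichever configuration performed best.
 */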
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
				struct ath_hw_antcomb_conf *div_ant_conf,
				int main_rssi_avg, int alt_rssi_avg,
				int alt_ratio)
{
	/* alt_good */
	switch (antcomb->quick_scan_cnt) {
	case 0:
		/* set main to stored conf, and alt to first quick-scan conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
		break;
	case 1:
		/* set main to stored conf, and alt to second quick-scan conf */
		div_ant_conf->main_lna_conf = antcomb->main_conf;
		div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_second = alt_rssi_avg;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			/* main is LNA1 */
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->first_quick_scan_conf;
		} else if (antcomb->second_ratio) {
			/* second alt */
			if ((antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->second_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
		} else {
			/* main is largest */
			if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			else
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf = antcomb->main_conf;
		}
		break;
	default:
		break;
	}
}
static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
{
	/* Adjust the fast_div_bias based on main and alt lna conf */
	switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
	case (0x01): /* A-B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x02): /* A-B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	case (0x03): /* A-B A+B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x10): /* LNA2 A-B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x12): /* LNA2 LNA1 */
		ant_conf->fast_div_bias = 0x2;
		break;
	case (0x13): /* LNA2 A+B */
		ant_conf->fast_div_bias = 0x7;
		break;
	case (0x20): /* LNA1 A-B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x21): /* LNA1 LNA2 */
		ant_conf->fast_div_bias = 0x0;
		break;
	case (0x23): /* LNA1 A+B */
		ant_conf->fast_div_bias = 0x6;
		break;
	case (0x30): /* A+B A-B */
		ant_conf->fast_div_bias = 0x1;
		break;
	case (0x31): /* A+B LNA2 */
		ant_conf->fast_div_bias = 0x3b;
		break;
	case (0x32): /* A+B LNA1 */
		ant_conf->fast_div_bias = 0x3d;
		break;
	default:
		break;
	}
}
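/*
 * Per-frame statistics feed the diversity state machine below: as long
 * as the alt chain reports a positive RSSI, each frame bumps the packet
 * count and accumulates main/alt RSSI. Once enough packets (or a short
 * scan timeout) have been seen, the averages and the alt receive ratio
 * decide whether to swap antennas or to start a quick scan.
 */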
/* Antenna diversity and combining */
static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
	struct ath_hw_antcomb_conf div_ant_conf;
	struct ath_ant_comb *antcomb = &sc->ant_comb;
	int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
	int curr_main_set;
	int main_rssi = rs->rs_rssi_ctl0;
	int alt_rssi = rs->rs_rssi_ctl1;
	int rx_ant_conf, main_ant_conf;
	bool short_scan = false;

	rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
		      ATH_ANT_RX_MASK;
	main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
			ATH_ANT_RX_MASK;

	/* Record packet only when alt_rssi is positive */
	if (alt_rssi > 0) {
		antcomb->total_pkt_count++;
		antcomb->main_total_rssi += main_rssi;
		antcomb->alt_total_rssi += alt_rssi;
		if (main_ant_conf == rx_ant_conf)
			antcomb->main_recv_cnt++;
		else
			antcomb->alt_recv_cnt++;
	}

	/* Short scan check */
	if (antcomb->scan && antcomb->alt_good) {
		if (time_after(jiffies, antcomb->scan_start_time +
		    msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
			short_scan = true;
		else
			if (antcomb->total_pkt_count ==
			    ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
				alt_ratio = ((antcomb->alt_recv_cnt * 100) /
					    antcomb->total_pkt_count);
				if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
					short_scan = true;
			}
	}

	if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
	    rs->rs_moreaggr) && !short_scan)
		return;

	if (antcomb->total_pkt_count) {
		alt_ratio = ((antcomb->alt_recv_cnt * 100) /
			     antcomb->total_pkt_count);
		main_rssi_avg = (antcomb->main_total_rssi /
				 antcomb->total_pkt_count);
		alt_rssi_avg = (antcomb->alt_total_rssi /
				antcomb->total_pkt_count);
	}

	ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
	curr_alt_set = div_ant_conf.alt_lna_conf;
	curr_main_set = div_ant_conf.main_lna_conf;

	antcomb->count++;

	if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
						  main_rssi_avg);
			antcomb->alt_good = true;
		} else {
			antcomb->alt_good = false;
		}

		antcomb->count = 0;
		antcomb->scan = true;
		antcomb->scan_not_start = true;
	}

	if (!antcomb->scan) {
		if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
			if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
				/* Switch main and alt LNA */
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}

			goto div_comb_done;
		} else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
			   (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;

			goto div_comb_done;
		}

		if ((alt_rssi_avg < (main_rssi_avg +
		    div_ant_conf.lna1_lna2_delta)))
			goto div_comb_done;
	}

	if (!antcomb->scan_not_start) {
		switch (curr_alt_set) {
		case ATH_ANT_DIV_COMB_LNA2:
			antcomb->rssi_lna2 = alt_rssi_avg;
			antcomb->rssi_lna1 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf =
				ATH_ANT_DIV_COMB_LNA1;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1:
			antcomb->rssi_lna1 = alt_rssi_avg;
			antcomb->rssi_lna2 = main_rssi_avg;
			antcomb->scan = true;
			/* set to A+B */
			div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
			antcomb->rssi_add = alt_rssi_avg;
			antcomb->scan = true;
			/* set to A-B */
			div_ant_conf.alt_lna_conf =
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf);

	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}
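/*
 * Main RX processing loop, run from the RX tasklet for either the
 * high- or low-priority EDMA queue (legacy chips always use the LP
 * path). With flush set, completed buffers are requeued to the
 * hardware without being delivered to mac80211.
 */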
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy, so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue_drop_frag;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;
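		/*
		 * Rebuild a 64-bit MAC time: rs_tstamp is only the low 32
		 * bits of the TSF at RX time, so splice it into the 64-bit
		 * TSF read above and correct by one 2^32 step if the low
		 * word wrapped between the frame and the TSF read. Example:
		 * tsf = 0x1_00000010 with rs_tstamp = 0xfffffff0 means the
		 * frame arrived just before the wrap, so 2^32 is subtracted.
		 */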
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue_drop_frag;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

		if (rs.rs_more) {
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			sc->rx.frag = NULL;

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				goto requeue_drop_frag;
			}

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		spin_lock_irqsave(&sc->sc_pm_lock, flags);

		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
			ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}