recv.c revision 9c1d8e4affe6748d884a677cf5db19ae0c20ef07
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
					       int mindelta, int main_rssi_avg,
					       int alt_rssi_avg, int pkt_count)
{
	return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
		 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
		(alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
}

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
					    struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
		    == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return hw;
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self-link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors.
The rx_bufsize here tells the hardware 86 * how much data it can DMA to us and that we are prepared 87 * to process 88 */ 89 ath9k_hw_setuprxdesc(ah, ds, 90 common->rx_bufsize, 91 0); 92 93 if (sc->rx.rxlink == NULL) 94 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 95 else 96 *sc->rx.rxlink = bf->bf_daddr; 97 98 sc->rx.rxlink = &ds->ds_link; 99 ath9k_hw_rxena(ah); 100} 101 102static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) 103{ 104 /* XXX block beacon interrupts */ 105 ath9k_hw_setantenna(sc->sc_ah, antenna); 106 sc->rx.defant = antenna; 107 sc->rx.rxotherant = 0; 108} 109 110static void ath_opmode_init(struct ath_softc *sc) 111{ 112 struct ath_hw *ah = sc->sc_ah; 113 struct ath_common *common = ath9k_hw_common(ah); 114 115 u32 rfilt, mfilt[2]; 116 117 /* configure rx filter */ 118 rfilt = ath_calcrxfilter(sc); 119 ath9k_hw_setrxfilter(ah, rfilt); 120 121 /* configure bssid mask */ 122 ath_hw_setbssidmask(common); 123 124 /* configure operational mode */ 125 ath9k_hw_setopmode(ah); 126 127 /* calculate and install multicast filter */ 128 mfilt[0] = mfilt[1] = ~0; 129 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 130} 131 132static bool ath_rx_edma_buf_link(struct ath_softc *sc, 133 enum ath9k_rx_qtype qtype) 134{ 135 struct ath_hw *ah = sc->sc_ah; 136 struct ath_rx_edma *rx_edma; 137 struct sk_buff *skb; 138 struct ath_buf *bf; 139 140 rx_edma = &sc->rx.rx_edma[qtype]; 141 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize) 142 return false; 143 144 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 145 list_del_init(&bf->list); 146 147 skb = bf->bf_mpdu; 148 149 ATH_RXBUF_RESET(bf); 150 memset(skb->data, 0, ah->caps.rx_status_len); 151 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 152 ah->caps.rx_status_len, DMA_TO_DEVICE); 153 154 SKB_CB_ATHBUF(skb) = bf; 155 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype); 156 skb_queue_tail(&rx_edma->rx_fifo, skb); 157 158 return true; 159} 160 161static void ath_rx_addbuffer_edma(struct ath_softc *sc, 162 enum ath9k_rx_qtype qtype, int size) 163{ 164 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 165 u32 nbuf = 0; 166 167 if (list_empty(&sc->rx.rxbuf)) { 168 ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n"); 169 return; 170 } 171 172 while (!list_empty(&sc->rx.rxbuf)) { 173 nbuf++; 174 175 if (!ath_rx_edma_buf_link(sc, qtype)) 176 break; 177 178 if (nbuf >= size) 179 break; 180 } 181} 182 183static void ath_rx_remove_buffer(struct ath_softc *sc, 184 enum ath9k_rx_qtype qtype) 185{ 186 struct ath_buf *bf; 187 struct ath_rx_edma *rx_edma; 188 struct sk_buff *skb; 189 190 rx_edma = &sc->rx.rx_edma[qtype]; 191 192 while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) { 193 bf = SKB_CB_ATHBUF(skb); 194 BUG_ON(!bf); 195 list_add_tail(&bf->list, &sc->rx.rxbuf); 196 } 197} 198 199static void ath_rx_edma_cleanup(struct ath_softc *sc) 200{ 201 struct ath_buf *bf; 202 203 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 204 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 205 206 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 207 if (bf->bf_mpdu) 208 dev_kfree_skb_any(bf->bf_mpdu); 209 } 210 211 INIT_LIST_HEAD(&sc->rx.rxbuf); 212 213 kfree(sc->rx.rx_bufptr); 214 sc->rx.rx_bufptr = NULL; 215} 216 217static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) 218{ 219 skb_queue_head_init(&rx_edma->rx_fifo); 220 skb_queue_head_init(&rx_edma->rx_buffers); 221 rx_edma->rx_fifo_hwsize = size; 222} 223 224static int ath_rx_edma_init(struct ath_softc *sc, int nbufs) 225{ 226 struct ath_common 
*common = ath9k_hw_common(sc->sc_ah); 227 struct ath_hw *ah = sc->sc_ah; 228 struct sk_buff *skb; 229 struct ath_buf *bf; 230 int error = 0, i; 231 u32 size; 232 233 234 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN + 235 ah->caps.rx_status_len, 236 min(common->cachelsz, (u16)64)); 237 238 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - 239 ah->caps.rx_status_len); 240 241 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP], 242 ah->caps.rx_lp_qdepth); 243 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP], 244 ah->caps.rx_hp_qdepth); 245 246 size = sizeof(struct ath_buf) * nbufs; 247 bf = kzalloc(size, GFP_KERNEL); 248 if (!bf) 249 return -ENOMEM; 250 251 INIT_LIST_HEAD(&sc->rx.rxbuf); 252 sc->rx.rx_bufptr = bf; 253 254 for (i = 0; i < nbufs; i++, bf++) { 255 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); 256 if (!skb) { 257 error = -ENOMEM; 258 goto rx_init_fail; 259 } 260 261 memset(skb->data, 0, common->rx_bufsize); 262 bf->bf_mpdu = skb; 263 264 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 265 common->rx_bufsize, 266 DMA_BIDIRECTIONAL); 267 if (unlikely(dma_mapping_error(sc->dev, 268 bf->bf_buf_addr))) { 269 dev_kfree_skb_any(skb); 270 bf->bf_mpdu = NULL; 271 ath_print(common, ATH_DBG_FATAL, 272 "dma_mapping_error() on RX init\n"); 273 error = -ENOMEM; 274 goto rx_init_fail; 275 } 276 277 list_add_tail(&bf->list, &sc->rx.rxbuf); 278 } 279 280 return 0; 281 282rx_init_fail: 283 ath_rx_edma_cleanup(sc); 284 return error; 285} 286 287static void ath_edma_start_recv(struct ath_softc *sc) 288{ 289 spin_lock_bh(&sc->rx.rxbuflock); 290 291 ath9k_hw_rxena(sc->sc_ah); 292 293 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP, 294 sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize); 295 296 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP, 297 sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize); 298 299 spin_unlock_bh(&sc->rx.rxbuflock); 300 301 ath_opmode_init(sc); 302 303 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 304} 305 306static void ath_edma_stop_recv(struct ath_softc *sc) 307{ 308 spin_lock_bh(&sc->rx.rxbuflock); 309 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 310 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 311 spin_unlock_bh(&sc->rx.rxbuflock); 312} 313 314int ath_rx_init(struct ath_softc *sc, int nbufs) 315{ 316 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 317 struct sk_buff *skb; 318 struct ath_buf *bf; 319 int error = 0; 320 321 spin_lock_init(&sc->rx.rxflushlock); 322 sc->sc_flags &= ~SC_OP_RXFLUSH; 323 spin_lock_init(&sc->rx.rxbuflock); 324 325 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 326 return ath_rx_edma_init(sc, nbufs); 327 } else { 328 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN, 329 min(common->cachelsz, (u16)64)); 330 331 ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 332 common->cachelsz, common->rx_bufsize); 333 334 /* Initialize rx descriptors */ 335 336 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 337 "rx", nbufs, 1, 0); 338 if (error != 0) { 339 ath_print(common, ATH_DBG_FATAL, 340 "failed to allocate rx descriptors: %d\n", 341 error); 342 goto err; 343 } 344 345 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 346 skb = ath_rxbuf_alloc(common, common->rx_bufsize, 347 GFP_KERNEL); 348 if (skb == NULL) { 349 error = -ENOMEM; 350 goto err; 351 } 352 353 bf->bf_mpdu = skb; 354 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 355 common->rx_bufsize, 356 DMA_FROM_DEVICE); 357 if (unlikely(dma_mapping_error(sc->dev, 358 bf->bf_buf_addr))) { 359 
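			/* Mapping failed: free this skb, drop the buffer and unwind RX init below. */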
dev_kfree_skb_any(skb); 360 bf->bf_mpdu = NULL; 361 ath_print(common, ATH_DBG_FATAL, 362 "dma_mapping_error() on RX init\n"); 363 error = -ENOMEM; 364 goto err; 365 } 366 bf->bf_dmacontext = bf->bf_buf_addr; 367 } 368 sc->rx.rxlink = NULL; 369 } 370 371err: 372 if (error) 373 ath_rx_cleanup(sc); 374 375 return error; 376} 377 378void ath_rx_cleanup(struct ath_softc *sc) 379{ 380 struct ath_hw *ah = sc->sc_ah; 381 struct ath_common *common = ath9k_hw_common(ah); 382 struct sk_buff *skb; 383 struct ath_buf *bf; 384 385 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 386 ath_rx_edma_cleanup(sc); 387 return; 388 } else { 389 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 390 skb = bf->bf_mpdu; 391 if (skb) { 392 dma_unmap_single(sc->dev, bf->bf_buf_addr, 393 common->rx_bufsize, 394 DMA_FROM_DEVICE); 395 dev_kfree_skb(skb); 396 } 397 } 398 399 if (sc->rx.rxdma.dd_desc_len != 0) 400 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); 401 } 402} 403 404/* 405 * Calculate the receive filter according to the 406 * operating mode and state: 407 * 408 * o always accept unicast, broadcast, and multicast traffic 409 * o maintain current state of phy error reception (the hal 410 * may enable phy error frames for noise immunity work) 411 * o probe request frames are accepted only when operating in 412 * hostap, adhoc, or monitor modes 413 * o enable promiscuous mode according to the interface state 414 * o accept beacons: 415 * - when operating in adhoc mode so the 802.11 layer creates 416 * node table entries for peers, 417 * - when operating in station mode for collecting rssi data when 418 * the station is otherwise quiet, or 419 * - when operating as a repeater so we see repeater-sta beacons 420 * - when scanning 421 */ 422 423u32 ath_calcrxfilter(struct ath_softc *sc) 424{ 425#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR) 426 427 u32 rfilt; 428 429 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE) 430 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST 431 | ATH9K_RX_FILTER_MCAST; 432 433 if (sc->rx.rxfilter & FIF_PROBE_REQ) 434 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 435 436 /* 437 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station 438 * mode interface or when in monitor mode. AP mode does not need this 439 * since it receives all in-BSS frames anyway. 
440 */ 441 if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && 442 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || 443 (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) 444 rfilt |= ATH9K_RX_FILTER_PROM; 445 446 if (sc->rx.rxfilter & FIF_CONTROL) 447 rfilt |= ATH9K_RX_FILTER_CONTROL; 448 449 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && 450 (sc->nvifs <= 1) && 451 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) 452 rfilt |= ATH9K_RX_FILTER_MYBEACON; 453 else 454 rfilt |= ATH9K_RX_FILTER_BEACON; 455 456 if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) || 457 AR_SREV_9285_12_OR_LATER(sc->sc_ah)) && 458 (sc->sc_ah->opmode == NL80211_IFTYPE_AP) && 459 (sc->rx.rxfilter & FIF_PSPOLL)) 460 rfilt |= ATH9K_RX_FILTER_PSPOLL; 461 462 if (conf_is_ht(&sc->hw->conf)) 463 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 464 465 if (sc->sec_wiphy || (sc->nvifs > 1) || 466 (sc->rx.rxfilter & FIF_OTHER_BSS)) { 467 /* The following may also be needed for other older chips */ 468 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) 469 rfilt |= ATH9K_RX_FILTER_PROM; 470 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; 471 } 472 473 return rfilt; 474 475#undef RX_FILTER_PRESERVE 476} 477 478int ath_startrecv(struct ath_softc *sc) 479{ 480 struct ath_hw *ah = sc->sc_ah; 481 struct ath_buf *bf, *tbf; 482 483 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 484 ath_edma_start_recv(sc); 485 return 0; 486 } 487 488 spin_lock_bh(&sc->rx.rxbuflock); 489 if (list_empty(&sc->rx.rxbuf)) 490 goto start_recv; 491 492 sc->rx.rxlink = NULL; 493 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { 494 ath_rx_buf_link(sc, bf); 495 } 496 497 /* We could have deleted elements so the list may be empty now */ 498 if (list_empty(&sc->rx.rxbuf)) 499 goto start_recv; 500 501 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 502 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 503 ath9k_hw_rxena(ah); 504 505start_recv: 506 spin_unlock_bh(&sc->rx.rxbuflock); 507 ath_opmode_init(sc); 508 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 509 510 return 0; 511} 512 513bool ath_stoprecv(struct ath_softc *sc) 514{ 515 struct ath_hw *ah = sc->sc_ah; 516 bool stopped; 517 518 ath9k_hw_stoppcurecv(ah); 519 ath9k_hw_setrxfilter(ah, 0); 520 stopped = ath9k_hw_stopdmarecv(ah); 521 522 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 523 ath_edma_stop_recv(sc); 524 else 525 sc->rx.rxlink = NULL; 526 527 return stopped; 528} 529 530void ath_flushrecv(struct ath_softc *sc) 531{ 532 spin_lock_bh(&sc->rx.rxflushlock); 533 sc->sc_flags |= SC_OP_RXFLUSH; 534 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 535 ath_rx_tasklet(sc, 1, true); 536 ath_rx_tasklet(sc, 1, false); 537 sc->sc_flags &= ~SC_OP_RXFLUSH; 538 spin_unlock_bh(&sc->rx.rxflushlock); 539} 540 541static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) 542{ 543 /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ 544 struct ieee80211_mgmt *mgmt; 545 u8 *pos, *end, id, elen; 546 struct ieee80211_tim_ie *tim; 547 548 mgmt = (struct ieee80211_mgmt *)skb->data; 549 pos = mgmt->u.beacon.variable; 550 end = skb->data + skb->len; 551 552 while (pos + 2 < end) { 553 id = *pos++; 554 elen = *pos++; 555 if (pos + elen > end) 556 break; 557 558 if (id == WLAN_EID_TIM) { 559 if (elen < sizeof(*tim)) 560 break; 561 tim = (struct ieee80211_tim_ie *) pos; 562 if (tim->dtim_count != 0) 563 break; 564 return tim->bitmap_ctrl & 0x01; 565 } 566 567 pos += elen; 568 } 569 570 return false; 571} 572 573static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) 574{ 575 struct 
ieee80211_mgmt *mgmt; 576 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 577 578 if (skb->len < 24 + 8 + 2 + 2) 579 return; 580 581 mgmt = (struct ieee80211_mgmt *)skb->data; 582 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) 583 return; /* not from our current AP */ 584 585 sc->ps_flags &= ~PS_WAIT_FOR_BEACON; 586 587 if (sc->ps_flags & PS_BEACON_SYNC) { 588 sc->ps_flags &= ~PS_BEACON_SYNC; 589 ath_print(common, ATH_DBG_PS, 590 "Reconfigure Beacon timers based on " 591 "timestamp from the AP\n"); 592 ath_beacon_config(sc, NULL); 593 } 594 595 if (ath_beacon_dtim_pending_cab(skb)) { 596 /* 597 * Remain awake waiting for buffered broadcast/multicast 598 * frames. If the last broadcast/multicast frame is not 599 * received properly, the next beacon frame will work as 600 * a backup trigger for returning into NETWORK SLEEP state, 601 * so we are waiting for it as well. 602 */ 603 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating " 604 "buffered broadcast/multicast frame(s)\n"); 605 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; 606 return; 607 } 608 609 if (sc->ps_flags & PS_WAIT_FOR_CAB) { 610 /* 611 * This can happen if a broadcast frame is dropped or the AP 612 * fails to send a frame indicating that all CAB frames have 613 * been delivered. 614 */ 615 sc->ps_flags &= ~PS_WAIT_FOR_CAB; 616 ath_print(common, ATH_DBG_PS, 617 "PS wait for CAB frames timed out\n"); 618 } 619} 620 621static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb) 622{ 623 struct ieee80211_hdr *hdr; 624 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 625 626 hdr = (struct ieee80211_hdr *)skb->data; 627 628 /* Process Beacon and CAB receive in PS state */ 629 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) 630 && ieee80211_is_beacon(hdr->frame_control)) 631 ath_rx_ps_beacon(sc, skb); 632 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 633 (ieee80211_is_data(hdr->frame_control) || 634 ieee80211_is_action(hdr->frame_control)) && 635 is_multicast_ether_addr(hdr->addr1) && 636 !ieee80211_has_moredata(hdr->frame_control)) { 637 /* 638 * No more broadcast/multicast frames to be received at this 639 * point. 640 */ 641 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); 642 ath_print(common, ATH_DBG_PS, 643 "All PS CAB frames received, back to sleep\n"); 644 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && 645 !is_multicast_ether_addr(hdr->addr1) && 646 !ieee80211_has_morefrags(hdr->frame_control)) { 647 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; 648 ath_print(common, ATH_DBG_PS, 649 "Going back to sleep after having received " 650 "PS-Poll data (0x%lx)\n", 651 sc->ps_flags & (PS_WAIT_FOR_BEACON | 652 PS_WAIT_FOR_CAB | 653 PS_WAIT_FOR_PSPOLL_DATA | 654 PS_WAIT_FOR_TX_ACK)); 655 } 656} 657 658static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw, 659 struct ath_softc *sc, struct sk_buff *skb, 660 struct ieee80211_rx_status *rxs) 661{ 662 struct ieee80211_hdr *hdr; 663 664 hdr = (struct ieee80211_hdr *)skb->data; 665 666 /* Send the frame to mac80211 */ 667 if (is_multicast_ether_addr(hdr->addr1)) { 668 int i; 669 /* 670 * Deliver broadcast/multicast frames to all suitable 671 * virtual wiphys. 
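 * Each secondary wiphy gets its own copy of the frame via skb_copy(), while the primary hw consumes the original skb.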
672 */ 673 /* TODO: filter based on channel configuration */ 674 for (i = 0; i < sc->num_sec_wiphy; i++) { 675 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 676 struct sk_buff *nskb; 677 if (aphy == NULL) 678 continue; 679 nskb = skb_copy(skb, GFP_ATOMIC); 680 if (!nskb) 681 continue; 682 ieee80211_rx(aphy->hw, nskb); 683 } 684 ieee80211_rx(sc->hw, skb); 685 } else 686 /* Deliver unicast frames based on receiver address */ 687 ieee80211_rx(hw, skb); 688} 689 690static bool ath_edma_get_buffers(struct ath_softc *sc, 691 enum ath9k_rx_qtype qtype) 692{ 693 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; 694 struct ath_hw *ah = sc->sc_ah; 695 struct ath_common *common = ath9k_hw_common(ah); 696 struct sk_buff *skb; 697 struct ath_buf *bf; 698 int ret; 699 700 skb = skb_peek(&rx_edma->rx_fifo); 701 if (!skb) 702 return false; 703 704 bf = SKB_CB_ATHBUF(skb); 705 BUG_ON(!bf); 706 707 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 708 common->rx_bufsize, DMA_FROM_DEVICE); 709 710 ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data); 711 if (ret == -EINPROGRESS) { 712 /*let device gain the buffer again*/ 713 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 714 common->rx_bufsize, DMA_FROM_DEVICE); 715 return false; 716 } 717 718 __skb_unlink(skb, &rx_edma->rx_fifo); 719 if (ret == -EINVAL) { 720 /* corrupt descriptor, skip this one and the following one */ 721 list_add_tail(&bf->list, &sc->rx.rxbuf); 722 ath_rx_edma_buf_link(sc, qtype); 723 skb = skb_peek(&rx_edma->rx_fifo); 724 if (!skb) 725 return true; 726 727 bf = SKB_CB_ATHBUF(skb); 728 BUG_ON(!bf); 729 730 __skb_unlink(skb, &rx_edma->rx_fifo); 731 list_add_tail(&bf->list, &sc->rx.rxbuf); 732 ath_rx_edma_buf_link(sc, qtype); 733 return true; 734 } 735 skb_queue_tail(&rx_edma->rx_buffers, skb); 736 737 return true; 738} 739 740static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc, 741 struct ath_rx_status *rs, 742 enum ath9k_rx_qtype qtype) 743{ 744 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; 745 struct sk_buff *skb; 746 struct ath_buf *bf; 747 748 while (ath_edma_get_buffers(sc, qtype)); 749 skb = __skb_dequeue(&rx_edma->rx_buffers); 750 if (!skb) 751 return NULL; 752 753 bf = SKB_CB_ATHBUF(skb); 754 ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data); 755 return bf; 756} 757 758static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc, 759 struct ath_rx_status *rs) 760{ 761 struct ath_hw *ah = sc->sc_ah; 762 struct ath_common *common = ath9k_hw_common(ah); 763 struct ath_desc *ds; 764 struct ath_buf *bf; 765 int ret; 766 767 if (list_empty(&sc->rx.rxbuf)) { 768 sc->rx.rxlink = NULL; 769 return NULL; 770 } 771 772 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 773 ds = bf->bf_desc; 774 775 /* 776 * Must provide the virtual address of the current 777 * descriptor, the physical address, and the virtual 778 * address of the next descriptor in the h/w chain. 779 * This allows the HAL to look ahead to see if the 780 * hardware is done with a descriptor by checking the 781 * done bit in the following descriptor and the address 782 * of the current descriptor the DMA engine is working 783 * on. All this is necessary because of our use of 784 * a self-linked list to avoid rx overruns. 
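 * Concretely: if ath9k_hw_rxprocdesc() reports the current descriptor as still in progress (-EINPROGRESS), the code below peeks at the next descriptor's done bit before trusting that result.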
785 */ 786 ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0); 787 if (ret == -EINPROGRESS) { 788 struct ath_rx_status trs; 789 struct ath_buf *tbf; 790 struct ath_desc *tds; 791 792 memset(&trs, 0, sizeof(trs)); 793 if (list_is_last(&bf->list, &sc->rx.rxbuf)) { 794 sc->rx.rxlink = NULL; 795 return NULL; 796 } 797 798 tbf = list_entry(bf->list.next, struct ath_buf, list); 799 800 /* 801 * On some hardware the descriptor status words could 802 * get corrupted, including the done bit. Because of 803 * this, check if the next descriptor's done bit is 804 * set or not. 805 * 806 * If the next descriptor's done bit is set, the current 807 * descriptor has been corrupted. Force s/w to discard 808 * this descriptor and continue... 809 */ 810 811 tds = tbf->bf_desc; 812 ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0); 813 if (ret == -EINPROGRESS) 814 return NULL; 815 } 816 817 if (!bf->bf_mpdu) 818 return bf; 819 820 /* 821 * Synchronize the DMA transfer with CPU before 822 * 1. accessing the frame 823 * 2. requeueing the same buffer to h/w 824 */ 825 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 826 common->rx_bufsize, 827 DMA_FROM_DEVICE); 828 829 return bf; 830} 831 832/* Assumes you've already done the endian to CPU conversion */ 833static bool ath9k_rx_accept(struct ath_common *common, 834 struct ieee80211_hdr *hdr, 835 struct ieee80211_rx_status *rxs, 836 struct ath_rx_status *rx_stats, 837 bool *decrypt_error) 838{ 839 struct ath_hw *ah = common->ah; 840 __le16 fc; 841 u8 rx_status_len = ah->caps.rx_status_len; 842 843 fc = hdr->frame_control; 844 845 if (!rx_stats->rs_datalen) 846 return false; 847 /* 848 * rs_status follows rs_datalen so if rs_datalen is too large 849 * we can take a hint that hardware corrupted it, so ignore 850 * those frames. 851 */ 852 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) 853 return false; 854 855 /* 856 * rs_more indicates chained descriptors which can be used 857 * to link buffers together for a sort of scatter-gather 858 * operation. 859 * reject the frame, we don't support scatter-gather yet and 860 * the frame is probably corrupt anyway 861 */ 862 if (rx_stats->rs_more) 863 return false; 864 865 /* 866 * The rx_stats->rs_status will not be set until the end of the 867 * chained descriptors so it can be ignored if rs_more is set. The 868 * rs_more will be false at the last element of the chained 869 * descriptors. 870 */ 871 if (rx_stats->rs_status != 0) { 872 if (rx_stats->rs_status & ATH9K_RXERR_CRC) 873 rxs->flag |= RX_FLAG_FAILED_FCS_CRC; 874 if (rx_stats->rs_status & ATH9K_RXERR_PHY) 875 return false; 876 877 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) { 878 *decrypt_error = true; 879 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) { 880 /* 881 * The MIC error bit is only valid if the frame 882 * is not a control frame or fragment, and it was 883 * decrypted using a valid TKIP key. 884 */ 885 if (!ieee80211_is_ctl(fc) && 886 !ieee80211_has_morefrags(fc) && 887 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && 888 test_bit(rx_stats->rs_keyix, common->tkip_keymap)) 889 rxs->flag |= RX_FLAG_MMIC_ERROR; 890 else 891 rx_stats->rs_status &= ~ATH9K_RXERR_MIC; 892 } 893 /* 894 * Reject error frames with the exception of 895 * decryption and MIC failures. For monitor mode, 896 * we also ignore the CRC error. 
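 * (such frames are still delivered, with RX_FLAG_FAILED_FCS_CRC set above, so sniffers can see them)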
		 */
		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}
	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
		  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);

	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_sta *sta;
	struct ath_node *an;
	int last_rssi = ATH_RSSI_DUMMY_MARKER;
	__le16 fc;

	fc = hdr->frame_control;

	rcu_read_lock();
	/*
	 * XXX: use ieee80211_find_sta! This requires quite a bit of work
	 * under the current ath9k virtual wiphy implementation as we have
	 * no way of tying a vif to a wiphy. Typically vifs are attached to
	 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
	 * wiphy you'd have to iterate over every wiphy and each sdata.
	 */
	if (is_multicast_ether_addr(hdr->addr1))
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
	else
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, hdr->addr1);

	if (sta) {
		an = (struct ath_node *) sta->drv_priv;
		if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
		    !rx_stats->rs_moreaggr)
			ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
		last_rssi = an->last_rssi;
	}
	rcu_read_unlock();

	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					       ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI; this is used by ANI. */
	if (ieee80211_is_beacon(fc))
		ah->stats.avgbrssi = rx_stats->rs_rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics there.
1008 */ 1009static int ath9k_rx_skb_preprocess(struct ath_common *common, 1010 struct ieee80211_hw *hw, 1011 struct ieee80211_hdr *hdr, 1012 struct ath_rx_status *rx_stats, 1013 struct ieee80211_rx_status *rx_status, 1014 bool *decrypt_error) 1015{ 1016 memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); 1017 1018 /* 1019 * everything but the rate is checked here, the rate check is done 1020 * separately to avoid doing two lookups for a rate for each frame. 1021 */ 1022 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) 1023 return -EINVAL; 1024 1025 ath9k_process_rssi(common, hw, hdr, rx_stats); 1026 1027 if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 1028 return -EINVAL; 1029 1030 rx_status->band = hw->conf.channel->band; 1031 rx_status->freq = hw->conf.channel->center_freq; 1032 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; 1033 rx_status->antenna = rx_stats->rs_antenna; 1034 rx_status->flag |= RX_FLAG_TSFT; 1035 1036 return 0; 1037} 1038 1039static void ath9k_rx_skb_postprocess(struct ath_common *common, 1040 struct sk_buff *skb, 1041 struct ath_rx_status *rx_stats, 1042 struct ieee80211_rx_status *rxs, 1043 bool decrypt_error) 1044{ 1045 struct ath_hw *ah = common->ah; 1046 struct ieee80211_hdr *hdr; 1047 int hdrlen, padpos, padsize; 1048 u8 keyix; 1049 __le16 fc; 1050 1051 /* see if any padding is done by the hw and remove it */ 1052 hdr = (struct ieee80211_hdr *) skb->data; 1053 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1054 fc = hdr->frame_control; 1055 padpos = ath9k_cmn_padpos(hdr->frame_control); 1056 1057 /* The MAC header is padded to have 32-bit boundary if the 1058 * packet payload is non-zero. The general calculation for 1059 * padsize would take into account odd header lengths: 1060 * padsize = (4 - padpos % 4) % 4; However, since only 1061 * even-length headers are used, padding can only be 0 or 2 1062 * bytes and we can optimize this a bit. In addition, we must 1063 * not try to remove padding from short control frames that do 1064 * not have payload. */ 1065 padsize = padpos & 3; 1066 if (padsize && skb->len>=padpos+padsize+FCS_LEN) { 1067 memmove(skb->data + padsize, skb->data, padpos); 1068 skb_pull(skb, padsize); 1069 } 1070 1071 keyix = rx_stats->rs_keyix; 1072 1073 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && 1074 ieee80211_has_protected(fc)) { 1075 rxs->flag |= RX_FLAG_DECRYPTED; 1076 } else if (ieee80211_has_protected(fc) 1077 && !decrypt_error && skb->len >= hdrlen + 4) { 1078 keyix = skb->data[hdrlen + 3] >> 6; 1079 1080 if (test_bit(keyix, common->keymap)) 1081 rxs->flag |= RX_FLAG_DECRYPTED; 1082 } 1083 if (ah->sw_mgmt_crypto && 1084 (rxs->flag & RX_FLAG_DECRYPTED) && 1085 ieee80211_is_mgmt(fc)) 1086 /* Use software decrypt for management frames. 
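 * Clearing RX_FLAG_DECRYPTED here makes mac80211 run its software crypto path for these frames.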
*/ 1087 rxs->flag &= ~RX_FLAG_DECRYPTED; 1088} 1089 1090static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb, 1091 struct ath_hw_antcomb_conf ant_conf, 1092 int main_rssi_avg) 1093{ 1094 antcomb->quick_scan_cnt = 0; 1095 1096 if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2) 1097 antcomb->rssi_lna2 = main_rssi_avg; 1098 else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1) 1099 antcomb->rssi_lna1 = main_rssi_avg; 1100 1101 switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) { 1102 case (0x10): /* LNA2 A-B */ 1103 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1104 antcomb->first_quick_scan_conf = 1105 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1106 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1; 1107 break; 1108 case (0x20): /* LNA1 A-B */ 1109 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1110 antcomb->first_quick_scan_conf = 1111 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1112 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2; 1113 break; 1114 case (0x21): /* LNA1 LNA2 */ 1115 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2; 1116 antcomb->first_quick_scan_conf = 1117 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1118 antcomb->second_quick_scan_conf = 1119 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1120 break; 1121 case (0x12): /* LNA2 LNA1 */ 1122 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1; 1123 antcomb->first_quick_scan_conf = 1124 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1125 antcomb->second_quick_scan_conf = 1126 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1127 break; 1128 case (0x13): /* LNA2 A+B */ 1129 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1130 antcomb->first_quick_scan_conf = 1131 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1132 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1; 1133 break; 1134 case (0x23): /* LNA1 A+B */ 1135 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1136 antcomb->first_quick_scan_conf = 1137 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2; 1138 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2; 1139 break; 1140 default: 1141 break; 1142 } 1143} 1144 1145static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb, 1146 struct ath_hw_antcomb_conf *div_ant_conf, 1147 int main_rssi_avg, int alt_rssi_avg, 1148 int alt_ratio) 1149{ 1150 /* alt_good */ 1151 switch (antcomb->quick_scan_cnt) { 1152 case 0: 1153 /* set alt to main, and alt to first conf */ 1154 div_ant_conf->main_lna_conf = antcomb->main_conf; 1155 div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf; 1156 break; 1157 case 1: 1158 /* set alt to main, and alt to first conf */ 1159 div_ant_conf->main_lna_conf = antcomb->main_conf; 1160 div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf; 1161 antcomb->rssi_first = main_rssi_avg; 1162 antcomb->rssi_second = alt_rssi_avg; 1163 1164 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) { 1165 /* main is LNA1 */ 1166 if (ath_is_alt_ant_ratio_better(alt_ratio, 1167 ATH_ANT_DIV_COMB_LNA1_DELTA_HI, 1168 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, 1169 main_rssi_avg, alt_rssi_avg, 1170 antcomb->total_pkt_count)) 1171 antcomb->first_ratio = true; 1172 else 1173 antcomb->first_ratio = false; 1174 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) { 1175 if (ath_is_alt_ant_ratio_better(alt_ratio, 1176 ATH_ANT_DIV_COMB_LNA1_DELTA_MID, 1177 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW, 1178 main_rssi_avg, alt_rssi_avg, 1179 antcomb->total_pkt_count)) 1180 antcomb->first_ratio = true; 1181 else 1182 antcomb->first_ratio = false; 1183 } else { 1184 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) && 1185 (alt_rssi_avg > main_rssi_avg + 1186 
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->first_ratio = true;
			else
				antcomb->first_ratio = false;
		}
		break;
	case 2:
		antcomb->alt_good = false;
		antcomb->scan_not_start = false;
		antcomb->scan = false;
		antcomb->rssi_first = main_rssi_avg;
		antcomb->rssi_third = alt_rssi_avg;

		if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
			antcomb->rssi_lna1 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA2)
			antcomb->rssi_lna2 = alt_rssi_avg;
		else if (antcomb->second_quick_scan_conf ==
			 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
			if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
				antcomb->rssi_lna2 = main_rssi_avg;
			else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
				antcomb->rssi_lna1 = main_rssi_avg;
		}

		if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
		    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
		else
			div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;

		if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
			if (ath_is_alt_ant_ratio_better(alt_ratio,
						ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
						ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
						main_rssi_avg, alt_rssi_avg,
						antcomb->total_pkt_count))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		} else {
			if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
			    (alt_rssi_avg > main_rssi_avg +
			    ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
			    (alt_rssi_avg > main_rssi_avg)) &&
			    (antcomb->total_pkt_count > 50))
				antcomb->second_ratio = true;
			else
				antcomb->second_ratio = false;
		}

		/* set alt to the conf with maximum ratio */
		if (antcomb->first_ratio && antcomb->second_ratio) {
			if (antcomb->rssi_second > antcomb->rssi_third) {
				/* first alt */
				if ((antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA1) ||
				    (antcomb->first_quick_scan_conf ==
				    ATH_ANT_DIV_COMB_LNA2))
					/* Set alt LNA1 or LNA2 */
					if (div_ant_conf->main_lna_conf ==
					    ATH_ANT_DIV_COMB_LNA2)
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA1;
					else
						div_ant_conf->alt_lna_conf =
							ATH_ANT_DIV_COMB_LNA2;
				else
					/* Set alt to A+B or A-B */
					div_ant_conf->alt_lna_conf =
						antcomb->first_quick_scan_conf;
			} else if ((antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA1) ||
				   (antcomb->second_quick_scan_conf ==
				   ATH_ANT_DIV_COMB_LNA2)) {
				/* Set alt LNA1 or LNA2 */
				if (div_ant_conf->main_lna_conf ==
				    ATH_ANT_DIV_COMB_LNA2)
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				else
					div_ant_conf->alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
			} else {
				/* Set alt to A+B or A-B */
				div_ant_conf->alt_lna_conf =
					antcomb->second_quick_scan_conf;
			}
		} else if (antcomb->first_ratio) {
			/* first alt */
			if ((antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA1) ||
			    (antcomb->first_quick_scan_conf ==
			    ATH_ANT_DIV_COMB_LNA2))
				/* Set alt
LNA1 or LNA2 */ 1293 if (div_ant_conf->main_lna_conf == 1294 ATH_ANT_DIV_COMB_LNA2) 1295 div_ant_conf->alt_lna_conf = 1296 ATH_ANT_DIV_COMB_LNA1; 1297 else 1298 div_ant_conf->alt_lna_conf = 1299 ATH_ANT_DIV_COMB_LNA2; 1300 else 1301 /* Set alt to A+B or A-B */ 1302 div_ant_conf->alt_lna_conf = 1303 antcomb->first_quick_scan_conf; 1304 } else if (antcomb->second_ratio) { 1305 /* second alt */ 1306 if ((antcomb->second_quick_scan_conf == 1307 ATH_ANT_DIV_COMB_LNA1) || 1308 (antcomb->second_quick_scan_conf == 1309 ATH_ANT_DIV_COMB_LNA2)) 1310 /* Set alt LNA1 or LNA2 */ 1311 if (div_ant_conf->main_lna_conf == 1312 ATH_ANT_DIV_COMB_LNA2) 1313 div_ant_conf->alt_lna_conf = 1314 ATH_ANT_DIV_COMB_LNA1; 1315 else 1316 div_ant_conf->alt_lna_conf = 1317 ATH_ANT_DIV_COMB_LNA2; 1318 else 1319 /* Set alt to A+B or A-B */ 1320 div_ant_conf->alt_lna_conf = 1321 antcomb->second_quick_scan_conf; 1322 } else { 1323 /* main is largest */ 1324 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) || 1325 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)) 1326 /* Set alt LNA1 or LNA2 */ 1327 if (div_ant_conf->main_lna_conf == 1328 ATH_ANT_DIV_COMB_LNA2) 1329 div_ant_conf->alt_lna_conf = 1330 ATH_ANT_DIV_COMB_LNA1; 1331 else 1332 div_ant_conf->alt_lna_conf = 1333 ATH_ANT_DIV_COMB_LNA2; 1334 else 1335 /* Set alt to A+B or A-B */ 1336 div_ant_conf->alt_lna_conf = antcomb->main_conf; 1337 } 1338 break; 1339 default: 1340 break; 1341 } 1342} 1343 1344static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf) 1345{ 1346 /* Adjust the fast_div_bias based on main and alt lna conf */ 1347 switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) { 1348 case (0x01): /* A-B LNA2 */ 1349 ant_conf->fast_div_bias = 0x3b; 1350 break; 1351 case (0x02): /* A-B LNA1 */ 1352 ant_conf->fast_div_bias = 0x3d; 1353 break; 1354 case (0x03): /* A-B A+B */ 1355 ant_conf->fast_div_bias = 0x1; 1356 break; 1357 case (0x10): /* LNA2 A-B */ 1358 ant_conf->fast_div_bias = 0x7; 1359 break; 1360 case (0x12): /* LNA2 LNA1 */ 1361 ant_conf->fast_div_bias = 0x2; 1362 break; 1363 case (0x13): /* LNA2 A+B */ 1364 ant_conf->fast_div_bias = 0x7; 1365 break; 1366 case (0x20): /* LNA1 A-B */ 1367 ant_conf->fast_div_bias = 0x6; 1368 break; 1369 case (0x21): /* LNA1 LNA2 */ 1370 ant_conf->fast_div_bias = 0x0; 1371 break; 1372 case (0x23): /* LNA1 A+B */ 1373 ant_conf->fast_div_bias = 0x6; 1374 break; 1375 case (0x30): /* A+B A-B */ 1376 ant_conf->fast_div_bias = 0x1; 1377 break; 1378 case (0x31): /* A+B LNA2 */ 1379 ant_conf->fast_div_bias = 0x3b; 1380 break; 1381 case (0x32): /* A+B LNA1 */ 1382 ant_conf->fast_div_bias = 0x3d; 1383 break; 1384 default: 1385 break; 1386 } 1387} 1388 1389/* Antenna diversity and combining */ 1390static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs) 1391{ 1392 struct ath_hw_antcomb_conf div_ant_conf; 1393 struct ath_ant_comb *antcomb = &sc->ant_comb; 1394 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set; 1395 int curr_main_set, curr_bias; 1396 int main_rssi = rs->rs_rssi_ctl0; 1397 int alt_rssi = rs->rs_rssi_ctl1; 1398 int rx_ant_conf, main_ant_conf; 1399 bool short_scan = false; 1400 1401 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) & 1402 ATH_ANT_RX_MASK; 1403 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) & 1404 ATH_ANT_RX_MASK; 1405 1406 /* Record packet only when alt_rssi is positive */ 1407 if (alt_rssi > 0) { 1408 antcomb->total_pkt_count++; 1409 antcomb->main_total_rssi += main_rssi; 1410 antcomb->alt_total_rssi += alt_rssi; 
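		/* Credit the frame to whichever LNA configuration actually received it. */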
1411 if (main_ant_conf == rx_ant_conf) 1412 antcomb->main_recv_cnt++; 1413 else 1414 antcomb->alt_recv_cnt++; 1415 } 1416 1417 /* Short scan check */ 1418 if (antcomb->scan && antcomb->alt_good) { 1419 if (time_after(jiffies, antcomb->scan_start_time + 1420 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR))) 1421 short_scan = true; 1422 else 1423 if (antcomb->total_pkt_count == 1424 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) { 1425 alt_ratio = ((antcomb->alt_recv_cnt * 100) / 1426 antcomb->total_pkt_count); 1427 if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO) 1428 short_scan = true; 1429 } 1430 } 1431 1432 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) || 1433 rs->rs_moreaggr) && !short_scan) 1434 return; 1435 1436 if (antcomb->total_pkt_count) { 1437 alt_ratio = ((antcomb->alt_recv_cnt * 100) / 1438 antcomb->total_pkt_count); 1439 main_rssi_avg = (antcomb->main_total_rssi / 1440 antcomb->total_pkt_count); 1441 alt_rssi_avg = (antcomb->alt_total_rssi / 1442 antcomb->total_pkt_count); 1443 } 1444 1445 1446 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf); 1447 curr_alt_set = div_ant_conf.alt_lna_conf; 1448 curr_main_set = div_ant_conf.main_lna_conf; 1449 curr_bias = div_ant_conf.fast_div_bias; 1450 1451 antcomb->count++; 1452 1453 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) { 1454 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) { 1455 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf, 1456 main_rssi_avg); 1457 antcomb->alt_good = true; 1458 } else { 1459 antcomb->alt_good = false; 1460 } 1461 1462 antcomb->count = 0; 1463 antcomb->scan = true; 1464 antcomb->scan_not_start = true; 1465 } 1466 1467 if (!antcomb->scan) { 1468 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) { 1469 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) { 1470 /* Switch main and alt LNA */ 1471 div_ant_conf.main_lna_conf = 1472 ATH_ANT_DIV_COMB_LNA2; 1473 div_ant_conf.alt_lna_conf = 1474 ATH_ANT_DIV_COMB_LNA1; 1475 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) { 1476 div_ant_conf.main_lna_conf = 1477 ATH_ANT_DIV_COMB_LNA1; 1478 div_ant_conf.alt_lna_conf = 1479 ATH_ANT_DIV_COMB_LNA2; 1480 } 1481 1482 goto div_comb_done; 1483 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) && 1484 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) { 1485 /* Set alt to another LNA */ 1486 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) 1487 div_ant_conf.alt_lna_conf = 1488 ATH_ANT_DIV_COMB_LNA1; 1489 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) 1490 div_ant_conf.alt_lna_conf = 1491 ATH_ANT_DIV_COMB_LNA2; 1492 1493 goto div_comb_done; 1494 } 1495 1496 if ((alt_rssi_avg < (main_rssi_avg + 1497 ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA))) 1498 goto div_comb_done; 1499 } 1500 1501 if (!antcomb->scan_not_start) { 1502 switch (curr_alt_set) { 1503 case ATH_ANT_DIV_COMB_LNA2: 1504 antcomb->rssi_lna2 = alt_rssi_avg; 1505 antcomb->rssi_lna1 = main_rssi_avg; 1506 antcomb->scan = true; 1507 /* set to A+B */ 1508 div_ant_conf.main_lna_conf = 1509 ATH_ANT_DIV_COMB_LNA1; 1510 div_ant_conf.alt_lna_conf = 1511 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1512 break; 1513 case ATH_ANT_DIV_COMB_LNA1: 1514 antcomb->rssi_lna1 = alt_rssi_avg; 1515 antcomb->rssi_lna2 = main_rssi_avg; 1516 antcomb->scan = true; 1517 /* set to A+B */ 1518 div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2; 1519 div_ant_conf.alt_lna_conf = 1520 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 1521 break; 1522 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2: 1523 antcomb->rssi_add = alt_rssi_avg; 1524 antcomb->scan = true; 1525 /* set to A-B */ 1526 div_ant_conf.alt_lna_conf = 1527 
				ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
			break;
		case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
			antcomb->rssi_sub = alt_rssi_avg;
			antcomb->scan = false;
			if (antcomb->rssi_lna2 >
			    (antcomb->rssi_lna1 +
			    ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
				/* use LNA2 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA1 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
				}
			} else {
				/* use LNA1 as main LNA */
				if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
				    (antcomb->rssi_add > antcomb->rssi_sub)) {
					/* set to A+B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
				} else if (antcomb->rssi_sub >
					   antcomb->rssi_lna1) {
					/* set to A-B */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
				} else {
					/* set to LNA2 */
					div_ant_conf.main_lna_conf =
						ATH_ANT_DIV_COMB_LNA1;
					div_ant_conf.alt_lna_conf =
						ATH_ANT_DIV_COMB_LNA2;
				}
			}
			break;
		default:
			break;
		}
	} else {
		if (!antcomb->alt_good) {
			antcomb->scan_not_start = false;
			/* Set alt to another LNA */
			if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
			} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
				div_ant_conf.main_lna_conf =
					ATH_ANT_DIV_COMB_LNA1;
				div_ant_conf.alt_lna_conf =
					ATH_ANT_DIV_COMB_LNA2;
			}
			goto div_comb_done;
		}
	}

	ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
					   main_rssi_avg, alt_rssi_avg,
					   alt_ratio);

	antcomb->quick_scan_cnt++;

div_comb_done:
	ath_ant_div_conf_fast_divbias(&div_ant_conf);

	ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);

	antcomb->scan_start_time = jiffies;
	antcomb->total_pkt_count = 0;
	antcomb->main_total_rssi = 0;
	antcomb->alt_total_rssi = 0;
	antcomb->main_recv_cnt = 0;
	antcomb->alt_recv_cnt = 0;
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy, so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
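	 * (ath_get_virt_hw() above does this per frame, matching addr1 against each secondary wiphy's permanent address)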
1634 */ 1635 struct ieee80211_hw *hw = NULL; 1636 struct ieee80211_hdr *hdr; 1637 int retval; 1638 bool decrypt_error = false; 1639 struct ath_rx_status rs; 1640 enum ath9k_rx_qtype qtype; 1641 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1642 int dma_type; 1643 u8 rx_status_len = ah->caps.rx_status_len; 1644 u64 tsf = 0; 1645 u32 tsf_lower = 0; 1646 unsigned long flags; 1647 1648 if (edma) 1649 dma_type = DMA_BIDIRECTIONAL; 1650 else 1651 dma_type = DMA_FROM_DEVICE; 1652 1653 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; 1654 spin_lock_bh(&sc->rx.rxbuflock); 1655 1656 tsf = ath9k_hw_gettsf64(ah); 1657 tsf_lower = tsf & 0xffffffff; 1658 1659 do { 1660 /* If handling rx interrupt and flush is in progress => exit */ 1661 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) 1662 break; 1663 1664 memset(&rs, 0, sizeof(rs)); 1665 if (edma) 1666 bf = ath_edma_get_next_rx_buf(sc, &rs, qtype); 1667 else 1668 bf = ath_get_next_rx_buf(sc, &rs); 1669 1670 if (!bf) 1671 break; 1672 1673 skb = bf->bf_mpdu; 1674 if (!skb) 1675 continue; 1676 1677 hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len); 1678 rxs = IEEE80211_SKB_RXCB(skb); 1679 1680 hw = ath_get_virt_hw(sc, hdr); 1681 1682 ath_debug_stat_rx(sc, &rs); 1683 1684 /* 1685 * If we're asked to flush receive queue, directly 1686 * chain it back at the queue without processing it. 1687 */ 1688 if (flush) 1689 goto requeue; 1690 1691 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, 1692 rxs, &decrypt_error); 1693 if (retval) 1694 goto requeue; 1695 1696 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1697 if (rs.rs_tstamp > tsf_lower && 1698 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) 1699 rxs->mactime -= 0x100000000ULL; 1700 1701 if (rs.rs_tstamp < tsf_lower && 1702 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) 1703 rxs->mactime += 0x100000000ULL; 1704 1705 /* Ensure we always have an skb to requeue once we are done 1706 * processing the current buffer's skb */ 1707 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); 1708 1709 /* If there is no memory we ignore the current RX'd frame, 1710 * tell hardware it can give us a new frame using the old 1711 * skb and put it at the tail of the sc->rx.rxbuf list for 1712 * processing. */ 1713 if (!requeue_skb) 1714 goto requeue; 1715 1716 /* Unmap the frame */ 1717 dma_unmap_single(sc->dev, bf->bf_buf_addr, 1718 common->rx_bufsize, 1719 dma_type); 1720 1721 skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len); 1722 if (ah->caps.rx_status_len) 1723 skb_pull(skb, ah->caps.rx_status_len); 1724 1725 ath9k_rx_skb_postprocess(common, skb, &rs, 1726 rxs, decrypt_error); 1727 1728 /* We will now give hardware our shiny new allocated skb */ 1729 bf->bf_mpdu = requeue_skb; 1730 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data, 1731 common->rx_bufsize, 1732 dma_type); 1733 if (unlikely(dma_mapping_error(sc->dev, 1734 bf->bf_buf_addr))) { 1735 dev_kfree_skb_any(requeue_skb); 1736 bf->bf_mpdu = NULL; 1737 ath_print(common, ATH_DBG_FATAL, 1738 "dma_mapping_error() on RX\n"); 1739 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 1740 break; 1741 } 1742 bf->bf_dmacontext = bf->bf_buf_addr; 1743 1744 /* 1745 * change the default rx antenna if rx diversity chooses the 1746 * other antenna 3 times in a row. 
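 * rxotherant counts consecutive frames received on a non-default antenna and is reset as soon as a frame arrives on the current default antenna.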
1747 */ 1748 if (sc->rx.defant != rs.rs_antenna) { 1749 if (++sc->rx.rxotherant >= 3) 1750 ath_setdefantenna(sc, rs.rs_antenna); 1751 } else { 1752 sc->rx.rxotherant = 0; 1753 } 1754 1755 spin_lock_irqsave(&sc->sc_pm_lock, flags); 1756 if (unlikely(ath9k_check_auto_sleep(sc) || 1757 (sc->ps_flags & (PS_WAIT_FOR_BEACON | 1758 PS_WAIT_FOR_CAB | 1759 PS_WAIT_FOR_PSPOLL_DATA)))) 1760 ath_rx_ps(sc, skb); 1761 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 1762 1763 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) 1764 ath_ant_comb_scan(sc, &rs); 1765 1766 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 1767 1768requeue: 1769 if (edma) { 1770 list_add_tail(&bf->list, &sc->rx.rxbuf); 1771 ath_rx_edma_buf_link(sc, qtype); 1772 } else { 1773 list_move_tail(&bf->list, &sc->rx.rxbuf); 1774 ath_rx_buf_link(sc, bf); 1775 } 1776 } while (1); 1777 1778 spin_unlock_bh(&sc->rx.rxbuflock); 1779 1780 return 0; 1781} 1782