recv.c revision f0e9a8606ce60880249fd570fbebf4472c3d37c0
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
					     struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
		    == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return hw;
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ASSERT(skb != NULL);
	ds->ds_vdata = skb->data;

	/* setup rx descriptors. The rx.bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process */
	ath9k_hw_setuprxdesc(ah, ds,
			     sc->rx.bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

/*
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return (tsf & ~0x7fff) | rstamp;
}
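/*
 * Worked example of the extension above: if the current hardware TSF is
 * 0x12348123 and the descriptor timestamp (rstamp) is 0x7fff, then
 * (tsf & 0x7fff) == 0x0123 is smaller than rstamp, i.e. the low 15 bits
 * wrapped after the frame was stamped. Stepping the TSF back by 0x8000
 * and splicing in rstamp yields 0x12347fff.
 */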
static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len, gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 off;

	/*
	 * Cache-line-align.  This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */

	/* Note: the kernel can allocate a value greater than
	 * what we ask it to give us. We really only need 4 KB as that
	 * is what this hardware supports and in fact we need at least 3849
	 * as that is the MAX AMSDU size this hardware supports.
	 * Unfortunately this means we may get 8 KB here from the
	 * kernel... and that is actually what is observed on some
	 * systems :( */
	skb = __dev_alloc_skb(len + sc->cachelsz - 1, gfp_mask);
	if (skb != NULL) {
		off = ((unsigned long) skb->data) % sc->cachelsz;
		if (off != 0)
			skb_reserve(skb, sc->cachelsz - off);
	} else {
		DPRINTF(sc, ATH_DBG_FATAL,
			"skbuff alloc of size %u failed\n", len);
		return NULL;
	}

	return skb;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics there.
 */
static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
			  struct ieee80211_rx_status *rx_status, bool *decrypt_error,
			  struct ath_softc *sc)
{
	struct ieee80211_hdr *hdr;
	u8 ratecode;
	__le16 fc;
	struct ieee80211_hw *hw;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
	hw = ath_get_virt_hw(sc, hdr);

	if (ds->ds_rxstat.rs_more) {
		/*
		 * Frame spans multiple descriptors; this cannot happen yet
		 * as we don't support jumbograms. If not in monitor mode,
		 * discard the frame. Enable this if you want to see
		 * error frames in Monitor mode.
		 */
		if (sc->sc_ah->opmode != NL80211_IFTYPE_MONITOR)
			goto rx_next;
	} else if (ds->ds_rxstat.rs_status != 0) {
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
			goto rx_next;

		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
			else
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				goto rx_next;
		} else {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				goto rx_next;
			}
		}
	}

	ratecode = ds->ds_rxstat.rs_rate;

	if (ratecode & 0x80) {
		/* HT rate */
		rx_status->flag |= RX_FLAG_HT;
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
			rx_status->flag |= RX_FLAG_40MHZ;
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
			rx_status->flag |= RX_FLAG_SHORT_GI;
		rx_status->rate_idx = ratecode & 0x7f;
	} else {
		int i = 0, cur_band, n_rates;

		cur_band = hw->conf.channel->band;
		n_rates = sc->sbands[cur_band].n_bitrates;

		for (i = 0; i < n_rates; i++) {
			if (sc->sbands[cur_band].bitrates[i].hw_value ==
			    ratecode) {
				rx_status->rate_idx = i;
				break;
			}

			if (sc->sbands[cur_band].bitrates[i].hw_value_short ==
			    ratecode) {
				rx_status->rate_idx = i;
				rx_status->flag |= RX_FLAG_SHORTPRE;
				break;
			}
		}
	}

	rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->noise = sc->ani.noise_floor;
	rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi;
	rx_status->antenna = ds->ds_rxstat.rs_antenna;

	/* at 45 you will be able to use MCS 15 reliably. A more elaborate
	 * scheme can be used here but it requires tables of SNR/throughput for
	 * each possible mode used. */
	rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45;

	/* rssi can be more than 45 though, anything above that
	 * should be considered at 100% */
	if (rx_status->qual > 100)
		rx_status->qual = 100;

	rx_status->flag |= RX_FLAG_TSFT;

	return 1;
rx_next:
	return 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(sc);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_ah->macaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
				 min(sc->cachelsz, (u16)64));

	DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
		sc->cachelsz, sc->rx.bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1);
	if (error != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"failed to allocate rx descriptors: %d\n", error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 sc->rx.bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			DPRINTF(sc, ATH_DBG_FATAL,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;
	}
	sc->rx.rxlink = NULL;

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 sc->rx.bufsize, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
		}
	}

	if (sc->rx.rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	/* If in HOSTAP mode, want to enable reception of PSPOLL frames */
	if (sc->sc_ah->opmode == NL80211_IFTYPE_AP)
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (sc->sec_wiphy) {
		/* TODO: only needed if more than one BSSID is in use in
		 * station/adhoc mode */
		/* TODO: for older chips, may need to add ATH9K_RX_FILTER_PROM
		 */
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);
	sc->rx.rxlink = NULL;

	return stopped;
}

void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	ath_rx_tasklet(sc, 1);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}
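/*
 * In a beacon's TIM element, dtim_count == 0 identifies a DTIM beacon, and
 * bit 0 of the bitmap control octet is the multicast/broadcast traffic
 * indicator the AP sets when group-addressed frames are buffered for
 * delivery right after the DTIM; that is what the parser below checks for.
 */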
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_back_to_sleep(struct ath_softc *sc)
{
	sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON | SC_OP_WAIT_FOR_CAB);
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(sc->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	if (sc->sc_flags & SC_OP_BEACON_SYNC) {
		sc->sc_flags &= ~SC_OP_BEACON_SYNC;
		DPRINTF(sc, ATH_DBG_PS, "Reconfigure Beacon timers based on "
			"timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (!(sc->hw->conf.flags & IEEE80211_CONF_PS)) {
		/* We are not in PS mode anymore; remain awake */
		DPRINTF(sc, ATH_DBG_PS, "Not in PS mode anymore, remain "
			"awake\n");
		sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON | SC_OP_WAIT_FOR_CAB);
		return;
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames.
		 */
		DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating "
			"buffered broadcast/multicast frame(s)\n");
		sc->sc_flags |= SC_OP_WAIT_FOR_CAB;
		return;
	}

	if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		DPRINTF(sc, ATH_DBG_PS, "PS wait for CAB frames timed out\n");
	}

	/* No more broadcast/multicast frames to be received at this point. */
	ath_rx_ps_back_to_sleep(sc);
}
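/*
 * The SC_OP_WAIT_FOR_BEACON, SC_OP_WAIT_FOR_CAB and SC_OP_WAIT_FOR_PSPOLL_DATA
 * flags track which frames the driver is still waiting for while in power
 * save; the helper below clears each of them once the corresponding beacon,
 * buffered CAB traffic or PS-Poll response has been received.
 */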
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) &&
	    ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		DPRINTF(sc, ATH_DBG_PS, "All PS CAB frames received, back to "
			"sleep\n");
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		ath_rx_ps_back_to_sleep(sc);
	} else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
		DPRINTF(sc, ATH_DBG_PS, "Going back to sleep after having "
			"received PS-Poll data (0x%x)\n",
			sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
					SC_OP_WAIT_FOR_CAB |
					SC_OP_WAIT_FOR_PSPOLL_DATA |
					SC_OP_WAIT_FOR_TX_ACK));
	}
}

static void ath_rx_send_to_mac80211(struct ath_softc *sc, struct sk_buff *skb,
				    struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;
			if (aphy == NULL)
				continue;
			nskb = skb_copy(skb, GFP_ATOMIC);
			if (nskb)
				__ieee80211_rx(aphy->hw, nskb, rx_status);
		}
		__ieee80211_rx(sc->hw, skb, rx_status);
	} else {
		/* Deliver unicast frames based on receiver address */
		__ieee80211_rx(ath_get_virt_hw(sc, hdr), skb, rx_status);
	}
}

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)						\
	((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc +	\
			     ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))

	struct ath_buf *bf;
	struct ath_desc *ds;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status rx_status;
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padsize, retval;
	bool decrypt_error = false;
	u8 keyix;
	__le16 fc;

	spin_lock_bh(&sc->rx.rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		if (list_empty(&sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			break;
		}

		bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
		ds = bf->bf_desc;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah, ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
				sc->rx.rxlink = NULL;
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */

			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
						     PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS) {
				break;
			}
		}

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Synchronize the DMA transfer with CPU before
		 * 1. accessing the frame
		 * 2. requeueing the same buffer to h/w
		 */
		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
					sc->rx.bufsize,
					DMA_FROM_DEVICE);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		if (!ds->ds_rxstat.rs_datalen)
			goto requeue;

		/* The status portion of the descriptor could get corrupted. */
		if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen)
			goto requeue;

		if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 sc->rx.bufsize,
				 DMA_FROM_DEVICE);

		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);

		/* see if any padding is done by the hw and remove it */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		fc = hdr->frame_control;

		/* The MAC header is padded to have 32-bit boundary if the
		 * packet payload is non-zero. The general calculation for
		 * padsize would take into account odd header lengths:
		 * padsize = (4 - hdrlen % 4) % 4; However, since only
		 * even-length headers are used, padding can only be 0 or 2
		 * bytes and we can optimize this a bit. In addition, we must
		 * not try to remove padding from short control frames that do
		 * not have payload. */
		padsize = hdrlen & 3;
		if (padsize && hdrlen >= 24) {
			memmove(skb->data + padsize, skb->data, hdrlen);
			skb_pull(skb, padsize);
		}

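		/*
		 * rs_keyix is the hardware key-cache index used for this
		 * frame; when it is ATH9K_RXKEYIX_INVALID, fall back to the
		 * key ID in bits 6-7 of the fourth IV octet of protected
		 * frames and look it up in sc->keymap.
		 */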
		keyix = ds->ds_rxstat.rs_keyix;

		if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
			rx_status.flag |= RX_FLAG_DECRYPTED;
		} else if (ieee80211_has_protected(fc)
			   && !decrypt_error && skb->len >= hdrlen + 4) {
			keyix = skb->data[hdrlen + 3] >> 6;

			if (test_bit(keyix, sc->keymap))
				rx_status.flag |= RX_FLAG_DECRYPTED;
		}
		if (ah->sw_mgmt_crypto &&
		    (rx_status.flag & RX_FLAG_DECRYPTED) &&
		    ieee80211_is_mgmt(fc)) {
			/* Use software decrypt for management frames. */
			rx_status.flag &= ~RX_FLAG_DECRYPTED;
		}

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 sc->rx.bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			DPRINTF(sc, ATH_DBG_FATAL,
				"dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(sc, skb, &rx_status);
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
					     SC_OP_WAIT_FOR_CAB |
					     SC_OP_WAIT_FOR_PSPOLL_DATA)))
			ath_rx_ps(sc, skb);

		ath_rx_send_to_mac80211(sc, skb, &rx_status);

requeue:
		list_move_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_buf_link(sc, bf);
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
#undef PA2DESC
}