recv.c revision 22e66a4c15b063aee5d03991c4b9629a3b0c4556
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
					    struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
		    == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return hw;
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ASSERT(skb != NULL);
	ds->ds_vdata = skb->data;

	/* setup rx descriptors. The rx.bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process */
	ath9k_hw_setuprxdesc(ah, ds,
			     sc->rx.bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

/*
 * Extend the 15-bit time stamp from the rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return (tsf & ~0x7fff) | rstamp;
}
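/*
 * Worked example for ath_extend_tsf() (illustrative numbers, not from
 * any hardware documentation): if the current 64-bit TSF reads
 * 0x1_0000_7000 and the descriptor stamp rstamp is 0x7fff, then
 * (tsf & 0x7fff) = 0x7000, which is smaller than rstamp, so the low
 * 15 bits of the TSF must have wrapped after the frame was stamped.
 * Subtracting 0x8000 steps back into the previous window, and splicing
 * rstamp into the low bits yields 0x0_FFFF_FFFF -- just before the
 * wrap, as expected.
 */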
/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or a real decryption error. This lets us keep statistics there.
 */
static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
			  struct ieee80211_rx_status *rx_status, bool *decrypt_error,
			  struct ath_softc *sc)
{
	struct ieee80211_hdr *hdr;
	u8 ratecode;
	__le16 fc;
	struct ieee80211_hw *hw;
	struct ieee80211_sta *sta;
	struct ath_node *an;
	int last_rssi = ATH_RSSI_DUMMY_MARKER;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
	hw = ath_get_virt_hw(sc, hdr);

	if (ds->ds_rxstat.rs_more) {
		/*
		 * Frame spans multiple descriptors; this cannot happen yet
		 * as we don't support jumbograms. If not in monitor mode,
		 * discard the frame. Enable this if you want to see
		 * error frames in Monitor mode.
		 */
		if (sc->sc_ah->opmode != NL80211_IFTYPE_MONITOR)
			goto rx_next;
	} else if (ds->ds_rxstat.rs_status != 0) {
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
			goto rx_next;

		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these MIC errors.
				 */
				ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
			else
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				goto rx_next;
		} else {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				goto rx_next;
			}
		}
	}

	ratecode = ds->ds_rxstat.rs_rate;

	if (ratecode & 0x80) {
		/* HT rate */
		rx_status->flag |= RX_FLAG_HT;
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
			rx_status->flag |= RX_FLAG_40MHZ;
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
			rx_status->flag |= RX_FLAG_SHORT_GI;
		rx_status->rate_idx = ratecode & 0x7f;
	} else {
		int i = 0, cur_band, n_rates;

		cur_band = hw->conf.channel->band;
		n_rates = sc->sbands[cur_band].n_bitrates;

		for (i = 0; i < n_rates; i++) {
			if (sc->sbands[cur_band].bitrates[i].hw_value ==
			    ratecode) {
				rx_status->rate_idx = i;
				break;
			}

			if (sc->sbands[cur_band].bitrates[i].hw_value_short ==
			    ratecode) {
				rx_status->rate_idx = i;
				rx_status->flag |= RX_FLAG_SHORTPRE;
				break;
			}
		}
	}

	rcu_read_lock();
	sta = ieee80211_find_sta(sc->hw, hdr->addr2);
	if (sta) {
		an = (struct ath_node *) sta->drv_priv;
		if (ds->ds_rxstat.rs_rssi != ATH9K_RSSI_BAD &&
		    !ds->ds_rxstat.rs_moreaggr)
			ATH_RSSI_LPF(an->last_rssi, ds->ds_rxstat.rs_rssi);
		last_rssi = an->last_rssi;
	}
	rcu_read_unlock();

	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		ds->ds_rxstat.rs_rssi = ATH_EP_RND(last_rssi,
						   ATH_RSSI_EP_MULTIPLIER);
	if (ds->ds_rxstat.rs_rssi < 0)
		ds->ds_rxstat.rs_rssi = 0;
	else if (ds->ds_rxstat.rs_rssi > 127)
		ds->ds_rxstat.rs_rssi = 127;
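	/*
	 * Note on the averaging above: ATH_RSSI_LPF keeps a per-node,
	 * exponentially weighted moving average of the RSSI in fixed
	 * point (scaled by ATH_RSSI_EP_MULTIPLIER), so a single noisy
	 * sample cannot swing the reported level; ATH_EP_RND then rounds
	 * it back to an ordinary value before the 0..127 clamp. The exact
	 * macros live in ath9k.h -- this is only a summary of their intent.
	 */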
	/* Update Beacon RSSI, this is used by ANI. */
	if (ieee80211_is_beacon(fc))
		sc->sc_ah->stats.avgbrssi = ds->ds_rxstat.rs_rssi;

	rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->noise = sc->ani.noise_floor;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + ds->ds_rxstat.rs_rssi;
	rx_status->antenna = ds->ds_rxstat.rs_antenna;

	/*
	 * Theory for reporting quality:
	 *
	 * At a hardware RSSI of 45 you will be able to use MCS 7 reliably.
	 * At a hardware RSSI of 45 you will be able to use MCS 15 reliably.
	 * At a hardware RSSI of 35 you should be able to use 54 Mbps reliably.
	 *
	 * MCS 7 is the highest MCS index usable by a 1-stream device.
	 * MCS 15 is the highest MCS index usable by a 2-stream device.
	 *
	 * All ath9k devices are either 1-stream or 2-stream.
	 *
	 * How many bars you see is derived from the qual reporting.
	 *
	 * A more elaborate scheme can be used here but it requires tables
	 * of SNR/throughput for each possible mode used. For the MCS table
	 * you can refer to the wireless wiki:
	 *
	 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
	 */
	if (conf_is_ht(&hw->conf))
		rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45;
	else
		rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 35;

	/* rssi can be more than 45 though, anything above that
	 * should be considered at 100% */
	if (rx_status->qual > 100)
		rx_status->qual = 100;

	rx_status->flag |= RX_FLAG_TSFT;

	return 1;
rx_next:
	return 0;
}
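/*
 * Worked example for the qual scaling in ath_rx_prepare() (plain
 * arithmetic on the code above, no measured data): an HT association
 * at hardware RSSI 35 reports qual = 35 * 100 / 45 = 77, while a
 * legacy association at the same RSSI reports 35 * 100 / 35 = 100,
 * i.e. "full bars".
 */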
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(sc);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_ah->macaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
				 min(sc->common.cachelsz, (u16)64));

	DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
		sc->common.cachelsz, sc->rx.bufsize);

	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1);
	if (error != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"failed to allocate rx descriptors: %d\n", error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(&sc->common, sc->rx.bufsize, GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
			goto err;
		}

		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 sc->rx.bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			DPRINTF(sc, ATH_DBG_FATAL,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;
	}
	sc->rx.rxlink = NULL;

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
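/*
 * Note on the rx.bufsize calculation in ath_rx_init() above: rounding
 * the maximum MPDU length up to min(cacheline size, 64) keeps each DMA
 * buffer a whole number of cache lines long, e.g. with a 32-byte cache
 * line the buffer length becomes the next multiple of 32. The value of
 * IEEE80211_MAX_MPDU_LEN itself is defined elsewhere in the driver
 * headers, not here.
 */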
void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 sc->rx.bufsize, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
		}
	}

	if (sc->rx.rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if (sc->rx.rxfilter & FIF_PSPOLL)
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (sc->sec_wiphy) {
		/* TODO: only needed if more than one BSSID is in use in
		 * station/adhoc mode */
		/* TODO: for older chips, may need to add ATH9K_RX_FILTER_PROM */
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);
	sc->rx.rxlink = NULL;

	return stopped;
}

void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	ath_rx_tasklet(sc, 1);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
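/*
 * Layout reminder for the IE walk in ath_beacon_dtim_pending_cab()
 * (standard IEEE 802.11 TIM element, not driver-specific):
 *
 *   | EID | len | DTIM count | DTIM period | bitmap ctrl | partial virtual bitmap |
 *
 * Bit 0 of the bitmap control octet is the multicast/broadcast
 * traffic-indication bit. Testing it only when dtim_count == 0 means:
 * "this beacon is a DTIM beacon and the AP has buffered bc/mc frames".
 */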
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(sc->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;

	if (sc->sc_flags & SC_OP_BEACON_SYNC) {
		sc->sc_flags &= ~SC_OP_BEACON_SYNC;
		DPRINTF(sc, ATH_DBG_PS, "Reconfigure Beacon timers based on "
			"timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating "
			"buffered broadcast/multicast frame(s)\n");
		sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
		return;
	}

	if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
		DPRINTF(sc, ATH_DBG_PS, "PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) &&
	    ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
		DPRINTF(sc, ATH_DBG_PS, "All PS CAB frames received, back to "
			"sleep\n");
	} else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
		DPRINTF(sc, ATH_DBG_PS, "Going back to sleep after having "
			"received PS-Poll data (0x%x)\n",
			sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
					SC_OP_WAIT_FOR_CAB |
					SC_OP_WAIT_FOR_PSPOLL_DATA |
					SC_OP_WAIT_FOR_TX_ACK));
	}
}
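/*
 * Note on the power-save handling above: the SC_OP_WAIT_FOR_* flags
 * form a small state machine. Each flag names one event (own beacon,
 * end of the CAB burst, PS-Poll response) that the driver must still
 * see before the chip may drop back to network sleep; ath_rx_ps() and
 * ath_rx_ps_beacon() clear them as the matching frames arrive.
 */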
static void ath_rx_send_to_mac80211(struct ath_softc *sc, struct sk_buff *skb,
				    struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;
			if (aphy == NULL)
				continue;
			nskb = skb_copy(skb, GFP_ATOMIC);
			if (nskb) {
				memcpy(IEEE80211_SKB_RXCB(nskb), rx_status,
				       sizeof(*rx_status));
				ieee80211_rx(aphy->hw, nskb);
			}
		}
		memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
		ieee80211_rx(sc->hw, skb);
	} else {
		/* Deliver unicast frames based on receiver address */
		memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
		ieee80211_rx(ath_get_virt_hw(sc, hdr), skb);
	}
}

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)						\
	((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc +	\
			     ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))

	struct ath_buf *bf;
	struct ath_desc *ds;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status rx_status;
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padsize, retval;
	bool decrypt_error = false;
	u8 keyix;
	__le16 fc;

	spin_lock_bh(&sc->rx.rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		if (list_empty(&sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			break;
		}

		bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
		ds = bf->bf_desc;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on. All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath9k_hw_rxprocdesc(ah, ds,
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

			if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
				sc->rx.rxlink = NULL;
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */

			tds = tbf->bf_desc;
			retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
						     PA2DESC(sc, tds->ds_link), 0);
			if (retval == -EINPROGRESS) {
				break;
			}
		}

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Synchronize the DMA transfer with CPU before
		 * 1. accessing the frame
		 * 2. requeueing the same buffer to h/w
		 */
		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
					sc->rx.bufsize,
					DMA_FROM_DEVICE);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		if (!ds->ds_rxstat.rs_datalen)
			goto requeue;

		/* The status portion of the descriptor could get corrupted. */
		if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen)
			goto requeue;

		if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(&sc->common, sc->rx.bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 sc->rx.bufsize,
				 DMA_FROM_DEVICE);

		skb_put(skb, ds->ds_rxstat.rs_datalen);

		/* see if any padding is done by the hw and remove it */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		fc = hdr->frame_control;

		/* The MAC header is padded to have 32-bit boundary if the
		 * packet payload is non-zero. The general calculation for
		 * padsize would take into account odd header lengths:
		 * padsize = (4 - hdrlen % 4) % 4; However, since only
		 * even-length headers are used, padding can only be 0 or 2
		 * bytes and we can optimize this a bit. In addition, we must
		 * not try to remove padding from short control frames that do
		 * not have payload. */
		padsize = hdrlen & 3;
		if (padsize && hdrlen >= 24) {
			memmove(skb->data + padsize, skb->data, hdrlen);
			skb_pull(skb, padsize);
		}
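		/*
		 * Padding example: a QoS data header is 26 bytes, so the
		 * hardware pads the payload start by 2 bytes to reach a
		 * 32-bit boundary; hdrlen & 3 == 2 and the memmove above
		 * shifts the header forward over the pad before the frame
		 * is handed to mac80211.
		 */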
		keyix = ds->ds_rxstat.rs_keyix;

		if ((keyix != ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
			rx_status.flag |= RX_FLAG_DECRYPTED;
		} else if (ieee80211_has_protected(fc)
			   && !decrypt_error && skb->len >= hdrlen + 4) {
			keyix = skb->data[hdrlen + 3] >> 6;

			if (test_bit(keyix, sc->keymap))
				rx_status.flag |= RX_FLAG_DECRYPTED;
		}
		if (ah->sw_mgmt_crypto &&
		    (rx_status.flag & RX_FLAG_DECRYPTED) &&
		    ieee80211_is_mgmt(fc)) {
			/* Use software decrypt for management frames. */
			rx_status.flag &= ~RX_FLAG_DECRYPTED;
		}

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 sc->rx.bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			DPRINTF(sc, ATH_DBG_FATAL,
				"dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(sc, skb, &rx_status);
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
					     SC_OP_WAIT_FOR_CAB |
					     SC_OP_WAIT_FOR_PSPOLL_DATA)))
			ath_rx_ps(sc, skb);

		ath_rx_send_to_mac80211(sc, skb, &rx_status);

requeue:
		list_move_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_buf_link(sc, bf);
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
#undef PA2DESC
}