rx.c revision b1f93314bfc4d5753391616735f6b8df96db901d
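/*
 * Illustrative sketch, not part of the original rx.c: the RX handlers in this
 * file consume the per-frame struct ieee80211_rx_status that the driver fills
 * in before handing a frame to mac80211.  Assuming a driver-private hw
 * pointer, an skb holding one received MPDU and a hypothetical read_hw_tsf()
 * helper, the driver side looks roughly like:
 *
 *	struct ieee80211_rx_status rx_status = {};
 *
 *	rx_status.band = IEEE80211_BAND_2GHZ;
 *	rx_status.freq = 2437;			// channel 6, in MHz
 *	rx_status.signal = -42;			// dBm, with IEEE80211_HW_SIGNAL_DBM
 *	rx_status.antenna = 0;
 *	rx_status.rate_idx = 0;			// index into the band's bitrate table
 *	rx_status.mactime = read_hw_tsf();	// hypothetical helper
 *	rx_status.flag |= RX_FLAG_TSFT;		// only if mactime is valid
 *
 *	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
 *	ieee80211_rx_irqsafe(hw, skb);
 *
 * The flags set here (RX_FLAG_TSFT, RX_FLAG_DECRYPTED, RX_FLAG_FAILED_FCS_CRC,
 * ...) are what the radiotap, monitor and decryption handlers below key off.
 */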
1/* 2 * Copyright 2002-2005, Instant802 Networks, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc. 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 12#include <linux/jiffies.h> 13#include <linux/slab.h> 14#include <linux/kernel.h> 15#include <linux/skbuff.h> 16#include <linux/netdevice.h> 17#include <linux/etherdevice.h> 18#include <linux/rcupdate.h> 19#include <net/mac80211.h> 20#include <net/ieee80211_radiotap.h> 21 22#include "ieee80211_i.h" 23#include "driver-ops.h" 24#include "led.h" 25#include "mesh.h" 26#include "wep.h" 27#include "wpa.h" 28#include "tkip.h" 29#include "wme.h" 30 31/* 32 * monitor mode reception 33 * 34 * This function cleans up the SKB, i.e. it removes all the stuff 35 * only useful for monitoring. 36 */ 37static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, 38 struct sk_buff *skb) 39{ 40 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { 41 if (likely(skb->len > FCS_LEN)) 42 __pskb_trim(skb, skb->len - FCS_LEN); 43 else { 44 /* driver bug */ 45 WARN_ON(1); 46 dev_kfree_skb(skb); 47 skb = NULL; 48 } 49 } 50 51 return skb; 52} 53 54static inline int should_drop_frame(struct sk_buff *skb, 55 int present_fcs_len) 56{ 57 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 58 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 59 60 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 61 return 1; 62 if (unlikely(skb->len < 16 + present_fcs_len)) 63 return 1; 64 if (ieee80211_is_ctl(hdr->frame_control) && 65 !ieee80211_is_pspoll(hdr->frame_control) && 66 !ieee80211_is_back_req(hdr->frame_control)) 67 return 1; 68 return 0; 69} 70 71static int 72ieee80211_rx_radiotap_len(struct ieee80211_local *local, 73 struct ieee80211_rx_status *status) 74{ 75 int len; 76 77 /* always present fields */ 78 len = sizeof(struct ieee80211_radiotap_header) + 9; 79 80 if (status->flag & RX_FLAG_TSFT) 81 len += 8; 82 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 83 len += 1; 84 85 if (len & 1) /* padding for RX_FLAGS if necessary */ 86 len++; 87 88 if (status->flag & RX_FLAG_HT) /* HT info */ 89 len += 3; 90 91 return len; 92} 93 94/* 95 * ieee80211_add_rx_radiotap_header - add radiotap header 96 * 97 * add a radiotap header containing all the fields which the hardware provided. 
98 */ 99static void 100ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 101 struct sk_buff *skb, 102 struct ieee80211_rate *rate, 103 int rtap_len) 104{ 105 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 106 struct ieee80211_radiotap_header *rthdr; 107 unsigned char *pos; 108 u16 rx_flags = 0; 109 110 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); 111 memset(rthdr, 0, rtap_len); 112 113 /* radiotap header, set always present flags */ 114 rthdr->it_present = 115 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | 116 (1 << IEEE80211_RADIOTAP_CHANNEL) | 117 (1 << IEEE80211_RADIOTAP_ANTENNA) | 118 (1 << IEEE80211_RADIOTAP_RX_FLAGS)); 119 rthdr->it_len = cpu_to_le16(rtap_len); 120 121 pos = (unsigned char *)(rthdr+1); 122 123 /* the order of the following fields is important */ 124 125 /* IEEE80211_RADIOTAP_TSFT */ 126 if (status->flag & RX_FLAG_TSFT) { 127 put_unaligned_le64(status->mactime, pos); 128 rthdr->it_present |= 129 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); 130 pos += 8; 131 } 132 133 /* IEEE80211_RADIOTAP_FLAGS */ 134 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 135 *pos |= IEEE80211_RADIOTAP_F_FCS; 136 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 137 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 138 if (status->flag & RX_FLAG_SHORTPRE) 139 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; 140 pos++; 141 142 /* IEEE80211_RADIOTAP_RATE */ 143 if (status->flag & RX_FLAG_HT) { 144 /* 145 * TODO: add following information into radiotap header once 146 * suitable fields are defined for it: 147 * - MCS index (status->rate_idx) 148 * - HT40 (status->flag & RX_FLAG_40MHZ) 149 * - short-GI (status->flag & RX_FLAG_SHORT_GI) 150 */ 151 *pos = 0; 152 } else { 153 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); 154 *pos = rate->bitrate / 5; 155 } 156 pos++; 157 158 /* IEEE80211_RADIOTAP_CHANNEL */ 159 put_unaligned_le16(status->freq, pos); 160 pos += 2; 161 if (status->band == IEEE80211_BAND_5GHZ) 162 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, 163 pos); 164 else if (status->flag & RX_FLAG_HT) 165 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, 166 pos); 167 else if (rate->flags & IEEE80211_RATE_ERP_G) 168 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ, 169 pos); 170 else 171 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, 172 pos); 173 pos += 2; 174 175 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 176 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { 177 *pos = status->signal; 178 rthdr->it_present |= 179 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 180 pos++; 181 } 182 183 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ 184 185 /* IEEE80211_RADIOTAP_ANTENNA */ 186 *pos = status->antenna; 187 pos++; 188 189 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ 190 191 /* IEEE80211_RADIOTAP_RX_FLAGS */ 192 /* ensure 2 byte alignment for the 2 byte field as required */ 193 if ((pos - (u8 *)rthdr) & 1) 194 pos++; 195 if (status->flag & RX_FLAG_FAILED_PLCP_CRC) 196 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; 197 put_unaligned_le16(rx_flags, pos); 198 pos += 2; 199 200 if (status->flag & RX_FLAG_HT) { 201 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); 202 *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS | 203 IEEE80211_RADIOTAP_MCS_HAVE_GI | 204 IEEE80211_RADIOTAP_MCS_HAVE_BW; 205 *pos = 0; 206 if (status->flag & RX_FLAG_SHORT_GI) 207 *pos |= IEEE80211_RADIOTAP_MCS_SGI; 208 if (status->flag & RX_FLAG_40MHZ) 209 *pos |= IEEE80211_RADIOTAP_MCS_BW_40; 210 pos++; 
211 *pos++ = status->rate_idx; 212 } 213} 214 215/* 216 * This function copies a received frame to all monitor interfaces and 217 * returns a cleaned-up SKB that no longer includes the FCS nor the 218 * radiotap header the driver might have added. 219 */ 220static struct sk_buff * 221ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 222 struct ieee80211_rate *rate) 223{ 224 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); 225 struct ieee80211_sub_if_data *sdata; 226 int needed_headroom = 0; 227 struct sk_buff *skb, *skb2; 228 struct net_device *prev_dev = NULL; 229 int present_fcs_len = 0; 230 231 /* 232 * First, we may need to make a copy of the skb because 233 * (1) we need to modify it for radiotap (if not present), and 234 * (2) the other RX handlers will modify the skb we got. 235 * 236 * We don't need to, of course, if we aren't going to return 237 * the SKB because it has a bad FCS/PLCP checksum. 238 */ 239 240 /* room for the radiotap header based on driver features */ 241 needed_headroom = ieee80211_rx_radiotap_len(local, status); 242 243 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 244 present_fcs_len = FCS_LEN; 245 246 /* make sure hdr->frame_control is on the linear part */ 247 if (!pskb_may_pull(origskb, 2)) { 248 dev_kfree_skb(origskb); 249 return NULL; 250 } 251 252 if (!local->monitors) { 253 if (should_drop_frame(origskb, present_fcs_len)) { 254 dev_kfree_skb(origskb); 255 return NULL; 256 } 257 258 return remove_monitor_info(local, origskb); 259 } 260 261 if (should_drop_frame(origskb, present_fcs_len)) { 262 /* only need to expand headroom if necessary */ 263 skb = origskb; 264 origskb = NULL; 265 266 /* 267 * This shouldn't trigger often because most devices have an 268 * RX header they pull before we get here, and that should 269 * be big enough for our radiotap information. We should 270 * probably export the length to drivers so that we can have 271 * them allocate enough headroom to start with. 272 */ 273 if (skb_headroom(skb) < needed_headroom && 274 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { 275 dev_kfree_skb(skb); 276 return NULL; 277 } 278 } else { 279 /* 280 * Need to make a copy and possibly remove radiotap header 281 * and FCS from the original. 
282 */ 283 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); 284 285 origskb = remove_monitor_info(local, origskb); 286 287 if (!skb) 288 return origskb; 289 } 290 291 /* prepend radiotap information */ 292 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); 293 294 skb_reset_mac_header(skb); 295 skb->ip_summed = CHECKSUM_UNNECESSARY; 296 skb->pkt_type = PACKET_OTHERHOST; 297 skb->protocol = htons(ETH_P_802_2); 298 299 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 300 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 301 continue; 302 303 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) 304 continue; 305 306 if (!ieee80211_sdata_running(sdata)) 307 continue; 308 309 if (prev_dev) { 310 skb2 = skb_clone(skb, GFP_ATOMIC); 311 if (skb2) { 312 skb2->dev = prev_dev; 313 netif_receive_skb(skb2); 314 } 315 } 316 317 prev_dev = sdata->dev; 318 sdata->dev->stats.rx_packets++; 319 sdata->dev->stats.rx_bytes += skb->len; 320 } 321 322 if (prev_dev) { 323 skb->dev = prev_dev; 324 netif_receive_skb(skb); 325 } else 326 dev_kfree_skb(skb); 327 328 return origskb; 329} 330 331 332static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) 333{ 334 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 335 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 336 int tid; 337 338 /* does the frame have a qos control field? */ 339 if (ieee80211_is_data_qos(hdr->frame_control)) { 340 u8 *qc = ieee80211_get_qos_ctl(hdr); 341 /* frame has qos control */ 342 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 343 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) 344 status->rx_flags |= IEEE80211_RX_AMSDU; 345 } else { 346 /* 347 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): 348 * 349 * Sequence numbers for management frames, QoS data 350 * frames with a broadcast/multicast address in the 351 * Address 1 field, and all non-QoS data frames sent 352 * by QoS STAs are assigned using an additional single 353 * modulo-4096 counter, [...] 354 * 355 * We also use that counter for non-QoS STAs. 356 */ 357 tid = NUM_RX_DATA_QUEUES - 1; 358 } 359 360 rx->queue = tid; 361 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 362 * For now, set skb->priority to 0 for other cases. */ 363 rx->skb->priority = (tid > 7) ? 0 : tid; 364} 365 366/** 367 * DOC: Packet alignment 368 * 369 * Drivers always need to pass packets that are aligned to two-byte boundaries 370 * to the stack. 371 * 372 * Additionally, should, if possible, align the payload data in a way that 373 * guarantees that the contained IP header is aligned to a four-byte 374 * boundary. In the case of regular frames, this simply means aligning the 375 * payload to a four-byte boundary (because either the IP header is directly 376 * contained, or IV/RFC1042 headers that have a length divisible by four are 377 * in front of it). If the payload data is not properly aligned and the 378 * architecture doesn't support efficient unaligned operations, mac80211 379 * will align the data. 380 * 381 * With A-MSDU frames, however, the payload data address must yield two modulo 382 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 383 * push the IP header further back to a multiple of four again. Thankfully, the 384 * specs were sane enough this time around to require padding each A-MSDU 385 * subframe to a length that is a multiple of four. 
 *
 * Padding like that which Atheros hardware adds between the 802.11 header and
 * the payload is not supported; the driver is required to move the 802.11
 * header to be directly in front of the payload in that case.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ONCE((unsigned long)rx->skb->data & 1,
		  "unaligned packet at 0x%p\n", rx->skb->data);
#endif
}


/* rx handlers */

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	struct sk_buff *skb = rx->skb;

	if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
		return RX_CONTINUE;

	if (test_bit(SCAN_HW_SCANNING, &local->scanning))
		return ieee80211_scan_rx(rx->sdata, skb);

	if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
		/* drop all the other packets during a software scan anyway */
		if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
			dev_kfree_skb(skb);
		return RX_QUEUED;
	}

	/* scanning finished while the handlers were being invoked */
	I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
	return RX_DROP_UNUSABLE;
}


static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(hdr);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(hdr);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;

	if (skb->len < 24 + sizeof(*mmie) ||
	    !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id != WLAN_EID_MMIE ||
	    mmie->length != sizeof(*mmie) - 2)
		return -1;

	return le16_to_cpu(mmie->key_id);
}


static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
				return RX_DROP_MONITOR;
		}
	}

	/* If there is no established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
497 */ 498 499 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) { 500 struct ieee80211_mgmt *mgmt; 501 502 if (!ieee80211_is_mgmt(hdr->frame_control)) 503 return RX_DROP_MONITOR; 504 505 if (ieee80211_is_action(hdr->frame_control)) { 506 mgmt = (struct ieee80211_mgmt *)hdr; 507 if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK) 508 return RX_DROP_MONITOR; 509 return RX_CONTINUE; 510 } 511 512 if (ieee80211_is_probe_req(hdr->frame_control) || 513 ieee80211_is_probe_resp(hdr->frame_control) || 514 ieee80211_is_beacon(hdr->frame_control)) 515 return RX_CONTINUE; 516 517 return RX_DROP_MONITOR; 518 519 } 520 521#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l)) 522 523 if (ieee80211_is_data(hdr->frame_control) && 524 is_multicast_ether_addr(hdr->addr1) && 525 mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata)) 526 return RX_DROP_MONITOR; 527#undef msh_h_get 528 529 return RX_CONTINUE; 530} 531 532#define SEQ_MODULO 0x1000 533#define SEQ_MASK 0xfff 534 535static inline int seq_less(u16 sq1, u16 sq2) 536{ 537 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1); 538} 539 540static inline u16 seq_inc(u16 sq) 541{ 542 return (sq + 1) & SEQ_MASK; 543} 544 545static inline u16 seq_sub(u16 sq1, u16 sq2) 546{ 547 return (sq1 - sq2) & SEQ_MASK; 548} 549 550 551static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw, 552 struct tid_ampdu_rx *tid_agg_rx, 553 int index) 554{ 555 struct ieee80211_local *local = hw_to_local(hw); 556 struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; 557 struct ieee80211_rx_status *status; 558 559 lockdep_assert_held(&tid_agg_rx->reorder_lock); 560 561 if (!skb) 562 goto no_frame; 563 564 /* release the frame from the reorder ring buffer */ 565 tid_agg_rx->stored_mpdu_num--; 566 tid_agg_rx->reorder_buf[index] = NULL; 567 status = IEEE80211_SKB_RXCB(skb); 568 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE; 569 skb_queue_tail(&local->rx_skb_queue, skb); 570 571no_frame: 572 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 573} 574 575static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw, 576 struct tid_ampdu_rx *tid_agg_rx, 577 u16 head_seq_num) 578{ 579 int index; 580 581 lockdep_assert_held(&tid_agg_rx->reorder_lock); 582 583 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) { 584 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 585 tid_agg_rx->buf_size; 586 ieee80211_release_reorder_frame(hw, tid_agg_rx, index); 587 } 588} 589 590/* 591 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If 592 * the skb was added to the buffer longer than this time ago, the earlier 593 * frames that have not yet been received are assumed to be lost and the skb 594 * can be released for processing. This may also release other skb's from the 595 * reorder buffer if there are no additional gaps between the frames. 596 * 597 * Callers must hold tid_agg_rx->reorder_lock. 598 */ 599#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 600 601static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw, 602 struct tid_ampdu_rx *tid_agg_rx) 603{ 604 int index, j; 605 606 lockdep_assert_held(&tid_agg_rx->reorder_lock); 607 608 /* release the buffer until next missing frame */ 609 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 610 tid_agg_rx->buf_size; 611 if (!tid_agg_rx->reorder_buf[index] && 612 tid_agg_rx->stored_mpdu_num > 1) { 613 /* 614 * No buffers ready to be released, but check whether any 615 * frames in the reorder buffer have timed out. 
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!tid_agg_rx->reorder_buf[j]) {
				skipped++;
				continue;
			}
			if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				wiphy_debug(hw->wiphy,
					    "release an RX reorder frame due to timeout on earlier frames\n");
#endif
			ieee80211_release_reorder_frame(hw, tid_agg_rx, j);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
			skipped = 0;
		}
	} else while (tid_agg_rx->reorder_buf[index]) {
		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
							tid_agg_rx->buf_size;
	}

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = seq_sub(tid_agg_rx->head_seq_num,
				    tid_agg_rx->ssn) % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (tid_agg_rx->reorder_buf[j])
				break;
		}

 set_release_timer:

		mod_timer(&tid_agg_rx->reorder_timer,
			  tid_agg_rx->reorder_time[j] +
			  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
}

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/* frame with an out-of-date sequence number */
	if (seq_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (tid_agg_rx->reorder_buf[index]) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
718 */ 719 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 720 tid_agg_rx->stored_mpdu_num == 0) { 721 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 722 ret = false; 723 goto out; 724 } 725 726 /* put the frame in the reordering buffer */ 727 tid_agg_rx->reorder_buf[index] = skb; 728 tid_agg_rx->reorder_time[index] = jiffies; 729 tid_agg_rx->stored_mpdu_num++; 730 ieee80211_sta_reorder_release(hw, tid_agg_rx); 731 732 out: 733 spin_unlock(&tid_agg_rx->reorder_lock); 734 return ret; 735} 736 737/* 738 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns 739 * true if the MPDU was buffered, false if it should be processed. 740 */ 741static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx) 742{ 743 struct sk_buff *skb = rx->skb; 744 struct ieee80211_local *local = rx->local; 745 struct ieee80211_hw *hw = &local->hw; 746 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 747 struct sta_info *sta = rx->sta; 748 struct tid_ampdu_rx *tid_agg_rx; 749 u16 sc; 750 int tid; 751 752 if (!ieee80211_is_data_qos(hdr->frame_control)) 753 goto dont_reorder; 754 755 /* 756 * filter the QoS data rx stream according to 757 * STA/TID and check if this STA/TID is on aggregation 758 */ 759 760 if (!sta) 761 goto dont_reorder; 762 763 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; 764 765 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 766 if (!tid_agg_rx) 767 goto dont_reorder; 768 769 /* qos null data frames are excluded */ 770 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 771 goto dont_reorder; 772 773 /* new, potentially un-ordered, ampdu frame - process it */ 774 775 /* reset session timer */ 776 if (tid_agg_rx->timeout) 777 mod_timer(&tid_agg_rx->session_timer, 778 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 779 780 /* if this mpdu is fragmented - terminate rx aggregation session */ 781 sc = le16_to_cpu(hdr->seq_ctrl); 782 if (sc & IEEE80211_SCTL_FRAG) { 783 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 784 skb_queue_tail(&rx->sdata->skb_queue, skb); 785 ieee80211_queue_work(&local->hw, &rx->sdata->work); 786 return; 787 } 788 789 /* 790 * No locking needed -- we will only ever process one 791 * RX packet at a time, and thus own tid_agg_rx. All 792 * other code manipulating it needs to (and does) make 793 * sure that we cannot get to it any more before doing 794 * anything with it. 795 */ 796 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb)) 797 return; 798 799 dont_reorder: 800 skb_queue_tail(&local->rx_skb_queue, skb); 801} 802 803static ieee80211_rx_result debug_noinline 804ieee80211_rx_h_check(struct ieee80211_rx_data *rx) 805{ 806 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 807 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 808 809 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ 810 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { 811 if (unlikely(ieee80211_has_retry(hdr->frame_control) && 812 rx->sta->last_seq_ctrl[rx->queue] == 813 hdr->seq_ctrl)) { 814 if (status->rx_flags & IEEE80211_RX_RA_MATCH) { 815 rx->local->dot11FrameDuplicateCount++; 816 rx->sta->num_duplicates++; 817 } 818 return RX_DROP_UNUSABLE; 819 } else 820 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl; 821 } 822 823 if (unlikely(rx->skb->len < 16)) { 824 I802_DEBUG_INC(rx->local->rx_handlers_drop_short); 825 return RX_DROP_MONITOR; 826 } 827 828 /* Drop disallowed frame classes based on STA auth/assoc state; 829 * IEEE 802.11, Chap 5.5. 
830 * 831 * mac80211 filters only based on association state, i.e. it drops 832 * Class 3 frames from not associated stations. hostapd sends 833 * deauth/disassoc frames when needed. In addition, hostapd is 834 * responsible for filtering on both auth and assoc states. 835 */ 836 837 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 838 return ieee80211_rx_mesh_check(rx); 839 840 if (unlikely((ieee80211_is_data(hdr->frame_control) || 841 ieee80211_is_pspoll(hdr->frame_control)) && 842 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && 843 rx->sdata->vif.type != NL80211_IFTYPE_WDS && 844 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) { 845 if ((!ieee80211_has_fromds(hdr->frame_control) && 846 !ieee80211_has_tods(hdr->frame_control) && 847 ieee80211_is_data(hdr->frame_control)) || 848 !(status->rx_flags & IEEE80211_RX_RA_MATCH)) { 849 /* Drop IBSS frames and frames for other hosts 850 * silently. */ 851 return RX_DROP_MONITOR; 852 } 853 854 return RX_DROP_MONITOR; 855 } 856 857 return RX_CONTINUE; 858} 859 860 861static ieee80211_rx_result debug_noinline 862ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 863{ 864 struct sk_buff *skb = rx->skb; 865 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 866 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 867 int keyidx; 868 int hdrlen; 869 ieee80211_rx_result result = RX_DROP_UNUSABLE; 870 struct ieee80211_key *sta_ptk = NULL; 871 int mmie_keyidx = -1; 872 __le16 fc; 873 874 /* 875 * Key selection 101 876 * 877 * There are four types of keys: 878 * - GTK (group keys) 879 * - IGTK (group keys for management frames) 880 * - PTK (pairwise keys) 881 * - STK (station-to-station pairwise keys) 882 * 883 * When selecting a key, we have to distinguish between multicast 884 * (including broadcast) and unicast frames, the latter can only 885 * use PTKs and STKs while the former always use GTKs and IGTKs. 886 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then 887 * unicast frames can also use key indices like GTKs. Hence, if we 888 * don't have a PTK/STK we check the key index for a WEP key. 889 * 890 * Note that in a regular BSS, multicast frames are sent by the 891 * AP only, associated stations unicast the frame to the AP first 892 * which then multicasts it on their behalf. 893 * 894 * There is also a slight problem in IBSS mode: GTKs are negotiated 895 * with each station, that is something we don't currently handle. 896 * The spec seems to expect that one negotiates the same key with 897 * every station but there's no such requirement; VLANs could be 898 * possible. 899 */ 900 901 /* 902 * No point in finding a key and decrypting if the frame is neither 903 * addressed to us nor a multicast frame. 904 */ 905 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 906 return RX_CONTINUE; 907 908 /* start without a key */ 909 rx->key = NULL; 910 911 if (rx->sta) 912 sta_ptk = rcu_dereference(rx->sta->ptk); 913 914 fc = hdr->frame_control; 915 916 if (!ieee80211_has_protected(fc)) 917 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 918 919 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { 920 rx->key = sta_ptk; 921 if ((status->flag & RX_FLAG_DECRYPTED) && 922 (status->flag & RX_FLAG_IV_STRIPPED)) 923 return RX_CONTINUE; 924 /* Skip decryption if the frame is not protected. 
*/ 925 if (!ieee80211_has_protected(fc)) 926 return RX_CONTINUE; 927 } else if (mmie_keyidx >= 0) { 928 /* Broadcast/multicast robust management frame / BIP */ 929 if ((status->flag & RX_FLAG_DECRYPTED) && 930 (status->flag & RX_FLAG_IV_STRIPPED)) 931 return RX_CONTINUE; 932 933 if (mmie_keyidx < NUM_DEFAULT_KEYS || 934 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 935 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 936 if (rx->sta) 937 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); 938 if (!rx->key) 939 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 940 } else if (!ieee80211_has_protected(fc)) { 941 /* 942 * The frame was not protected, so skip decryption. However, we 943 * need to set rx->key if there is a key that could have been 944 * used so that the frame may be dropped if encryption would 945 * have been expected. 946 */ 947 struct ieee80211_key *key = NULL; 948 struct ieee80211_sub_if_data *sdata = rx->sdata; 949 int i; 950 951 if (ieee80211_is_mgmt(fc) && 952 is_multicast_ether_addr(hdr->addr1) && 953 (key = rcu_dereference(rx->sdata->default_mgmt_key))) 954 rx->key = key; 955 else { 956 if (rx->sta) { 957 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 958 key = rcu_dereference(rx->sta->gtk[i]); 959 if (key) 960 break; 961 } 962 } 963 if (!key) { 964 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 965 key = rcu_dereference(sdata->keys[i]); 966 if (key) 967 break; 968 } 969 } 970 if (key) 971 rx->key = key; 972 } 973 return RX_CONTINUE; 974 } else { 975 u8 keyid; 976 /* 977 * The device doesn't give us the IV so we won't be 978 * able to look up the key. That's ok though, we 979 * don't need to decrypt the frame, we just won't 980 * be able to keep statistics accurate. 981 * Except for key threshold notifications, should 982 * we somehow allow the driver to tell us which key 983 * the hardware used if this flag is set? 984 */ 985 if ((status->flag & RX_FLAG_DECRYPTED) && 986 (status->flag & RX_FLAG_IV_STRIPPED)) 987 return RX_CONTINUE; 988 989 hdrlen = ieee80211_hdrlen(fc); 990 991 if (rx->skb->len < 8 + hdrlen) 992 return RX_DROP_UNUSABLE; /* TODO: count this? */ 993 994 /* 995 * no need to call ieee80211_wep_get_keyidx, 996 * it verifies a bunch of things we've done already 997 */ 998 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); 999 keyidx = keyid >> 6; 1000 1001 /* check per-station GTK first, if multicast packet */ 1002 if (is_multicast_ether_addr(hdr->addr1) && rx->sta) 1003 rx->key = rcu_dereference(rx->sta->gtk[keyidx]); 1004 1005 /* if not found, try default key */ 1006 if (!rx->key) { 1007 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 1008 1009 /* 1010 * RSNA-protected unicast frames should always be 1011 * sent with pairwise or station-to-station keys, 1012 * but for WEP we allow using a key index as well. 1013 */ 1014 if (rx->key && 1015 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 1016 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 1017 !is_multicast_ether_addr(hdr->addr1)) 1018 rx->key = NULL; 1019 } 1020 } 1021 1022 if (rx->key) { 1023 rx->key->tx_rx_count++; 1024 /* TODO: add threshold stuff again */ 1025 } else { 1026 return RX_DROP_MONITOR; 1027 } 1028 1029 if (skb_linearize(rx->skb)) 1030 return RX_DROP_UNUSABLE; 1031 /* the hdr variable is invalid now! 
*/ 1032 1033 switch (rx->key->conf.cipher) { 1034 case WLAN_CIPHER_SUITE_WEP40: 1035 case WLAN_CIPHER_SUITE_WEP104: 1036 /* Check for weak IVs if possible */ 1037 if (rx->sta && ieee80211_is_data(fc) && 1038 (!(status->flag & RX_FLAG_IV_STRIPPED) || 1039 !(status->flag & RX_FLAG_DECRYPTED)) && 1040 ieee80211_wep_is_weak_iv(rx->skb, rx->key)) 1041 rx->sta->wep_weak_iv_count++; 1042 1043 result = ieee80211_crypto_wep_decrypt(rx); 1044 break; 1045 case WLAN_CIPHER_SUITE_TKIP: 1046 result = ieee80211_crypto_tkip_decrypt(rx); 1047 break; 1048 case WLAN_CIPHER_SUITE_CCMP: 1049 result = ieee80211_crypto_ccmp_decrypt(rx); 1050 break; 1051 case WLAN_CIPHER_SUITE_AES_CMAC: 1052 result = ieee80211_crypto_aes_cmac_decrypt(rx); 1053 break; 1054 default: 1055 /* 1056 * We can reach here only with HW-only algorithms 1057 * but why didn't it decrypt the frame?! 1058 */ 1059 return RX_DROP_UNUSABLE; 1060 } 1061 1062 /* either the frame has been decrypted or will be dropped */ 1063 status->flag |= RX_FLAG_DECRYPTED; 1064 1065 return result; 1066} 1067 1068static ieee80211_rx_result debug_noinline 1069ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx) 1070{ 1071 struct ieee80211_local *local; 1072 struct ieee80211_hdr *hdr; 1073 struct sk_buff *skb; 1074 1075 local = rx->local; 1076 skb = rx->skb; 1077 hdr = (struct ieee80211_hdr *) skb->data; 1078 1079 if (!local->pspolling) 1080 return RX_CONTINUE; 1081 1082 if (!ieee80211_has_fromds(hdr->frame_control)) 1083 /* this is not from AP */ 1084 return RX_CONTINUE; 1085 1086 if (!ieee80211_is_data(hdr->frame_control)) 1087 return RX_CONTINUE; 1088 1089 if (!ieee80211_has_moredata(hdr->frame_control)) { 1090 /* AP has no more frames buffered for us */ 1091 local->pspolling = false; 1092 return RX_CONTINUE; 1093 } 1094 1095 /* more data bit is set, let's request a new frame from the AP */ 1096 ieee80211_send_pspoll(local, rx->sdata); 1097 1098 return RX_CONTINUE; 1099} 1100 1101static void ap_sta_ps_start(struct sta_info *sta) 1102{ 1103 struct ieee80211_sub_if_data *sdata = sta->sdata; 1104 struct ieee80211_local *local = sdata->local; 1105 1106 atomic_inc(&sdata->bss->num_sta_ps); 1107 set_sta_flags(sta, WLAN_STA_PS_STA); 1108 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 1109 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); 1110#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1111 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", 1112 sdata->name, sta->sta.addr, sta->sta.aid); 1113#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1114} 1115 1116static void ap_sta_ps_end(struct sta_info *sta) 1117{ 1118 struct ieee80211_sub_if_data *sdata = sta->sdata; 1119 1120 atomic_dec(&sdata->bss->num_sta_ps); 1121 1122#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1123 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", 1124 sdata->name, sta->sta.addr, sta->sta.aid); 1125#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1126 1127 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) { 1128#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1129 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", 1130 sdata->name, sta->sta.addr, sta->sta.aid); 1131#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1132 return; 1133 } 1134 1135 ieee80211_sta_ps_deliver_wakeup(sta); 1136} 1137 1138int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start) 1139{ 1140 struct sta_info *sta_inf = container_of(sta, struct sta_info, sta); 1141 bool in_ps; 1142 1143 WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS)); 1144 1145 /* Don't let the same PS state be set 
twice */
	in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA);
	if ((start && in_ps) || (!start && !in_ps))
		return -EINVAL;

	if (start)
		ap_sta_ps_start(sta_inf);
	else
		ap_sta_ps_end(sta_inf);

	return 0;
}
EXPORT_SYMBOL(ieee80211_sta_ps_transition);

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID to avoid keeping the current IBSS network alive in cases
	 * where other STAs start using a different BSSID.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
			sta->last_rx = jiffies;
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx when they are found to
		 * match the current local configuration when processed.
		 */
		sta->last_rx = jiffies;
	}

	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
		return RX_CONTINUE;

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_fragments++;
	sta->rx_bytes += rx->skb->len;
	sta->last_signal = status->signal;
	ewma_add(&sta->avg_signal, -status->signal);

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence.
	 */
	if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
			/*
			 * Ignore doze->wake transitions that are
			 * indicated by non-data frames, the standard
			 * is unclear here, but for example going to
			 * PS mode and then scanning would cause a
			 * doze->wake transition for the probe request,
			 * and that is clearly undesirable.
			 */
			if (ieee80211_is_data(hdr->frame_control) &&
			    !ieee80211_has_pm(hdr->frame_control))
				ap_sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				ap_sta_ps_start(sta);
		}
	}

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet, drop
		 * the frame to the monitor interface, to make sure
		 * that hostapd sees it.
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta)))
			return RX_DROP_MONITOR;
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packet.
1248 */ 1249 sta->rx_packets++; 1250 dev_kfree_skb(rx->skb); 1251 return RX_QUEUED; 1252 } 1253 1254 return RX_CONTINUE; 1255} /* ieee80211_rx_h_sta_process */ 1256 1257static inline struct ieee80211_fragment_entry * 1258ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, 1259 unsigned int frag, unsigned int seq, int rx_queue, 1260 struct sk_buff **skb) 1261{ 1262 struct ieee80211_fragment_entry *entry; 1263 int idx; 1264 1265 idx = sdata->fragment_next; 1266 entry = &sdata->fragments[sdata->fragment_next++]; 1267 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) 1268 sdata->fragment_next = 0; 1269 1270 if (!skb_queue_empty(&entry->skb_list)) { 1271#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1272 struct ieee80211_hdr *hdr = 1273 (struct ieee80211_hdr *) entry->skb_list.next->data; 1274 printk(KERN_DEBUG "%s: RX reassembly removed oldest " 1275 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d " 1276 "addr1=%pM addr2=%pM\n", 1277 sdata->name, idx, 1278 jiffies - entry->first_frag_time, entry->seq, 1279 entry->last_frag, hdr->addr1, hdr->addr2); 1280#endif 1281 __skb_queue_purge(&entry->skb_list); 1282 } 1283 1284 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 1285 *skb = NULL; 1286 entry->first_frag_time = jiffies; 1287 entry->seq = seq; 1288 entry->rx_queue = rx_queue; 1289 entry->last_frag = frag; 1290 entry->ccmp = 0; 1291 entry->extra_len = 0; 1292 1293 return entry; 1294} 1295 1296static inline struct ieee80211_fragment_entry * 1297ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 1298 unsigned int frag, unsigned int seq, 1299 int rx_queue, struct ieee80211_hdr *hdr) 1300{ 1301 struct ieee80211_fragment_entry *entry; 1302 int i, idx; 1303 1304 idx = sdata->fragment_next; 1305 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 1306 struct ieee80211_hdr *f_hdr; 1307 1308 idx--; 1309 if (idx < 0) 1310 idx = IEEE80211_FRAGMENT_MAX - 1; 1311 1312 entry = &sdata->fragments[idx]; 1313 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 1314 entry->rx_queue != rx_queue || 1315 entry->last_frag + 1 != frag) 1316 continue; 1317 1318 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data; 1319 1320 /* 1321 * Check ftype and addresses are equal, else check next fragment 1322 */ 1323 if (((hdr->frame_control ^ f_hdr->frame_control) & 1324 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 1325 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || 1326 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) 1327 continue; 1328 1329 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 1330 __skb_queue_purge(&entry->skb_list); 1331 continue; 1332 } 1333 return entry; 1334 } 1335 1336 return NULL; 1337} 1338 1339static ieee80211_rx_result debug_noinline 1340ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 1341{ 1342 struct ieee80211_hdr *hdr; 1343 u16 sc; 1344 __le16 fc; 1345 unsigned int frag, seq; 1346 struct ieee80211_fragment_entry *entry; 1347 struct sk_buff *skb; 1348 struct ieee80211_rx_status *status; 1349 1350 hdr = (struct ieee80211_hdr *)rx->skb->data; 1351 fc = hdr->frame_control; 1352 sc = le16_to_cpu(hdr->seq_ctrl); 1353 frag = sc & IEEE80211_SCTL_FRAG; 1354 1355 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || 1356 (rx->skb)->len < 24 || 1357 is_multicast_ether_addr(hdr->addr1))) { 1358 /* not fragmented */ 1359 goto out; 1360 } 1361 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 1362 1363 if (skb_linearize(rx->skb)) 1364 return RX_DROP_UNUSABLE; 1365 1366 /* 1367 * skb_linearize() might change the skb->data and 1368 * previously 
cached variables (in this case, hdr) need to 1369 * be refreshed with the new data. 1370 */ 1371 hdr = (struct ieee80211_hdr *)rx->skb->data; 1372 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 1373 1374 if (frag == 0) { 1375 /* This is the first fragment of a new frame. */ 1376 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 1377 rx->queue, &(rx->skb)); 1378 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP && 1379 ieee80211_has_protected(fc)) { 1380 int queue = ieee80211_is_mgmt(fc) ? 1381 NUM_RX_DATA_QUEUES : rx->queue; 1382 /* Store CCMP PN so that we can verify that the next 1383 * fragment has a sequential PN value. */ 1384 entry->ccmp = 1; 1385 memcpy(entry->last_pn, 1386 rx->key->u.ccmp.rx_pn[queue], 1387 CCMP_PN_LEN); 1388 } 1389 return RX_QUEUED; 1390 } 1391 1392 /* This is a fragment for a frame that should already be pending in 1393 * fragment cache. Add this fragment to the end of the pending entry. 1394 */ 1395 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr); 1396 if (!entry) { 1397 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1398 return RX_DROP_MONITOR; 1399 } 1400 1401 /* Verify that MPDUs within one MSDU have sequential PN values. 1402 * (IEEE 802.11i, 8.3.3.4.5) */ 1403 if (entry->ccmp) { 1404 int i; 1405 u8 pn[CCMP_PN_LEN], *rpn; 1406 int queue; 1407 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP) 1408 return RX_DROP_UNUSABLE; 1409 memcpy(pn, entry->last_pn, CCMP_PN_LEN); 1410 for (i = CCMP_PN_LEN - 1; i >= 0; i--) { 1411 pn[i]++; 1412 if (pn[i]) 1413 break; 1414 } 1415 queue = ieee80211_is_mgmt(fc) ? 1416 NUM_RX_DATA_QUEUES : rx->queue; 1417 rpn = rx->key->u.ccmp.rx_pn[queue]; 1418 if (memcmp(pn, rpn, CCMP_PN_LEN)) 1419 return RX_DROP_UNUSABLE; 1420 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 1421 } 1422 1423 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 1424 __skb_queue_tail(&entry->skb_list, rx->skb); 1425 entry->last_frag = frag; 1426 entry->extra_len += rx->skb->len; 1427 if (ieee80211_has_morefrags(fc)) { 1428 rx->skb = NULL; 1429 return RX_QUEUED; 1430 } 1431 1432 rx->skb = __skb_dequeue(&entry->skb_list); 1433 if (skb_tailroom(rx->skb) < entry->extra_len) { 1434 I802_DEBUG_INC(rx->local->rx_expand_skb_head2); 1435 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 1436 GFP_ATOMIC))) { 1437 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1438 __skb_queue_purge(&entry->skb_list); 1439 return RX_DROP_UNUSABLE; 1440 } 1441 } 1442 while ((skb = __skb_dequeue(&entry->skb_list))) { 1443 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); 1444 dev_kfree_skb(skb); 1445 } 1446 1447 /* Complete frame has been reassembled - process it now */ 1448 status = IEEE80211_SKB_RXCB(rx->skb); 1449 status->rx_flags |= IEEE80211_RX_FRAGMENTED; 1450 1451 out: 1452 if (rx->sta) 1453 rx->sta->rx_packets++; 1454 if (is_multicast_ether_addr(hdr->addr1)) 1455 rx->local->dot11MulticastReceivedFrameCount++; 1456 else 1457 ieee80211_led_rx(rx->local); 1458 return RX_CONTINUE; 1459} 1460 1461static ieee80211_rx_result debug_noinline 1462ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) 1463{ 1464 struct ieee80211_sub_if_data *sdata = rx->sdata; 1465 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control; 1466 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1467 1468 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) || 1469 !(status->rx_flags & IEEE80211_RX_RA_MATCH))) 1470 return RX_CONTINUE; 1471 1472 if ((sdata->vif.type != NL80211_IFTYPE_AP) && 1473 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)) 
		return RX_DROP_UNUSABLE;

	if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_poll_response(rx->sta);
	else
		set_sta_flags(rx->sta, WLAN_STA_PSPOLL);

	/* Free PS Poll skb here instead of returning RX_DROP that would
	 * count as a dropped frame. */
	dev_kfree_skb(rx->skb);

	return RX_QUEUED;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
{
	u8 *data = rx->skb->data;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return RX_CONTINUE;

	/* remove the qos control field, update frame type and meta-data */
	memmove(data + IEEE80211_QOS_CTL_LEN, data,
		ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
	hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
	/* change frame type to non-QoS */
	hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

	return RX_CONTINUE;
}

static int
ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
{
	if (unlikely(!rx->sta ||
		     !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
		return -EACCES;

	return 0;
}

static int
ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
	 */
	if (status->flag & RX_FLAG_DECRYPTED)
		return 0;

	/* Drop unencrypted frames if key is set. */
	if (unlikely(!ieee80211_has_protected(fc) &&
		     !ieee80211_is_nullfunc(fc) &&
		     ieee80211_is_data(fc) &&
		     (rx->key || rx->sdata->drop_unencrypted)))
		return -EACCES;

	return 0;
}

static int
ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	__le16 fc = hdr->frame_control;

	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
	 */
	if (status->flag & RX_FLAG_DECRYPTED)
		return 0;

	if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
		if (unlikely(!ieee80211_has_protected(fc) &&
			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
			     rx->key)) {
			if (ieee80211_is_deauth(fc))
				cfg80211_send_unprot_deauth(rx->sdata->dev,
							    rx->skb->data,
							    rx->skb->len);
			else if (ieee80211_is_disassoc(fc))
				cfg80211_send_unprot_disassoc(rx->sdata->dev,
							      rx->skb->data,
							      rx->skb->len);
			return -EACCES;
		}
		/* BIP does not use the Protected field, so we need to check the MMIE */
		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
			if (ieee80211_is_deauth(fc))
				cfg80211_send_unprot_deauth(rx->sdata->dev,
							    rx->skb->data,
							    rx->skb->len);
			else if (ieee80211_is_disassoc(fc))
				cfg80211_send_unprot_disassoc(rx->sdata->dev,
							      rx->skb->data,
							      rx->skb->len);
			return -EACCES;
		}
		/*
		 * When using MFP, Action frames are not allowed prior to
		 * having configured keys.
1584 */ 1585 if (unlikely(ieee80211_is_action(fc) && !rx->key && 1586 ieee80211_is_robust_mgmt_frame( 1587 (struct ieee80211_hdr *) rx->skb->data))) 1588 return -EACCES; 1589 } 1590 1591 return 0; 1592} 1593 1594static int 1595__ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1596{ 1597 struct ieee80211_sub_if_data *sdata = rx->sdata; 1598 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1599 bool check_port_control = false; 1600 struct ethhdr *ehdr; 1601 int ret; 1602 1603 if (ieee80211_has_a4(hdr->frame_control) && 1604 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 1605 return -1; 1606 1607 if (sdata->vif.type == NL80211_IFTYPE_STATION && 1608 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 1609 1610 if (!sdata->u.mgd.use_4addr) 1611 return -1; 1612 else 1613 check_port_control = true; 1614 } 1615 1616 if (is_multicast_ether_addr(hdr->addr1) && 1617 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 1618 return -1; 1619 1620 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 1621 if (ret < 0 || !check_port_control) 1622 return ret; 1623 1624 ehdr = (struct ethhdr *) rx->skb->data; 1625 if (ehdr->h_proto != rx->sdata->control_port_protocol) 1626 return -1; 1627 1628 return 0; 1629} 1630 1631/* 1632 * requires that rx->skb is a frame with ethernet header 1633 */ 1634static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 1635{ 1636 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 1637 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 1638 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 1639 1640 /* 1641 * Allow EAPOL frames to us/the PAE group address regardless 1642 * of whether the frame was encrypted or not. 1643 */ 1644 if (ehdr->h_proto == rx->sdata->control_port_protocol && 1645 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 || 1646 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) 1647 return true; 1648 1649 if (ieee80211_802_1x_port_control(rx) || 1650 ieee80211_drop_unencrypted(rx, fc)) 1651 return false; 1652 1653 return true; 1654} 1655 1656/* 1657 * requires that rx->skb is a frame with ethernet header 1658 */ 1659static void 1660ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 1661{ 1662 struct ieee80211_sub_if_data *sdata = rx->sdata; 1663 struct net_device *dev = sdata->dev; 1664 struct sk_buff *skb, *xmit_skb; 1665 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 1666 struct sta_info *dsta; 1667 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1668 1669 skb = rx->skb; 1670 xmit_skb = NULL; 1671 1672 if ((sdata->vif.type == NL80211_IFTYPE_AP || 1673 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 1674 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 1675 (status->rx_flags & IEEE80211_RX_RA_MATCH) && 1676 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 1677 if (is_multicast_ether_addr(ehdr->h_dest)) { 1678 /* 1679 * send multicast frames both to higher layers in 1680 * local net stack and back to the wireless medium 1681 */ 1682 xmit_skb = skb_copy(skb, GFP_ATOMIC); 1683 if (!xmit_skb && net_ratelimit()) 1684 printk(KERN_DEBUG "%s: failed to clone " 1685 "multicast frame\n", dev->name); 1686 } else { 1687 dsta = sta_info_get(sdata, skb->data); 1688 if (dsta) { 1689 /* 1690 * The destination station is associated to 1691 * this AP (in this VLAN), so send the frame 1692 * directly to it and do not pass it to local 1693 * net stack. 
1694 */ 1695 xmit_skb = skb; 1696 skb = NULL; 1697 } 1698 } 1699 } 1700 1701 if (skb) { 1702 int align __maybe_unused; 1703 1704#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1705 /* 1706 * 'align' will only take the values 0 or 2 here 1707 * since all frames are required to be aligned 1708 * to 2-byte boundaries when being passed to 1709 * mac80211. That also explains the __skb_push() 1710 * below. 1711 */ 1712 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3; 1713 if (align) { 1714 if (WARN_ON(skb_headroom(skb) < 3)) { 1715 dev_kfree_skb(skb); 1716 skb = NULL; 1717 } else { 1718 u8 *data = skb->data; 1719 size_t len = skb_headlen(skb); 1720 skb->data -= align; 1721 memmove(skb->data, data, len); 1722 skb_set_tail_pointer(skb, len); 1723 } 1724 } 1725#endif 1726 1727 if (skb) { 1728 /* deliver to local stack */ 1729 skb->protocol = eth_type_trans(skb, dev); 1730 memset(skb->cb, 0, sizeof(skb->cb)); 1731 netif_receive_skb(skb); 1732 } 1733 } 1734 1735 if (xmit_skb) { 1736 /* send to wireless media */ 1737 xmit_skb->protocol = htons(ETH_P_802_3); 1738 skb_reset_network_header(xmit_skb); 1739 skb_reset_mac_header(xmit_skb); 1740 dev_queue_xmit(xmit_skb); 1741 } 1742} 1743 1744static ieee80211_rx_result debug_noinline 1745ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 1746{ 1747 struct net_device *dev = rx->sdata->dev; 1748 struct sk_buff *skb = rx->skb; 1749 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1750 __le16 fc = hdr->frame_control; 1751 struct sk_buff_head frame_list; 1752 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1753 1754 if (unlikely(!ieee80211_is_data(fc))) 1755 return RX_CONTINUE; 1756 1757 if (unlikely(!ieee80211_is_data_present(fc))) 1758 return RX_DROP_MONITOR; 1759 1760 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 1761 return RX_CONTINUE; 1762 1763 if (ieee80211_has_a4(hdr->frame_control) && 1764 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1765 !rx->sdata->u.vlan.sta) 1766 return RX_DROP_UNUSABLE; 1767 1768 if (is_multicast_ether_addr(hdr->addr1) && 1769 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1770 rx->sdata->u.vlan.sta) || 1771 (rx->sdata->vif.type == NL80211_IFTYPE_STATION && 1772 rx->sdata->u.mgd.use_4addr))) 1773 return RX_DROP_UNUSABLE; 1774 1775 skb->dev = dev; 1776 __skb_queue_head_init(&frame_list); 1777 1778 if (skb_linearize(skb)) 1779 return RX_DROP_UNUSABLE; 1780 1781 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 1782 rx->sdata->vif.type, 1783 rx->local->hw.extra_tx_headroom); 1784 1785 while (!skb_queue_empty(&frame_list)) { 1786 rx->skb = __skb_dequeue(&frame_list); 1787 1788 if (!ieee80211_frame_allowed(rx, fc)) { 1789 dev_kfree_skb(rx->skb); 1790 continue; 1791 } 1792 dev->stats.rx_packets++; 1793 dev->stats.rx_bytes += rx->skb->len; 1794 1795 ieee80211_deliver_skb(rx); 1796 } 1797 1798 return RX_QUEUED; 1799} 1800 1801#ifdef CONFIG_MAC80211_MESH 1802static ieee80211_rx_result 1803ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 1804{ 1805 struct ieee80211_hdr *hdr; 1806 struct ieee80211s_hdr *mesh_hdr; 1807 unsigned int hdrlen; 1808 struct sk_buff *skb = rx->skb, *fwd_skb; 1809 struct ieee80211_local *local = rx->local; 1810 struct ieee80211_sub_if_data *sdata = rx->sdata; 1811 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1812 1813 hdr = (struct ieee80211_hdr *) skb->data; 1814 hdrlen = ieee80211_hdrlen(hdr->frame_control); 1815 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 1816 1817 if (!ieee80211_is_data(hdr->frame_control)) 1818 
return RX_CONTINUE; 1819 1820 if (!mesh_hdr->ttl) 1821 /* illegal frame */ 1822 return RX_DROP_MONITOR; 1823 1824 if (mesh_hdr->flags & MESH_FLAGS_AE) { 1825 struct mesh_path *mppath; 1826 char *proxied_addr; 1827 char *mpp_addr; 1828 1829 if (is_multicast_ether_addr(hdr->addr1)) { 1830 mpp_addr = hdr->addr3; 1831 proxied_addr = mesh_hdr->eaddr1; 1832 } else { 1833 mpp_addr = hdr->addr4; 1834 proxied_addr = mesh_hdr->eaddr2; 1835 } 1836 1837 rcu_read_lock(); 1838 mppath = mpp_path_lookup(proxied_addr, sdata); 1839 if (!mppath) { 1840 mpp_path_add(proxied_addr, mpp_addr, sdata); 1841 } else { 1842 spin_lock_bh(&mppath->state_lock); 1843 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) 1844 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 1845 spin_unlock_bh(&mppath->state_lock); 1846 } 1847 rcu_read_unlock(); 1848 } 1849 1850 /* Frame has reached destination. Don't forward */ 1851 if (!is_multicast_ether_addr(hdr->addr1) && 1852 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0) 1853 return RX_CONTINUE; 1854 1855 mesh_hdr->ttl--; 1856 1857 if (status->rx_flags & IEEE80211_RX_RA_MATCH) { 1858 if (!mesh_hdr->ttl) 1859 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh, 1860 dropped_frames_ttl); 1861 else { 1862 struct ieee80211_hdr *fwd_hdr; 1863 struct ieee80211_tx_info *info; 1864 1865 fwd_skb = skb_copy(skb, GFP_ATOMIC); 1866 1867 if (!fwd_skb && net_ratelimit()) 1868 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", 1869 sdata->name); 1870 if (!fwd_skb) 1871 goto out; 1872 1873 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 1874 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 1875 info = IEEE80211_SKB_CB(fwd_skb); 1876 memset(info, 0, sizeof(*info)); 1877 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1878 info->control.vif = &rx->sdata->vif; 1879 skb_set_queue_mapping(skb, 1880 ieee80211_select_queue(rx->sdata, fwd_skb)); 1881 ieee80211_set_qos_hdr(local, skb); 1882 if (is_multicast_ether_addr(fwd_hdr->addr1)) 1883 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, 1884 fwded_mcast); 1885 else { 1886 int err; 1887 /* 1888 * Save TA to addr1 to send TA a path error if a 1889 * suitable next hop is not found 1890 */ 1891 memcpy(fwd_hdr->addr1, fwd_hdr->addr2, 1892 ETH_ALEN); 1893 err = mesh_nexthop_lookup(fwd_skb, sdata); 1894 /* Failed to immediately resolve next hop: 1895 * fwded frame was dropped or will be added 1896 * later to the pending skb queue. 
*/ 1897 if (err) 1898 return RX_DROP_MONITOR; 1899 1900 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, 1901 fwded_unicast); 1902 } 1903 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, 1904 fwded_frames); 1905 ieee80211_add_pending_skb(local, fwd_skb); 1906 } 1907 } 1908 1909 out: 1910 if (is_multicast_ether_addr(hdr->addr1) || 1911 sdata->dev->flags & IFF_PROMISC) 1912 return RX_CONTINUE; 1913 else 1914 return RX_DROP_MONITOR; 1915} 1916#endif 1917 1918static ieee80211_rx_result debug_noinline 1919ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1920{ 1921 struct ieee80211_sub_if_data *sdata = rx->sdata; 1922 struct ieee80211_local *local = rx->local; 1923 struct net_device *dev = sdata->dev; 1924 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1925 __le16 fc = hdr->frame_control; 1926 int err; 1927 1928 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 1929 return RX_CONTINUE; 1930 1931 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 1932 return RX_DROP_MONITOR; 1933 1934 /* 1935 * Allow the cooked monitor interface of an AP to see 4-addr frames so 1936 * that a 4-addr station can be detected and moved into a separate VLAN 1937 */ 1938 if (ieee80211_has_a4(hdr->frame_control) && 1939 sdata->vif.type == NL80211_IFTYPE_AP) 1940 return RX_DROP_MONITOR; 1941 1942 err = __ieee80211_data_to_8023(rx); 1943 if (unlikely(err)) 1944 return RX_DROP_UNUSABLE; 1945 1946 if (!ieee80211_frame_allowed(rx, fc)) 1947 return RX_DROP_MONITOR; 1948 1949 rx->skb->dev = dev; 1950 1951 dev->stats.rx_packets++; 1952 dev->stats.rx_bytes += rx->skb->len; 1953 1954 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 1955 !is_multicast_ether_addr( 1956 ((struct ethhdr *)rx->skb->data)->h_dest) && 1957 (!local->scanning && 1958 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) { 1959 mod_timer(&local->dynamic_ps_timer, jiffies + 1960 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 1961 } 1962 1963 ieee80211_deliver_skb(rx); 1964 1965 return RX_QUEUED; 1966} 1967 1968static ieee80211_rx_result debug_noinline 1969ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) 1970{ 1971 struct ieee80211_local *local = rx->local; 1972 struct ieee80211_hw *hw = &local->hw; 1973 struct sk_buff *skb = rx->skb; 1974 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 1975 struct tid_ampdu_rx *tid_agg_rx; 1976 u16 start_seq_num; 1977 u16 tid; 1978 1979 if (likely(!ieee80211_is_ctl(bar->frame_control))) 1980 return RX_CONTINUE; 1981 1982 if (ieee80211_is_back_req(bar->frame_control)) { 1983 struct { 1984 __le16 control, start_seq_num; 1985 } __packed bar_data; 1986 1987 if (!rx->sta) 1988 return RX_DROP_MONITOR; 1989 1990 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 1991 &bar_data, sizeof(bar_data))) 1992 return RX_DROP_MONITOR; 1993 1994 tid = le16_to_cpu(bar_data.control) >> 12; 1995 1996 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 1997 if (!tid_agg_rx) 1998 return RX_DROP_MONITOR; 1999 2000 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 2001 2002 /* reset session timer */ 2003 if (tid_agg_rx->timeout) 2004 mod_timer(&tid_agg_rx->session_timer, 2005 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 2006 2007 spin_lock(&tid_agg_rx->reorder_lock); 2008 /* release stored frames up to start of BAR */ 2009 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num); 2010 spin_unlock(&tid_agg_rx->reorder_lock); 2011 2012 kfree_skb(skb); 2013 return RX_QUEUED; 2014 } 2015 2016 /* 2017 * After this point, we only want management frames, 2018 * 
so we can drop all remaining control frames to 2019 * cooked monitor interfaces. 2020 */ 2021 return RX_DROP_MONITOR; 2022} 2023 2024static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 2025 struct ieee80211_mgmt *mgmt, 2026 size_t len) 2027{ 2028 struct ieee80211_local *local = sdata->local; 2029 struct sk_buff *skb; 2030 struct ieee80211_mgmt *resp; 2031 2032 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) { 2033 /* Not to own unicast address */ 2034 return; 2035 } 2036 2037 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 || 2038 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) { 2039 /* Not from the current AP or not associated yet. */ 2040 return; 2041 } 2042 2043 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 2044 /* Too short SA Query request frame */ 2045 return; 2046 } 2047 2048 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 2049 if (skb == NULL) 2050 return; 2051 2052 skb_reserve(skb, local->hw.extra_tx_headroom); 2053 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 2054 memset(resp, 0, 24); 2055 memcpy(resp->da, mgmt->sa, ETH_ALEN); 2056 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 2057 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 2058 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2059 IEEE80211_STYPE_ACTION); 2060 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 2061 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 2062 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 2063 memcpy(resp->u.action.u.sa_query.trans_id, 2064 mgmt->u.action.u.sa_query.trans_id, 2065 WLAN_SA_QUERY_TR_ID_LEN); 2066 2067 ieee80211_tx_skb(sdata, skb); 2068} 2069 2070static ieee80211_rx_result debug_noinline 2071ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 2072{ 2073 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2074 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2075 2076 /* 2077 * From here on, look only at management frames. 2078 * Data and control frames are already handled, 2079 * and unknown (reserved) frames are useless. 2080 */ 2081 if (rx->skb->len < 24) 2082 return RX_DROP_MONITOR; 2083 2084 if (!ieee80211_is_mgmt(mgmt->frame_control)) 2085 return RX_DROP_MONITOR; 2086 2087 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2088 return RX_DROP_MONITOR; 2089 2090 if (ieee80211_drop_unencrypted_mgmt(rx)) 2091 return RX_DROP_UNUSABLE; 2092 2093 return RX_CONTINUE; 2094} 2095 2096static ieee80211_rx_result debug_noinline 2097ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 2098{ 2099 struct ieee80211_local *local = rx->local; 2100 struct ieee80211_sub_if_data *sdata = rx->sdata; 2101 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2102 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2103 int len = rx->skb->len; 2104 2105 if (!ieee80211_is_action(mgmt->frame_control)) 2106 return RX_CONTINUE; 2107 2108 /* drop too small frames */ 2109 if (len < IEEE80211_MIN_ACTION_SIZE) 2110 return RX_DROP_UNUSABLE; 2111 2112 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) 2113 return RX_DROP_UNUSABLE; 2114 2115 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2116 return RX_DROP_UNUSABLE; 2117 2118 switch (mgmt->u.action.category) { 2119 case WLAN_CATEGORY_BACK: 2120 /* 2121 * The aggregation code is not prepared to handle 2122 * anything but STA/AP due to the BSSID handling; 2123 * IBSS could work in the code but isn't supported 2124 * by drivers or the standard. 
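		 *
		 * The length checks below only verify that the fixed part of
		 * the corresponding ADDBA request/response or DELBA frame is
		 * present; anything shorter is marked as a malformed action
		 * frame and handed back to its sender by
		 * ieee80211_rx_h_action_return().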
2125 */ 2126 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2127 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2128 sdata->vif.type != NL80211_IFTYPE_AP) 2129 break; 2130 2131 /* verify action_code is present */ 2132 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2133 break; 2134 2135 switch (mgmt->u.action.u.addba_req.action_code) { 2136 case WLAN_ACTION_ADDBA_REQ: 2137 if (len < (IEEE80211_MIN_ACTION_SIZE + 2138 sizeof(mgmt->u.action.u.addba_req))) 2139 goto invalid; 2140 break; 2141 case WLAN_ACTION_ADDBA_RESP: 2142 if (len < (IEEE80211_MIN_ACTION_SIZE + 2143 sizeof(mgmt->u.action.u.addba_resp))) 2144 goto invalid; 2145 break; 2146 case WLAN_ACTION_DELBA: 2147 if (len < (IEEE80211_MIN_ACTION_SIZE + 2148 sizeof(mgmt->u.action.u.delba))) 2149 goto invalid; 2150 break; 2151 default: 2152 goto invalid; 2153 } 2154 2155 goto queue; 2156 case WLAN_CATEGORY_SPECTRUM_MGMT: 2157 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) 2158 break; 2159 2160 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2161 break; 2162 2163 /* verify action_code is present */ 2164 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2165 break; 2166 2167 switch (mgmt->u.action.u.measurement.action_code) { 2168 case WLAN_ACTION_SPCT_MSR_REQ: 2169 if (len < (IEEE80211_MIN_ACTION_SIZE + 2170 sizeof(mgmt->u.action.u.measurement))) 2171 break; 2172 ieee80211_process_measurement_req(sdata, mgmt, len); 2173 goto handled; 2174 case WLAN_ACTION_SPCT_CHL_SWITCH: 2175 if (len < (IEEE80211_MIN_ACTION_SIZE + 2176 sizeof(mgmt->u.action.u.chan_switch))) 2177 break; 2178 2179 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2180 break; 2181 2182 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) 2183 break; 2184 2185 goto queue; 2186 } 2187 break; 2188 case WLAN_CATEGORY_SA_QUERY: 2189 if (len < (IEEE80211_MIN_ACTION_SIZE + 2190 sizeof(mgmt->u.action.u.sa_query))) 2191 break; 2192 2193 switch (mgmt->u.action.u.sa_query.action) { 2194 case WLAN_ACTION_SA_QUERY_REQUEST: 2195 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2196 break; 2197 ieee80211_process_sa_query_req(sdata, mgmt, len); 2198 goto handled; 2199 } 2200 break; 2201 case WLAN_CATEGORY_MESH_PLINK: 2202 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2203 break; 2204 goto queue; 2205 case WLAN_CATEGORY_MESH_PATH_SEL: 2206 if (!mesh_path_sel_is_hwmp(sdata)) 2207 break; 2208 goto queue; 2209 } 2210 2211 return RX_CONTINUE; 2212 2213 invalid: 2214 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 2215 /* will return in the next handlers */ 2216 return RX_CONTINUE; 2217 2218 handled: 2219 if (rx->sta) 2220 rx->sta->rx_packets++; 2221 dev_kfree_skb(rx->skb); 2222 return RX_QUEUED; 2223 2224 queue: 2225 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2226 skb_queue_tail(&sdata->skb_queue, rx->skb); 2227 ieee80211_queue_work(&local->hw, &sdata->work); 2228 if (rx->sta) 2229 rx->sta->rx_packets++; 2230 return RX_QUEUED; 2231} 2232 2233static ieee80211_rx_result debug_noinline 2234ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 2235{ 2236 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2237 2238 /* skip known-bad action frames and return them in the next handler */ 2239 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 2240 return RX_CONTINUE; 2241 2242 /* 2243 * Getting here means the kernel doesn't know how to handle 2244 * it, but maybe userspace does ... include returned frames 2245 * so userspace can register for those to know whether ones 2246 * it transmitted were processed or returned. 
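	 *
	 * (cfg80211_rx_mgmt() forwards the frame to any userspace listener
	 * that registered for this management frame subtype, e.g. via
	 * NL80211_CMD_REGISTER_FRAME, and returns true only if at least
	 * one such listener exists.)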
2247 */ 2248 2249 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, 2250 rx->skb->data, rx->skb->len, 2251 GFP_ATOMIC)) { 2252 if (rx->sta) 2253 rx->sta->rx_packets++; 2254 dev_kfree_skb(rx->skb); 2255 return RX_QUEUED; 2256 } 2257 2258 2259 return RX_CONTINUE; 2260} 2261 2262static ieee80211_rx_result debug_noinline 2263ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 2264{ 2265 struct ieee80211_local *local = rx->local; 2266 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2267 struct sk_buff *nskb; 2268 struct ieee80211_sub_if_data *sdata = rx->sdata; 2269 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2270 2271 if (!ieee80211_is_action(mgmt->frame_control)) 2272 return RX_CONTINUE; 2273 2274 /* 2275 * For AP mode, hostapd is responsible for handling any action 2276 * frames that we didn't handle, including returning unknown 2277 * ones. For all other modes we will return them to the sender, 2278 * setting the 0x80 bit in the action category, as required by 2279 * 802.11-2007 7.3.1.11. 2280 * Newer versions of hostapd shall also use the management frame 2281 * registration mechanisms, but older ones still use cooked 2282 * monitor interfaces so push all frames there. 2283 */ 2284 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 2285 (sdata->vif.type == NL80211_IFTYPE_AP || 2286 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 2287 return RX_DROP_MONITOR; 2288 2289 /* do not return rejected action frames */ 2290 if (mgmt->u.action.category & 0x80) 2291 return RX_DROP_UNUSABLE; 2292 2293 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 2294 GFP_ATOMIC); 2295 if (nskb) { 2296 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 2297 2298 nmgmt->u.action.category |= 0x80; 2299 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 2300 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 2301 2302 memset(nskb->cb, 0, sizeof(nskb->cb)); 2303 2304 ieee80211_tx_skb(rx->sdata, nskb); 2305 } 2306 dev_kfree_skb(rx->skb); 2307 return RX_QUEUED; 2308} 2309 2310static ieee80211_rx_result debug_noinline 2311ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2312{ 2313 struct ieee80211_sub_if_data *sdata = rx->sdata; 2314 ieee80211_rx_result rxs; 2315 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2316 __le16 stype; 2317 2318 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb); 2319 if (rxs != RX_CONTINUE) 2320 return rxs; 2321 2322 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 2323 2324 if (!ieee80211_vif_is_mesh(&sdata->vif) && 2325 sdata->vif.type != NL80211_IFTYPE_ADHOC && 2326 sdata->vif.type != NL80211_IFTYPE_STATION) 2327 return RX_DROP_MONITOR; 2328 2329 switch (stype) { 2330 case cpu_to_le16(IEEE80211_STYPE_BEACON): 2331 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 2332 /* process for all: mesh, mlme, ibss */ 2333 break; 2334 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 2335 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 2336 if (is_multicast_ether_addr(mgmt->da) && 2337 !is_broadcast_ether_addr(mgmt->da)) 2338 return RX_DROP_MONITOR; 2339 2340 /* process only for station */ 2341 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2342 return RX_DROP_MONITOR; 2343 break; 2344 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 2345 case cpu_to_le16(IEEE80211_STYPE_AUTH): 2346 /* process only for ibss */ 2347 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 2348 return RX_DROP_MONITOR; 2349 break; 2350 default: 2351 return RX_DROP_MONITOR; 2352 } 2353 2354 /* queue up frame and kick off work to process it */ 2355 rx->skb->pkt_type = 
IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2356 skb_queue_tail(&sdata->skb_queue, rx->skb); 2357 ieee80211_queue_work(&rx->local->hw, &sdata->work); 2358 if (rx->sta) 2359 rx->sta->rx_packets++; 2360 2361 return RX_QUEUED; 2362} 2363 2364static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr, 2365 struct ieee80211_rx_data *rx) 2366{ 2367 int keyidx; 2368 unsigned int hdrlen; 2369 2370 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2371 if (rx->skb->len >= hdrlen + 4) 2372 keyidx = rx->skb->data[hdrlen + 3] >> 6; 2373 else 2374 keyidx = -1; 2375 2376 if (!rx->sta) { 2377 /* 2378 * Some hardware seem to generate incorrect Michael MIC 2379 * reports; ignore them to avoid triggering countermeasures. 2380 */ 2381 return; 2382 } 2383 2384 if (!ieee80211_has_protected(hdr->frame_control)) 2385 return; 2386 2387 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) { 2388 /* 2389 * APs with pairwise keys should never receive Michael MIC 2390 * errors for non-zero keyidx because these are reserved for 2391 * group keys and only the AP is sending real multicast 2392 * frames in the BSS. 2393 */ 2394 return; 2395 } 2396 2397 if (!ieee80211_is_data(hdr->frame_control) && 2398 !ieee80211_is_auth(hdr->frame_control)) 2399 return; 2400 2401 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL, 2402 GFP_ATOMIC); 2403} 2404 2405/* TODO: use IEEE80211_RX_FRAGMENTED */ 2406static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 2407 struct ieee80211_rate *rate) 2408{ 2409 struct ieee80211_sub_if_data *sdata; 2410 struct ieee80211_local *local = rx->local; 2411 struct ieee80211_rtap_hdr { 2412 struct ieee80211_radiotap_header hdr; 2413 u8 flags; 2414 u8 rate_or_pad; 2415 __le16 chan_freq; 2416 __le16 chan_flags; 2417 } __packed *rthdr; 2418 struct sk_buff *skb = rx->skb, *skb2; 2419 struct net_device *prev_dev = NULL; 2420 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2421 2422 /* 2423 * If cooked monitor has been processed already, then 2424 * don't do it again. If not, set the flag. 
2425 */ 2426 if (rx->flags & IEEE80211_RX_CMNTR) 2427 goto out_free_skb; 2428 rx->flags |= IEEE80211_RX_CMNTR; 2429 2430 if (skb_headroom(skb) < sizeof(*rthdr) && 2431 pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) 2432 goto out_free_skb; 2433 2434 rthdr = (void *)skb_push(skb, sizeof(*rthdr)); 2435 memset(rthdr, 0, sizeof(*rthdr)); 2436 rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr)); 2437 rthdr->hdr.it_present = 2438 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | 2439 (1 << IEEE80211_RADIOTAP_CHANNEL)); 2440 2441 if (rate) { 2442 rthdr->rate_or_pad = rate->bitrate / 5; 2443 rthdr->hdr.it_present |= 2444 cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); 2445 } 2446 rthdr->chan_freq = cpu_to_le16(status->freq); 2447 2448 if (status->band == IEEE80211_BAND_5GHZ) 2449 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM | 2450 IEEE80211_CHAN_5GHZ); 2451 else 2452 rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN | 2453 IEEE80211_CHAN_2GHZ); 2454 2455 skb_set_mac_header(skb, 0); 2456 skb->ip_summed = CHECKSUM_UNNECESSARY; 2457 skb->pkt_type = PACKET_OTHERHOST; 2458 skb->protocol = htons(ETH_P_802_2); 2459 2460 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2461 if (!ieee80211_sdata_running(sdata)) 2462 continue; 2463 2464 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 2465 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) 2466 continue; 2467 2468 if (prev_dev) { 2469 skb2 = skb_clone(skb, GFP_ATOMIC); 2470 if (skb2) { 2471 skb2->dev = prev_dev; 2472 netif_receive_skb(skb2); 2473 } 2474 } 2475 2476 prev_dev = sdata->dev; 2477 sdata->dev->stats.rx_packets++; 2478 sdata->dev->stats.rx_bytes += skb->len; 2479 } 2480 2481 if (prev_dev) { 2482 skb->dev = prev_dev; 2483 netif_receive_skb(skb); 2484 return; 2485 } 2486 2487 out_free_skb: 2488 dev_kfree_skb(skb); 2489} 2490 2491static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 2492 ieee80211_rx_result res) 2493{ 2494 switch (res) { 2495 case RX_DROP_MONITOR: 2496 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 2497 if (rx->sta) 2498 rx->sta->rx_dropped++; 2499 /* fall through */ 2500 case RX_CONTINUE: { 2501 struct ieee80211_rate *rate = NULL; 2502 struct ieee80211_supported_band *sband; 2503 struct ieee80211_rx_status *status; 2504 2505 status = IEEE80211_SKB_RXCB((rx->skb)); 2506 2507 sband = rx->local->hw.wiphy->bands[status->band]; 2508 if (!(status->flag & RX_FLAG_HT)) 2509 rate = &sband->bitrates[status->rate_idx]; 2510 2511 ieee80211_rx_cooked_monitor(rx, rate); 2512 break; 2513 } 2514 case RX_DROP_UNUSABLE: 2515 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 2516 if (rx->sta) 2517 rx->sta->rx_dropped++; 2518 dev_kfree_skb(rx->skb); 2519 break; 2520 case RX_QUEUED: 2521 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 2522 break; 2523 } 2524} 2525 2526static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx) 2527{ 2528 ieee80211_rx_result res = RX_DROP_MONITOR; 2529 struct sk_buff *skb; 2530 2531#define CALL_RXH(rxh) \ 2532 do { \ 2533 res = rxh(rx); \ 2534 if (res != RX_CONTINUE) \ 2535 goto rxh_next; \ 2536 } while (0); 2537 2538 spin_lock(&rx->local->rx_skb_queue.lock); 2539 if (rx->local->running_rx_handler) 2540 goto unlock; 2541 2542 rx->local->running_rx_handler = true; 2543 2544 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) { 2545 spin_unlock(&rx->local->rx_skb_queue.lock); 2546 2547 /* 2548 * all the other fields are valid across frames 2549 * that belong to an aMPDU since they are on the 2550 * same TID from the same station 2551 */ 2552 rx->skb = skb; 2553 rx->flags = 
0; 2554 2555 CALL_RXH(ieee80211_rx_h_decrypt) 2556 CALL_RXH(ieee80211_rx_h_check_more_data) 2557 CALL_RXH(ieee80211_rx_h_sta_process) 2558 CALL_RXH(ieee80211_rx_h_defragment) 2559 CALL_RXH(ieee80211_rx_h_ps_poll) 2560 CALL_RXH(ieee80211_rx_h_michael_mic_verify) 2561 /* must be after MMIC verify so header is counted in MPDU mic */ 2562 CALL_RXH(ieee80211_rx_h_remove_qos_control) 2563 CALL_RXH(ieee80211_rx_h_amsdu) 2564#ifdef CONFIG_MAC80211_MESH 2565 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 2566 CALL_RXH(ieee80211_rx_h_mesh_fwding); 2567#endif 2568 CALL_RXH(ieee80211_rx_h_data) 2569 CALL_RXH(ieee80211_rx_h_ctrl); 2570 CALL_RXH(ieee80211_rx_h_mgmt_check) 2571 CALL_RXH(ieee80211_rx_h_action) 2572 CALL_RXH(ieee80211_rx_h_userspace_mgmt) 2573 CALL_RXH(ieee80211_rx_h_action_return) 2574 CALL_RXH(ieee80211_rx_h_mgmt) 2575 2576 rxh_next: 2577 ieee80211_rx_handlers_result(rx, res); 2578 spin_lock(&rx->local->rx_skb_queue.lock); 2579#undef CALL_RXH 2580 } 2581 2582 rx->local->running_rx_handler = false; 2583 2584 unlock: 2585 spin_unlock(&rx->local->rx_skb_queue.lock); 2586} 2587 2588static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 2589{ 2590 ieee80211_rx_result res = RX_DROP_MONITOR; 2591 2592#define CALL_RXH(rxh) \ 2593 do { \ 2594 res = rxh(rx); \ 2595 if (res != RX_CONTINUE) \ 2596 goto rxh_next; \ 2597 } while (0); 2598 2599 CALL_RXH(ieee80211_rx_h_passive_scan) 2600 CALL_RXH(ieee80211_rx_h_check) 2601 2602 ieee80211_rx_reorder_ampdu(rx); 2603 2604 ieee80211_rx_handlers(rx); 2605 return; 2606 2607 rxh_next: 2608 ieee80211_rx_handlers_result(rx, res); 2609 2610#undef CALL_RXH 2611} 2612 2613/* 2614 * This function makes calls into the RX path, therefore 2615 * it has to be invoked under RCU read lock. 2616 */ 2617void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 2618{ 2619 struct ieee80211_rx_data rx = { 2620 .sta = sta, 2621 .sdata = sta->sdata, 2622 .local = sta->local, 2623 .queue = tid, 2624 }; 2625 struct tid_ampdu_rx *tid_agg_rx; 2626 2627 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 2628 if (!tid_agg_rx) 2629 return; 2630 2631 spin_lock(&tid_agg_rx->reorder_lock); 2632 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx); 2633 spin_unlock(&tid_agg_rx->reorder_lock); 2634 2635 ieee80211_rx_handlers(&rx); 2636} 2637 2638/* main receive path */ 2639 2640static int prepare_for_handlers(struct ieee80211_rx_data *rx, 2641 struct ieee80211_hdr *hdr) 2642{ 2643 struct ieee80211_sub_if_data *sdata = rx->sdata; 2644 struct sk_buff *skb = rx->skb; 2645 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2646 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 2647 int multicast = is_multicast_ether_addr(hdr->addr1); 2648 2649 switch (sdata->vif.type) { 2650 case NL80211_IFTYPE_STATION: 2651 if (!bssid && !sdata->u.mgd.use_4addr) 2652 return 0; 2653 if (!multicast && 2654 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { 2655 if (!(sdata->dev->flags & IFF_PROMISC)) 2656 return 0; 2657 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2658 } 2659 break; 2660 case NL80211_IFTYPE_ADHOC: 2661 if (!bssid) 2662 return 0; 2663 if (ieee80211_is_beacon(hdr->frame_control)) { 2664 return 1; 2665 } 2666 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { 2667 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN)) 2668 return 0; 2669 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2670 } else if (!multicast && 2671 compare_ether_addr(sdata->vif.addr, 2672 hdr->addr1) != 0) { 2673 if (!(sdata->dev->flags & IFF_PROMISC)) 2674 
				return 0;
			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
		} else if (!rx->sta) {
			int rate_idx;
			if (status->flag & RX_FLAG_HT)
				rate_idx = 0; /* TODO: HT rates */
			else
				rate_idx = status->rate_idx;
			rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
					hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
		}
		break;
	case NL80211_IFTYPE_MESH_POINT:
		if (!multicast &&
		    compare_ether_addr(sdata->vif.addr,
				       hdr->addr1) != 0) {
			if (!(sdata->dev->flags & IFF_PROMISC))
				return 0;

			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
		}
		break;
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_AP:
		if (!bssid) {
			if (compare_ether_addr(sdata->vif.addr,
					       hdr->addr1))
				return 0;
		} else if (!ieee80211_bssid_match(bssid,
						  sdata->vif.addr)) {
			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
			    !ieee80211_is_beacon(hdr->frame_control))
				return 0;
			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
		}
		break;
	case NL80211_IFTYPE_WDS:
		if (bssid || !ieee80211_is_data(hdr->frame_control))
			return 0;
		if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
			return 0;
		break;
	default:
		/* should never get here */
		WARN_ON(1);
		break;
	}

	return 1;
}

/*
 * This function returns whether the SKB was destined for RX processing,
 * which, if consume is true, is equivalent to whether the skb was
 * consumed by the RX path.
 */
static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
					    struct sk_buff *skb, bool consume)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	int prepares;

	rx->skb = skb;
	status->rx_flags |= IEEE80211_RX_RA_MATCH;
	prepares = prepare_for_handlers(rx, hdr);

	if (!prepares)
		return false;

	if (status->flag & RX_FLAG_MMIC_ERROR) {
		if (status->rx_flags & IEEE80211_RX_RA_MATCH)
			ieee80211_rx_michael_mic_report(hdr, rx);
		return false;
	}

	if (!consume) {
		skb = skb_copy(skb, GFP_ATOMIC);
		if (!skb) {
			if (net_ratelimit())
				wiphy_debug(local->hw.wiphy,
					"failed to copy skb for %s\n",
					sdata->name);
			return true;
		}

		rx->skb = skb;
	}

	ieee80211_invoke_rx_handlers(rx);
	return true;
}

/*
 * This is the actual Rx frames handler. As it belongs to the Rx path it
 * must be called with rcu_read_lock protection.
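 *
 * Data frames are handed to every station entry matching the transmitter
 * address; all other frames are offered to each suitable interface. Every
 * recipient except the last one gets its own copy of the skb, so the
 * final call is allowed to consume the original buffer.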
2773 */ 2774static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 2775 struct sk_buff *skb) 2776{ 2777 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2778 struct ieee80211_local *local = hw_to_local(hw); 2779 struct ieee80211_sub_if_data *sdata; 2780 struct ieee80211_hdr *hdr; 2781 __le16 fc; 2782 struct ieee80211_rx_data rx; 2783 struct ieee80211_sub_if_data *prev; 2784 struct sta_info *sta, *tmp, *prev_sta; 2785 int err = 0; 2786 2787 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 2788 memset(&rx, 0, sizeof(rx)); 2789 rx.skb = skb; 2790 rx.local = local; 2791 2792 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 2793 local->dot11ReceivedFragmentCount++; 2794 2795 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || 2796 test_bit(SCAN_OFF_CHANNEL, &local->scanning))) 2797 status->rx_flags |= IEEE80211_RX_IN_SCAN; 2798 2799 if (ieee80211_is_mgmt(fc)) 2800 err = skb_linearize(skb); 2801 else 2802 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 2803 2804 if (err) { 2805 dev_kfree_skb(skb); 2806 return; 2807 } 2808 2809 hdr = (struct ieee80211_hdr *)skb->data; 2810 ieee80211_parse_qos(&rx); 2811 ieee80211_verify_alignment(&rx); 2812 2813 if (ieee80211_is_data(fc)) { 2814 prev_sta = NULL; 2815 2816 for_each_sta_info(local, hdr->addr2, sta, tmp) { 2817 if (!prev_sta) { 2818 prev_sta = sta; 2819 continue; 2820 } 2821 2822 rx.sta = prev_sta; 2823 rx.sdata = prev_sta->sdata; 2824 ieee80211_prepare_and_rx_handle(&rx, skb, false); 2825 2826 prev_sta = sta; 2827 } 2828 2829 if (prev_sta) { 2830 rx.sta = prev_sta; 2831 rx.sdata = prev_sta->sdata; 2832 2833 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2834 return; 2835 goto out; 2836 } 2837 } 2838 2839 prev = NULL; 2840 2841 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2842 if (!ieee80211_sdata_running(sdata)) 2843 continue; 2844 2845 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 2846 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 2847 continue; 2848 2849 /* 2850 * frame is destined for this interface, but if it's 2851 * not also for the previous one we handle that after 2852 * the loop to avoid copying the SKB once too much 2853 */ 2854 2855 if (!prev) { 2856 prev = sdata; 2857 continue; 2858 } 2859 2860 rx.sta = sta_info_get_bss(prev, hdr->addr2); 2861 rx.sdata = prev; 2862 ieee80211_prepare_and_rx_handle(&rx, skb, false); 2863 2864 prev = sdata; 2865 } 2866 2867 if (prev) { 2868 rx.sta = sta_info_get_bss(prev, hdr->addr2); 2869 rx.sdata = prev; 2870 2871 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2872 return; 2873 } 2874 2875 out: 2876 dev_kfree_skb(skb); 2877} 2878 2879/* 2880 * This is the receive path handler. It is called by a low level driver when an 2881 * 802.11 MPDU is received from the hardware. 2882 */ 2883void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) 2884{ 2885 struct ieee80211_local *local = hw_to_local(hw); 2886 struct ieee80211_rate *rate = NULL; 2887 struct ieee80211_supported_band *sband; 2888 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2889 2890 WARN_ON_ONCE(softirq_count() == 0); 2891 2892 if (WARN_ON(status->band < 0 || 2893 status->band >= IEEE80211_NUM_BANDS)) 2894 goto drop; 2895 2896 sband = local->hw.wiphy->bands[status->band]; 2897 if (WARN_ON(!sband)) 2898 goto drop; 2899 2900 /* 2901 * If we're suspending, it is possible although not too likely 2902 * that we'd be receiving frames after having already partially 2903 * quiesced the stack. 
We can't process such frames then since 2904 * that might, for example, cause stations to be added or other 2905 * driver callbacks be invoked. 2906 */ 2907 if (unlikely(local->quiescing || local->suspended)) 2908 goto drop; 2909 2910 /* 2911 * The same happens when we're not even started, 2912 * but that's worth a warning. 2913 */ 2914 if (WARN_ON(!local->started)) 2915 goto drop; 2916 2917 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 2918 /* 2919 * Validate the rate, unless a PLCP error means that 2920 * we probably can't have a valid rate here anyway. 2921 */ 2922 2923 if (status->flag & RX_FLAG_HT) { 2924 /* 2925 * rate_idx is MCS index, which can be [0-76] 2926 * as documented on: 2927 * 2928 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n 2929 * 2930 * Anything else would be some sort of driver or 2931 * hardware error. The driver should catch hardware 2932 * errors. 2933 */ 2934 if (WARN((status->rate_idx < 0 || 2935 status->rate_idx > 76), 2936 "Rate marked as an HT rate but passed " 2937 "status->rate_idx is not " 2938 "an MCS index [0-76]: %d (0x%02x)\n", 2939 status->rate_idx, 2940 status->rate_idx)) 2941 goto drop; 2942 } else { 2943 if (WARN_ON(status->rate_idx < 0 || 2944 status->rate_idx >= sband->n_bitrates)) 2945 goto drop; 2946 rate = &sband->bitrates[status->rate_idx]; 2947 } 2948 } 2949 2950 status->rx_flags = 0; 2951 2952 /* 2953 * key references and virtual interfaces are protected using RCU 2954 * and this requires that we are in a read-side RCU section during 2955 * receive processing 2956 */ 2957 rcu_read_lock(); 2958 2959 /* 2960 * Frames with failed FCS/PLCP checksum are not returned, 2961 * all other frames are returned without radiotap header 2962 * if it was previously present. 2963 * Also, frames with less than 16 bytes are dropped. 2964 */ 2965 skb = ieee80211_rx_monitor(local, skb, rate); 2966 if (!skb) { 2967 rcu_read_unlock(); 2968 return; 2969 } 2970 2971 ieee80211_tpt_led_trig_rx(local, 2972 ((struct ieee80211_hdr *)skb->data)->frame_control, 2973 skb->len); 2974 __ieee80211_rx_handle_packet(hw, skb); 2975 2976 rcu_read_unlock(); 2977 2978 return; 2979 drop: 2980 kfree_skb(skb); 2981} 2982EXPORT_SYMBOL(ieee80211_rx); 2983 2984/* This is a version of the rx handler that can be called from hard irq 2985 * context. Post the skb on the queue and schedule the tasklet */ 2986void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 2987{ 2988 struct ieee80211_local *local = hw_to_local(hw); 2989 2990 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 2991 2992 skb->pkt_type = IEEE80211_RX_MSG; 2993 skb_queue_tail(&local->skb_queue, skb); 2994 tasklet_schedule(&local->tasklet); 2995} 2996EXPORT_SYMBOL(ieee80211_rx_irqsafe); 2997
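/*
 * Example (illustrative only, not part of mac80211): a minimal sketch of
 * how a low-level driver hands a received MPDU to the functions above.
 * The driver fills in struct ieee80211_rx_status in the skb control
 * buffer and then calls ieee80211_rx() from BH context, or
 * ieee80211_rx_irqsafe() straight from its interrupt handler as shown
 * here.  "example_rx_isr" and the hard-coded channel/signal/rate values
 * are made up for the sketch.
 *
 *	static void example_rx_isr(struct ieee80211_hw *hw,
 *				   struct sk_buff *skb)
 *	{
 *		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *		memset(status, 0, sizeof(*status));
 *		status->band = IEEE80211_BAND_2GHZ;
 *		status->freq = 2412;	// channel 1
 *		status->signal = -42;	// dBm, with IEEE80211_HW_SIGNAL_DBM
 *		status->rate_idx = 0;	// index into the band's bitrate table
 *
 *		// defers to the rx tasklet, so it is safe in hard-irq context
 *		ieee80211_rx_irqsafe(hw, skb);
 *	}
 */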