rx.c revision d3c1597b8d1ba0447ce858c7c385eabcf69f2c8f
1/*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/jiffies.h>
13#include <linux/slab.h>
14#include <linux/kernel.h>
15#include <linux/skbuff.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/rcupdate.h>
19#include <linux/export.h>
20#include <net/mac80211.h>
21#include <net/ieee80211_radiotap.h>
22
23#include "ieee80211_i.h"
24#include "driver-ops.h"
25#include "led.h"
26#include "mesh.h"
27#include "wep.h"
28#include "wpa.h"
29#include "tkip.h"
30#include "wme.h"
31
32/*
33 * monitor mode reception
34 *
35 * This function cleans up the SKB, i.e. it removes all the stuff
36 * only useful for monitoring.
37 */
38static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
39					   struct sk_buff *skb)
40{
41	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
42		if (likely(skb->len > FCS_LEN))
43			__pskb_trim(skb, skb->len - FCS_LEN);
44		else {
45			/* driver bug */
46			WARN_ON(1);
47			dev_kfree_skb(skb);
48			skb = NULL;
49		}
50	}
51
52	return skb;
53}
54
55static inline int should_drop_frame(struct sk_buff *skb,
56				    int present_fcs_len)
57{
58	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
59	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
60
61	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
62		return 1;
63	if (unlikely(skb->len < 16 + present_fcs_len))
64		return 1;
65	if (ieee80211_is_ctl(hdr->frame_control) &&
66	    !ieee80211_is_pspoll(hdr->frame_control) &&
67	    !ieee80211_is_back_req(hdr->frame_control))
68		return 1;
69	return 0;
70}
71
72static int
73ieee80211_rx_radiotap_len(struct ieee80211_local *local,
74			  struct ieee80211_rx_status *status)
75{
76	int len;
77
78	/* always present fields */
79	len = sizeof(struct ieee80211_radiotap_header) + 9;
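	/* i.e. flags (1) + rate or padding (1) + channel (2 + 2) +
	 * antenna (1) + RX flags (2) = 9 bytes */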
80
81	if (status->flag & RX_FLAG_MACTIME_MPDU)
82		len += 8;
83	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
84		len += 1;
85
86	if (len & 1) /* padding for RX_FLAGS if necessary */
87		len++;
88
89	if (status->flag & RX_FLAG_HT) /* HT info */
90		len += 3;
91
92	return len;
93}
94
95/*
96 * ieee80211_add_rx_radiotap_header - add radiotap header
97 *
98 * add a radiotap header containing all the fields which the hardware provided.
99 */
100static void
101ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
102				 struct sk_buff *skb,
103				 struct ieee80211_rate *rate,
104				 int rtap_len)
105{
106	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
107	struct ieee80211_radiotap_header *rthdr;
108	unsigned char *pos;
109	u16 rx_flags = 0;
110
111	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
112	memset(rthdr, 0, rtap_len);
113
114	/* radiotap header, set always present flags */
115	rthdr->it_present =
116		cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
117			    (1 << IEEE80211_RADIOTAP_CHANNEL) |
118			    (1 << IEEE80211_RADIOTAP_ANTENNA) |
119			    (1 << IEEE80211_RADIOTAP_RX_FLAGS));
120	rthdr->it_len = cpu_to_le16(rtap_len);
121
122	pos = (unsigned char *)(rthdr+1);
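	/* the data area starts right after the fixed-size radiotap header;
	 * no extended it_present bitmaps are used here */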
123
124	/* the order of the following fields is important */
125
126	/* IEEE80211_RADIOTAP_TSFT */
127	if (status->flag & RX_FLAG_MACTIME_MPDU) {
128		put_unaligned_le64(status->mactime, pos);
129		rthdr->it_present |=
130			cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
131		pos += 8;
132	}
133
134	/* IEEE80211_RADIOTAP_FLAGS */
135	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
136		*pos |= IEEE80211_RADIOTAP_F_FCS;
137	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
138		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
139	if (status->flag & RX_FLAG_SHORTPRE)
140		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
141	pos++;
142
143	/* IEEE80211_RADIOTAP_RATE */
144	if (status->flag & RX_FLAG_HT) {
145		/*
146		 * MCS information is a separate field in radiotap,
147		 * added below. The byte here is needed as padding
148		 * for the channel though, so initialise it to 0.
149		 */
150		*pos = 0;
151	} else {
152		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
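		/* radiotap carries the rate in 500 kb/s units while
		 * mac80211 stores bitrates in 100 kb/s units */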
153		*pos = rate->bitrate / 5;
154	}
155	pos++;
156
157	/* IEEE80211_RADIOTAP_CHANNEL */
158	put_unaligned_le16(status->freq, pos);
159	pos += 2;
160	if (status->band == IEEE80211_BAND_5GHZ)
161		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
162				   pos);
163	else if (status->flag & RX_FLAG_HT)
164		put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
165				   pos);
166	else if (rate->flags & IEEE80211_RATE_ERP_G)
167		put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
168				   pos);
169	else
170		put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
171				   pos);
172	pos += 2;
173
174	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
175	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
176		*pos = status->signal;
177		rthdr->it_present |=
178			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
179		pos++;
180	}
181
182	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
183
184	/* IEEE80211_RADIOTAP_ANTENNA */
185	*pos = status->antenna;
186	pos++;
187
188	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
189
190	/* IEEE80211_RADIOTAP_RX_FLAGS */
191	/* ensure 2 byte alignment for the 2 byte field as required */
192	if ((pos - (u8 *)rthdr) & 1)
193		pos++;
194	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
195		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
196	put_unaligned_le16(rx_flags, pos);
197	pos += 2;
198
199	if (status->flag & RX_FLAG_HT) {
200		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
201		*pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
202			 IEEE80211_RADIOTAP_MCS_HAVE_GI |
203			 IEEE80211_RADIOTAP_MCS_HAVE_BW;
204		*pos = 0;
205		if (status->flag & RX_FLAG_SHORT_GI)
206			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
207		if (status->flag & RX_FLAG_40MHZ)
208			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
209		pos++;
210		*pos++ = status->rate_idx;
211	}
212}
213
214/*
215 * This function copies a received frame to all monitor interfaces and
216 * returns a cleaned-up SKB that no longer includes the FCS nor the
217 * radiotap header the driver might have added.
218 */
219static struct sk_buff *
220ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
221		     struct ieee80211_rate *rate)
222{
223	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
224	struct ieee80211_sub_if_data *sdata;
225	int needed_headroom = 0;
226	struct sk_buff *skb, *skb2;
227	struct net_device *prev_dev = NULL;
228	int present_fcs_len = 0;
229
230	/*
231	 * First, we may need to make a copy of the skb because
232	 *  (1) we need to modify it for radiotap (if not present), and
233	 *  (2) the other RX handlers will modify the skb we got.
234	 *
235	 * We don't need to, of course, if we aren't going to return
236	 * the SKB because it has a bad FCS/PLCP checksum.
237	 */
238
239	/* room for the radiotap header based on driver features */
240	needed_headroom = ieee80211_rx_radiotap_len(local, status);
241
242	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
243		present_fcs_len = FCS_LEN;
244
245	/* make sure hdr->frame_control is on the linear part */
246	if (!pskb_may_pull(origskb, 2)) {
247		dev_kfree_skb(origskb);
248		return NULL;
249	}
250
251	if (!local->monitors) {
252		if (should_drop_frame(origskb, present_fcs_len)) {
253			dev_kfree_skb(origskb);
254			return NULL;
255		}
256
257		return remove_monitor_info(local, origskb);
258	}
259
260	if (should_drop_frame(origskb, present_fcs_len)) {
261		/* only need to expand headroom if necessary */
262		skb = origskb;
263		origskb = NULL;
264
265		/*
266		 * This shouldn't trigger often because most devices have an
267		 * RX header they pull before we get here, and that should
268		 * be big enough for our radiotap information. We should
269		 * probably export the length to drivers so that we can have
270		 * them allocate enough headroom to start with.
271		 */
272		if (skb_headroom(skb) < needed_headroom &&
273		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
274			dev_kfree_skb(skb);
275			return NULL;
276		}
277	} else {
278		/*
279		 * Need to make a copy and possibly remove radiotap header
280		 * and FCS from the original.
281		 */
282		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
283
284		origskb = remove_monitor_info(local, origskb);
285
286		if (!skb)
287			return origskb;
288	}
289
290	/* prepend radiotap information */
291	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
292
293	skb_reset_mac_header(skb);
294	skb->ip_summed = CHECKSUM_UNNECESSARY;
295	skb->pkt_type = PACKET_OTHERHOST;
296	skb->protocol = htons(ETH_P_802_2);
297
298	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
299		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
300			continue;
301
302		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
303			continue;
304
305		if (!ieee80211_sdata_running(sdata))
306			continue;
307
308		if (prev_dev) {
309			skb2 = skb_clone(skb, GFP_ATOMIC);
310			if (skb2) {
311				skb2->dev = prev_dev;
312				netif_receive_skb(skb2);
313			}
314		}
315
316		prev_dev = sdata->dev;
317		sdata->dev->stats.rx_packets++;
318		sdata->dev->stats.rx_bytes += skb->len;
319	}
320
321	if (prev_dev) {
322		skb->dev = prev_dev;
323		netif_receive_skb(skb);
324	} else
325		dev_kfree_skb(skb);
326
327	return origskb;
328}
329
330
331static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
332{
333	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
334	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
335	int tid, seqno_idx, security_idx;
336
337	/* does the frame have a qos control field? */
338	if (ieee80211_is_data_qos(hdr->frame_control)) {
339		u8 *qc = ieee80211_get_qos_ctl(hdr);
340		/* frame has qos control */
341		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
342		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
343			status->rx_flags |= IEEE80211_RX_AMSDU;
344
345		seqno_idx = tid;
346		security_idx = tid;
347	} else {
348		/*
349		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
350		 *
351		 *	Sequence numbers for management frames, QoS data
352		 *	frames with a broadcast/multicast address in the
353		 *	Address 1 field, and all non-QoS data frames sent
354		 *	by QoS STAs are assigned using an additional single
355		 *	modulo-4096 counter, [...]
356		 *
357		 * We also use that counter for non-QoS STAs.
358		 */
359		seqno_idx = NUM_RX_DATA_QUEUES;
360		security_idx = 0;
361		if (ieee80211_is_mgmt(hdr->frame_control))
362			security_idx = NUM_RX_DATA_QUEUES;
363		tid = 0;
364	}
365
366	rx->seqno_idx = seqno_idx;
367	rx->security_idx = security_idx;
368	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
369	 * For now, set skb->priority to 0 for other cases. */
370	rx->skb->priority = (tid > 7) ? 0 : tid;
371}
372
373/**
374 * DOC: Packet alignment
375 *
376 * Drivers always need to pass packets that are aligned to two-byte boundaries
377 * to the stack.
378 *
379 * Additionally, drivers should, if possible, align the payload data in a way that
380 * guarantees that the contained IP header is aligned to a four-byte
381 * boundary. In the case of regular frames, this simply means aligning the
382 * payload to a four-byte boundary (because either the IP header is directly
383 * contained, or IV/RFC1042 headers that have a length divisible by four are
384 * in front of it).  If the payload data is not properly aligned and the
385 * architecture doesn't support efficient unaligned operations, mac80211
386 * will align the data.
387 *
388 * With A-MSDU frames, however, the payload data address must be two modulo
389 * four because the 14-byte 802.3 headers within the A-MSDU subframes push
390 * the IP header further back to a multiple of four again. Thankfully, the
391 * specs were sane enough this time around to require padding each A-MSDU
392 * subframe to a length that is a multiple of four.
393 *
394 * Padding such as that added by Atheros hardware between the 802.11 header and
395 * the payload is not supported; in that case the driver is required to move the
396 * 802.11 header so that it sits directly in front of the payload.
397 */
398static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
399{
400#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
401	WARN_ONCE((unsigned long)rx->skb->data & 1,
402		  "unaligned packet at 0x%p\n", rx->skb->data);
403#endif
404}
405
406
407/* rx handlers */
408
409static ieee80211_rx_result debug_noinline
410ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
411{
412	struct ieee80211_local *local = rx->local;
413	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
414	struct sk_buff *skb = rx->skb;
415
416	if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
417		   !local->sched_scanning))
418		return RX_CONTINUE;
419
420	if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
421	    test_bit(SCAN_SW_SCANNING, &local->scanning) ||
422	    local->sched_scanning)
423		return ieee80211_scan_rx(rx->sdata, skb);
424
425	/* scanning finished while the handlers were being invoked */
426	I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
427	return RX_DROP_UNUSABLE;
428}
429
430
431static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
432{
433	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
434
435	if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
436		return 0;
437
438	return ieee80211_is_robust_mgmt_frame(hdr);
439}
440
441
442static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
443{
444	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
445
446	if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
447		return 0;
448
449	return ieee80211_is_robust_mgmt_frame(hdr);
450}
451
452
453/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
454static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
455{
456	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
457	struct ieee80211_mmie *mmie;
458
459	if (skb->len < 24 + sizeof(*mmie) ||
460	    !is_multicast_ether_addr(hdr->da))
461		return -1;
462
463	if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
464		return -1; /* not a robust management frame */
465
466	mmie = (struct ieee80211_mmie *)
467		(skb->data + skb->len - sizeof(*mmie));
468	if (mmie->element_id != WLAN_EID_MMIE ||
469	    mmie->length != sizeof(*mmie) - 2)
470		return -1;
471
472	return le16_to_cpu(mmie->key_id);
473}
474
475
476static ieee80211_rx_result
477ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
478{
479	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
480	char *dev_addr = rx->sdata->vif.addr;
481
482	if (ieee80211_is_data(hdr->frame_control)) {
483		if (is_multicast_ether_addr(hdr->addr1)) {
484			if (ieee80211_has_tods(hdr->frame_control) ||
485				!ieee80211_has_fromds(hdr->frame_control))
486				return RX_DROP_MONITOR;
487			if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
488				return RX_DROP_MONITOR;
489		} else {
490			if (!ieee80211_has_a4(hdr->frame_control))
491				return RX_DROP_MONITOR;
492			if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
493				return RX_DROP_MONITOR;
494		}
495	}
496
497	/* If there is not an established peer link and this is not a peer link
498	 * establishment frame, beacon or probe, drop the frame.
499	 */
500
501	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
502		struct ieee80211_mgmt *mgmt;
503
504		if (!ieee80211_is_mgmt(hdr->frame_control))
505			return RX_DROP_MONITOR;
506
507		if (ieee80211_is_action(hdr->frame_control)) {
508			u8 category;
509			mgmt = (struct ieee80211_mgmt *)hdr;
510			category = mgmt->u.action.category;
511			if (category != WLAN_CATEGORY_MESH_ACTION &&
512				category != WLAN_CATEGORY_SELF_PROTECTED)
513				return RX_DROP_MONITOR;
514			return RX_CONTINUE;
515		}
516
517		if (ieee80211_is_probe_req(hdr->frame_control) ||
518		    ieee80211_is_probe_resp(hdr->frame_control) ||
519		    ieee80211_is_beacon(hdr->frame_control) ||
520		    ieee80211_is_auth(hdr->frame_control))
521			return RX_CONTINUE;
522
523		return RX_DROP_MONITOR;
524
525	}
526
527	return RX_CONTINUE;
528}
529
530#define SEQ_MODULO 0x1000
531#define SEQ_MASK   0xfff
532
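/*
 * The 802.11 sequence number space is a 12-bit circle (modulo 4096);
 * seq_less() treats sq1 as "before" sq2 when it lies in the half of
 * the circle immediately preceding sq2.
 */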
533static inline int seq_less(u16 sq1, u16 sq2)
534{
535	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
536}
537
538static inline u16 seq_inc(u16 sq)
539{
540	return (sq + 1) & SEQ_MASK;
541}
542
543static inline u16 seq_sub(u16 sq1, u16 sq2)
544{
545	return (sq1 - sq2) & SEQ_MASK;
546}
547
548
549static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
550					    struct tid_ampdu_rx *tid_agg_rx,
551					    int index)
552{
553	struct ieee80211_local *local = hw_to_local(hw);
554	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
555	struct ieee80211_rx_status *status;
556
557	lockdep_assert_held(&tid_agg_rx->reorder_lock);
558
559	if (!skb)
560		goto no_frame;
561
562	/* release the frame from the reorder ring buffer */
563	tid_agg_rx->stored_mpdu_num--;
564	tid_agg_rx->reorder_buf[index] = NULL;
565	status = IEEE80211_SKB_RXCB(skb);
566	status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
567	skb_queue_tail(&local->rx_skb_queue, skb);
568
569no_frame:
570	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
571}
572
573static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
574					     struct tid_ampdu_rx *tid_agg_rx,
575					     u16 head_seq_num)
576{
577	int index;
578
579	lockdep_assert_held(&tid_agg_rx->reorder_lock);
580
581	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
582		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
583							tid_agg_rx->buf_size;
584		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
585	}
586}
587
588/*
589 * Timeout (in jiffies) for skbs that are waiting in the RX reorder buffer. If
590 * the skb was added to the buffer longer than this time ago, the earlier
591 * frames that have not yet been received are assumed to be lost and the skb
592 * can be released for processing. This may also release other skbs from the
593 * reorder buffer if there are no additional gaps between the frames.
594 *
595 * Callers must hold tid_agg_rx->reorder_lock.
596 */
597#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
598
599static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
600					  struct tid_ampdu_rx *tid_agg_rx)
601{
602	int index, j;
603
604	lockdep_assert_held(&tid_agg_rx->reorder_lock);
605
606	/* release frames from the buffer until the next missing frame */
607	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
608						tid_agg_rx->buf_size;
609	if (!tid_agg_rx->reorder_buf[index] &&
610	    tid_agg_rx->stored_mpdu_num > 1) {
611		/*
612		 * No buffers ready to be released, but check whether any
613		 * frames in the reorder buffer have timed out.
614		 */
615		int skipped = 1;
616		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
617		     j = (j + 1) % tid_agg_rx->buf_size) {
618			if (!tid_agg_rx->reorder_buf[j]) {
619				skipped++;
620				continue;
621			}
622			if (skipped &&
623			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
624					HT_RX_REORDER_BUF_TIMEOUT))
625				goto set_release_timer;
626
627#ifdef CONFIG_MAC80211_HT_DEBUG
628			if (net_ratelimit())
629				wiphy_debug(hw->wiphy,
630					    "release an RX reorder frame due to timeout on earlier frames\n");
631#endif
632			ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
633
634			/*
635			 * Increment the head seq# also for the skipped slots.
636			 */
637			tid_agg_rx->head_seq_num =
638				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
639			skipped = 0;
640		}
641	} else while (tid_agg_rx->reorder_buf[index]) {
642		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
643		index =	seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
644							tid_agg_rx->buf_size;
645	}
646
647	if (tid_agg_rx->stored_mpdu_num) {
648		j = index = seq_sub(tid_agg_rx->head_seq_num,
649				    tid_agg_rx->ssn) % tid_agg_rx->buf_size;
650
651		for (; j != (index - 1) % tid_agg_rx->buf_size;
652		     j = (j + 1) % tid_agg_rx->buf_size) {
653			if (tid_agg_rx->reorder_buf[j])
654				break;
655		}
656
657 set_release_timer:
658
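		/*
		 * j now indexes a frame still held in the reorder buffer;
		 * re-arm the release timer to fire just after that frame's
		 * reorder timeout expires.
		 */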
659		mod_timer(&tid_agg_rx->reorder_timer,
660			  tid_agg_rx->reorder_time[j] + 1 +
661			  HT_RX_REORDER_BUF_TIMEOUT);
662	} else {
663		del_timer(&tid_agg_rx->reorder_timer);
664	}
665}
666
667/*
668 * As this function belongs to the RX path it must be under
669 * rcu_read_lock protection. It returns false if the frame
670 * can be processed immediately, true if it was consumed.
671 */
672static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
673					     struct tid_ampdu_rx *tid_agg_rx,
674					     struct sk_buff *skb)
675{
676	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
677	u16 sc = le16_to_cpu(hdr->seq_ctrl);
678	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
679	u16 head_seq_num, buf_size;
680	int index;
681	bool ret = true;
682
683	spin_lock(&tid_agg_rx->reorder_lock);
684
685	buf_size = tid_agg_rx->buf_size;
686	head_seq_num = tid_agg_rx->head_seq_num;
687
688	/* frame with out of date sequence number */
689	if (seq_less(mpdu_seq_num, head_seq_num)) {
690		dev_kfree_skb(skb);
691		goto out;
692	}
693
694	/*
695	 * If the frame's sequence number exceeds our buffering window
696	 * size, release some previous frames to make room for this one.
697	 */
698	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
699		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
700		/* release stored frames up to new head to stack */
701		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
702	}
703
704	/* Now the new frame is always in the range of the reordering buffer */
705
706	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
707
708	/* check if we already stored this frame */
709	if (tid_agg_rx->reorder_buf[index]) {
710		dev_kfree_skb(skb);
711		goto out;
712	}
713
714	/*
715	 * If the current MPDU is in the right order and nothing else
716	 * is stored we can process it directly, no need to buffer it.
717	 * If it is first but there's something stored, we may be able
718	 * to release frames after this one.
719	 */
720	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
721	    tid_agg_rx->stored_mpdu_num == 0) {
722		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
723		ret = false;
724		goto out;
725	}
726
727	/* put the frame in the reordering buffer */
728	tid_agg_rx->reorder_buf[index] = skb;
729	tid_agg_rx->reorder_time[index] = jiffies;
730	tid_agg_rx->stored_mpdu_num++;
731	ieee80211_sta_reorder_release(hw, tid_agg_rx);
732
733 out:
734	spin_unlock(&tid_agg_rx->reorder_lock);
735	return ret;
736}
737
738/*
739 * Reorder MPDUs from A-MPDUs, keeping them in a buffer; frames that are
740 * not (or no longer) held for reordering are queued for normal processing.
741 */
742static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
743{
744	struct sk_buff *skb = rx->skb;
745	struct ieee80211_local *local = rx->local;
746	struct ieee80211_hw *hw = &local->hw;
747	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
748	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
749	struct sta_info *sta = rx->sta;
750	struct tid_ampdu_rx *tid_agg_rx;
751	u16 sc;
752	u8 tid, ack_policy;
753
754	if (!ieee80211_is_data_qos(hdr->frame_control))
755		goto dont_reorder;
756
757	/*
758	 * filter the QoS data rx stream according to
759	 * STA/TID and check if this STA/TID is on aggregation
760	 */
761
762	if (!sta)
763		goto dont_reorder;
764
765	ack_policy = *ieee80211_get_qos_ctl(hdr) &
766		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
767	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
768
769	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
770	if (!tid_agg_rx)
771		goto dont_reorder;
772
773	/* qos null data frames are excluded */
774	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
775		goto dont_reorder;
776
777	/* not part of a BA session */
778	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
779	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
780		goto dont_reorder;
781
782	/* not actually part of this BA session */
783	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
784		goto dont_reorder;
785
786	/* new, potentially un-ordered, ampdu frame - process it */
787
788	/* reset session timer */
789	if (tid_agg_rx->timeout)
790		mod_timer(&tid_agg_rx->session_timer,
791			  TU_TO_EXP_TIME(tid_agg_rx->timeout));
792
793	/* if this mpdu is fragmented - terminate rx aggregation session */
794	sc = le16_to_cpu(hdr->seq_ctrl);
795	if (sc & IEEE80211_SCTL_FRAG) {
796		skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
797		skb_queue_tail(&rx->sdata->skb_queue, skb);
798		ieee80211_queue_work(&local->hw, &rx->sdata->work);
799		return;
800	}
801
802	/*
803	 * No locking needed -- we will only ever process one
804	 * RX packet at a time, and thus own tid_agg_rx. All
805	 * other code manipulating it needs to (and does) make
806	 * sure that we cannot get to it any more before doing
807	 * anything with it.
808	 */
809	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
810		return;
811
812 dont_reorder:
813	skb_queue_tail(&local->rx_skb_queue, skb);
814}
815
816static ieee80211_rx_result debug_noinline
817ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
818{
819	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
820	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
821
822	/* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
823	if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
824		if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
825			     rx->sta->last_seq_ctrl[rx->seqno_idx] ==
826			     hdr->seq_ctrl)) {
827			if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
828				rx->local->dot11FrameDuplicateCount++;
829				rx->sta->num_duplicates++;
830			}
831			return RX_DROP_UNUSABLE;
832		} else
833			rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
834	}
835
836	if (unlikely(rx->skb->len < 16)) {
837		I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
838		return RX_DROP_MONITOR;
839	}
840
841	/* Drop disallowed frame classes based on STA auth/assoc state;
842	 * IEEE 802.11, Chap 5.5.
843	 *
844	 * mac80211 filters only based on association state, i.e. it drops
845	 * Class 3 frames from not associated stations. hostapd sends
846	 * deauth/disassoc frames when needed. In addition, hostapd is
847	 * responsible for filtering on both auth and assoc states.
848	 */
849
850	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
851		return ieee80211_rx_mesh_check(rx);
852
853	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
854		      ieee80211_is_pspoll(hdr->frame_control)) &&
855		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
856		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
857		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
858		if (rx->sta && rx->sta->dummy &&
859		    ieee80211_is_data_present(hdr->frame_control)) {
860			u16 ethertype;
861			u8 *payload;
862
863			payload = rx->skb->data +
864				ieee80211_hdrlen(hdr->frame_control);
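			/* with RFC 1042/SNAP encapsulation the ethertype
			 * occupies bytes 6 and 7 of the payload */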
865			ethertype = (payload[6] << 8) | payload[7];
866			if (cpu_to_be16(ethertype) ==
867			    rx->sdata->control_port_protocol)
868				return RX_CONTINUE;
869		}
870
871		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
872		    cfg80211_rx_spurious_frame(rx->sdata->dev,
873					       hdr->addr2,
874					       GFP_ATOMIC))
875			return RX_DROP_UNUSABLE;
876
877		return RX_DROP_MONITOR;
878	}
879
880	return RX_CONTINUE;
881}
882
883
884static ieee80211_rx_result debug_noinline
885ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
886{
887	struct sk_buff *skb = rx->skb;
888	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
889	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
890	int keyidx;
891	int hdrlen;
892	ieee80211_rx_result result = RX_DROP_UNUSABLE;
893	struct ieee80211_key *sta_ptk = NULL;
894	int mmie_keyidx = -1;
895	__le16 fc;
896
897	/*
898	 * Key selection 101
899	 *
900	 * There are four types of keys:
901	 *  - GTK (group keys)
902	 *  - IGTK (group keys for management frames)
903	 *  - PTK (pairwise keys)
904	 *  - STK (station-to-station pairwise keys)
905	 *
906	 * When selecting a key, we have to distinguish between multicast
907	 * (including broadcast) and unicast frames, the latter can only
908	 * use PTKs and STKs while the former always use GTKs and IGTKs.
909	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
910	 * unicast frames can also use key indices like GTKs. Hence, if we
911	 * don't have a PTK/STK we check the key index for a WEP key.
912	 *
913	 * Note that in a regular BSS, multicast frames are sent by the
914	 * AP only; associated stations unicast the frame to the AP first,
915	 * which then multicasts it on their behalf.
916	 *
917	 * There is also a slight problem in IBSS mode: GTKs are negotiated
918	 * with each station; that is something we don't currently handle.
919	 * The spec seems to expect that one negotiates the same key with
920	 * every station but there's no such requirement; VLANs could be
921	 * possible.
922	 */
923
924	/*
925	 * No point in finding a key and decrypting if the frame is neither
926	 * addressed to us nor a multicast frame.
927	 */
928	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
929		return RX_CONTINUE;
930
931	/* start without a key */
932	rx->key = NULL;
933
934	if (rx->sta)
935		sta_ptk = rcu_dereference(rx->sta->ptk);
936
937	fc = hdr->frame_control;
938
939	if (!ieee80211_has_protected(fc))
940		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
941
942	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
943		rx->key = sta_ptk;
944		if ((status->flag & RX_FLAG_DECRYPTED) &&
945		    (status->flag & RX_FLAG_IV_STRIPPED))
946			return RX_CONTINUE;
947		/* Skip decryption if the frame is not protected. */
948		if (!ieee80211_has_protected(fc))
949			return RX_CONTINUE;
950	} else if (mmie_keyidx >= 0) {
951		/* Broadcast/multicast robust management frame / BIP */
952		if ((status->flag & RX_FLAG_DECRYPTED) &&
953		    (status->flag & RX_FLAG_IV_STRIPPED))
954			return RX_CONTINUE;
955
956		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
957		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
958			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
959		if (rx->sta)
960			rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
961		if (!rx->key)
962			rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
963	} else if (!ieee80211_has_protected(fc)) {
964		/*
965		 * The frame was not protected, so skip decryption. However, we
966		 * need to set rx->key if there is a key that could have been
967		 * used so that the frame may be dropped if encryption would
968		 * have been expected.
969		 */
970		struct ieee80211_key *key = NULL;
971		struct ieee80211_sub_if_data *sdata = rx->sdata;
972		int i;
973
974		if (ieee80211_is_mgmt(fc) &&
975		    is_multicast_ether_addr(hdr->addr1) &&
976		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
977			rx->key = key;
978		else {
979			if (rx->sta) {
980				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
981					key = rcu_dereference(rx->sta->gtk[i]);
982					if (key)
983						break;
984				}
985			}
986			if (!key) {
987				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
988					key = rcu_dereference(sdata->keys[i]);
989					if (key)
990						break;
991				}
992			}
993			if (key)
994				rx->key = key;
995		}
996		return RX_CONTINUE;
997	} else {
998		u8 keyid;
999		/*
1000		 * The device doesn't give us the IV so we won't be
1001		 * able to look up the key. That's ok though, we
1002		 * don't need to decrypt the frame, we just won't
1003		 * be able to keep statistics accurate.
1004		 * Except for key threshold notifications, should
1005		 * we somehow allow the driver to tell us which key
1006		 * the hardware used if this flag is set?
1007		 */
1008		if ((status->flag & RX_FLAG_DECRYPTED) &&
1009		    (status->flag & RX_FLAG_IV_STRIPPED))
1010			return RX_CONTINUE;
1011
1012		hdrlen = ieee80211_hdrlen(fc);
1013
1014		if (rx->skb->len < 8 + hdrlen)
1015			return RX_DROP_UNUSABLE; /* TODO: count this? */
1016
1017		/*
1018		 * no need to call ieee80211_wep_get_keyidx,
1019		 * it verifies a bunch of things we've done already
1020		 */
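		/* the key index is carried in the top two bits of the KeyID
		 * octet, the fourth byte of the IV */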
1021		skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
1022		keyidx = keyid >> 6;
1023
1024		/* check per-station GTK first, if multicast packet */
1025		if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1026			rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1027
1028		/* if not found, try default key */
1029		if (!rx->key) {
1030			rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1031
1032			/*
1033			 * RSNA-protected unicast frames should always be
1034			 * sent with pairwise or station-to-station keys,
1035			 * but for WEP we allow using a key index as well.
1036			 */
1037			if (rx->key &&
1038			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1039			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1040			    !is_multicast_ether_addr(hdr->addr1))
1041				rx->key = NULL;
1042		}
1043	}
1044
1045	if (rx->key) {
1046		if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1047			return RX_DROP_MONITOR;
1048
1049		rx->key->tx_rx_count++;
1050		/* TODO: add threshold stuff again */
1051	} else {
1052		return RX_DROP_MONITOR;
1053	}
1054
1055	if (skb_linearize(rx->skb))
1056		return RX_DROP_UNUSABLE;
1057	/* the hdr variable is invalid now! */
1058
1059	switch (rx->key->conf.cipher) {
1060	case WLAN_CIPHER_SUITE_WEP40:
1061	case WLAN_CIPHER_SUITE_WEP104:
1062		/* Check for weak IVs if possible */
1063		if (rx->sta && ieee80211_is_data(fc) &&
1064		    (!(status->flag & RX_FLAG_IV_STRIPPED) ||
1065		     !(status->flag & RX_FLAG_DECRYPTED)) &&
1066		    ieee80211_wep_is_weak_iv(rx->skb, rx->key))
1067			rx->sta->wep_weak_iv_count++;
1068
1069		result = ieee80211_crypto_wep_decrypt(rx);
1070		break;
1071	case WLAN_CIPHER_SUITE_TKIP:
1072		result = ieee80211_crypto_tkip_decrypt(rx);
1073		break;
1074	case WLAN_CIPHER_SUITE_CCMP:
1075		result = ieee80211_crypto_ccmp_decrypt(rx);
1076		break;
1077	case WLAN_CIPHER_SUITE_AES_CMAC:
1078		result = ieee80211_crypto_aes_cmac_decrypt(rx);
1079		break;
1080	default:
1081		/*
1082		 * We can reach here only with HW-only algorithms
1083		 * but why didn't it decrypt the frame?!
1084		 */
1085		return RX_DROP_UNUSABLE;
1086	}
1087
1088	/* either the frame has been decrypted or will be dropped */
1089	status->flag |= RX_FLAG_DECRYPTED;
1090
1091	return result;
1092}
1093
1094static ieee80211_rx_result debug_noinline
1095ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1096{
1097	struct ieee80211_local *local;
1098	struct ieee80211_hdr *hdr;
1099	struct sk_buff *skb;
1100
1101	local = rx->local;
1102	skb = rx->skb;
1103	hdr = (struct ieee80211_hdr *) skb->data;
1104
1105	if (!local->pspolling)
1106		return RX_CONTINUE;
1107
1108	if (!ieee80211_has_fromds(hdr->frame_control))
1109		/* this is not from AP */
1110		return RX_CONTINUE;
1111
1112	if (!ieee80211_is_data(hdr->frame_control))
1113		return RX_CONTINUE;
1114
1115	if (!ieee80211_has_moredata(hdr->frame_control)) {
1116		/* AP has no more frames buffered for us */
1117		local->pspolling = false;
1118		return RX_CONTINUE;
1119	}
1120
1121	/* more data bit is set, let's request a new frame from the AP */
1122	ieee80211_send_pspoll(local, rx->sdata);
1123
1124	return RX_CONTINUE;
1125}
1126
1127static void ap_sta_ps_start(struct sta_info *sta)
1128{
1129	struct ieee80211_sub_if_data *sdata = sta->sdata;
1130	struct ieee80211_local *local = sdata->local;
1131
1132	atomic_inc(&sdata->bss->num_sta_ps);
1133	set_sta_flag(sta, WLAN_STA_PS_STA);
1134	if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
1135		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1136#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1137	printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1138	       sdata->name, sta->sta.addr, sta->sta.aid);
1139#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1140}
1141
1142static void ap_sta_ps_end(struct sta_info *sta)
1143{
1144	struct ieee80211_sub_if_data *sdata = sta->sdata;
1145
1146	atomic_dec(&sdata->bss->num_sta_ps);
1147
1148#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1149	printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1150	       sdata->name, sta->sta.addr, sta->sta.aid);
1151#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1152
1153	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1154#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1155		printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1156		       sdata->name, sta->sta.addr, sta->sta.aid);
1157#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1158		return;
1159	}
1160
1161	ieee80211_sta_ps_deliver_wakeup(sta);
1162}
1163
1164int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
1165{
1166	struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
1167	bool in_ps;
1168
1169	WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
1170
1171	/* Don't let the same PS state be set twice */
1172	in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
1173	if ((start && in_ps) || (!start && !in_ps))
1174		return -EINVAL;
1175
1176	if (start)
1177		ap_sta_ps_start(sta_inf);
1178	else
1179		ap_sta_ps_end(sta_inf);
1180
1181	return 0;
1182}
1183EXPORT_SYMBOL(ieee80211_sta_ps_transition);
1184
1185static ieee80211_rx_result debug_noinline
1186ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1187{
1188	struct ieee80211_sub_if_data *sdata = rx->sdata;
1189	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1190	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1191	int tid, ac;
1192
1193	if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
1194		return RX_CONTINUE;
1195
1196	if (sdata->vif.type != NL80211_IFTYPE_AP &&
1197	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1198		return RX_CONTINUE;
1199
1200	/*
1201	 * The device handles station powersave, so don't do anything about
1202	 * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
1203	 * to mac80211 since the device handles them).
1204	 */
1205	if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
1206		return RX_CONTINUE;
1207
1208	/*
1209	 * Don't do anything if the station isn't already asleep. In
1210	 * the uAPSD case, the station will probably be marked asleep;
1211	 * in the PS-Poll case the station must be confused ...
1212	 */
1213	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1214		return RX_CONTINUE;
1215
1216	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1217		if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
1218			if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1219				ieee80211_sta_ps_deliver_poll_response(rx->sta);
1220			else
1221				set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
1222		}
1223
1224		/* Free PS Poll skb here instead of returning RX_DROP that would
1225		 * count as a dropped frame. */
1226		dev_kfree_skb(rx->skb);
1227
1228		return RX_QUEUED;
1229	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1230		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1231		   ieee80211_has_pm(hdr->frame_control) &&
1232		   (ieee80211_is_data_qos(hdr->frame_control) ||
1233		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1234		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
1235		ac = ieee802_1d_to_ac[tid & 7];
1236
1237		/*
1238		 * If this AC is not trigger-enabled do nothing.
1239		 *
1240		 * NB: This could/should check a separate bitmap of trigger-
1241		 * enabled queues, but for now we only implement uAPSD w/o
1242		 * TSPEC changes to the ACs, so they're always the same.
1243		 */
1244		if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
1245			return RX_CONTINUE;
1246
1247		/* if we are in a service period, do nothing */
1248		if (test_sta_flag(rx->sta, WLAN_STA_SP))
1249			return RX_CONTINUE;
1250
1251		if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
1252			ieee80211_sta_ps_deliver_uapsd(rx->sta);
1253		else
1254			set_sta_flag(rx->sta, WLAN_STA_UAPSD);
1255	}
1256
1257	return RX_CONTINUE;
1258}
1259
1260static ieee80211_rx_result debug_noinline
1261ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1262{
1263	struct sta_info *sta = rx->sta;
1264	struct sk_buff *skb = rx->skb;
1265	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1266	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1267
1268	if (!sta)
1269		return RX_CONTINUE;
1270
1271	/*
1272	 * Update last_rx only for IBSS packets which are for the current
1273	 * BSSID to avoid keeping the current IBSS network alive in cases
1274	 * where other STAs start using different BSSID.
1275	 */
1276	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1277		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1278						NL80211_IFTYPE_ADHOC);
1279		if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) {
1280			sta->last_rx = jiffies;
1281			if (ieee80211_is_data(hdr->frame_control)) {
1282				sta->last_rx_rate_idx = status->rate_idx;
1283				sta->last_rx_rate_flag = status->flag;
1284			}
1285		}
1286	} else if (!is_multicast_ether_addr(hdr->addr1)) {
1287		/*
1288		 * Mesh beacons will update last_rx if they are found to match
1289		 * the current local configuration when processed.
1290		 */
1291		sta->last_rx = jiffies;
1292		if (ieee80211_is_data(hdr->frame_control)) {
1293			sta->last_rx_rate_idx = status->rate_idx;
1294			sta->last_rx_rate_flag = status->flag;
1295		}
1296	}
1297
1298	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
1299		return RX_CONTINUE;
1300
1301	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1302		ieee80211_sta_rx_notify(rx->sdata, hdr);
1303
1304	sta->rx_fragments++;
1305	sta->rx_bytes += rx->skb->len;
1306	sta->last_signal = status->signal;
1307	ewma_add(&sta->avg_signal, -status->signal);
1308
1309	/*
1310	 * Change STA power saving mode only at the end of a frame
1311	 * exchange sequence.
1312	 */
1313	if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
1314	    !ieee80211_has_morefrags(hdr->frame_control) &&
1315	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1316	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1317	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1318		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1319			/*
1320			 * Ignore doze->wake transitions that are
1321			 * indicated by non-data frames, the standard
1322			 * is unclear here, but for example going to
1323			 * PS mode and then scanning would cause a
1324			 * doze->wake transition for the probe request,
1325			 * and that is clearly undesirable.
1326			 */
1327			if (ieee80211_is_data(hdr->frame_control) &&
1328			    !ieee80211_has_pm(hdr->frame_control))
1329				ap_sta_ps_end(sta);
1330		} else {
1331			if (ieee80211_has_pm(hdr->frame_control))
1332				ap_sta_ps_start(sta);
1333		}
1334	}
1335
1336	/*
1337	 * Drop (qos-)data::nullfunc frames silently, since they
1338	 * are used only to control station power saving mode.
1339	 */
1340	if (ieee80211_is_nullfunc(hdr->frame_control) ||
1341	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1342		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1343
1344		/*
1345		 * If we receive a 4-addr nullfunc frame from a STA that
1346		 * has not been moved to a 4-addr STA vlan yet, send the
1347		 * event to userspace; for older hostapd, drop the frame
1348		 * to the monitor interface.
1349		 */
1350		if (ieee80211_has_a4(hdr->frame_control) &&
1351		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1352		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1353		      !rx->sdata->u.vlan.sta))) {
1354			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1355				cfg80211_rx_unexpected_4addr_frame(
1356					rx->sdata->dev, sta->sta.addr,
1357					GFP_ATOMIC);
1358			return RX_DROP_MONITOR;
1359		}
1360		/*
1361		 * Update counter and free packet here to avoid
1362		 * counting this as a dropped packet.
1363		 */
1364		sta->rx_packets++;
1365		dev_kfree_skb(rx->skb);
1366		return RX_QUEUED;
1367	}
1368
1369	return RX_CONTINUE;
1370} /* ieee80211_rx_h_sta_process */
1371
1372static inline struct ieee80211_fragment_entry *
1373ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1374			 unsigned int frag, unsigned int seq, int rx_queue,
1375			 struct sk_buff **skb)
1376{
1377	struct ieee80211_fragment_entry *entry;
1378	int idx;
1379
1380	idx = sdata->fragment_next;
1381	entry = &sdata->fragments[sdata->fragment_next++];
1382	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
1383		sdata->fragment_next = 0;
1384
1385	if (!skb_queue_empty(&entry->skb_list)) {
1386#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1387		struct ieee80211_hdr *hdr =
1388			(struct ieee80211_hdr *) entry->skb_list.next->data;
1389		printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1390		       "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1391		       "addr1=%pM addr2=%pM\n",
1392		       sdata->name, idx,
1393		       jiffies - entry->first_frag_time, entry->seq,
1394		       entry->last_frag, hdr->addr1, hdr->addr2);
1395#endif
1396		__skb_queue_purge(&entry->skb_list);
1397	}
1398
1399	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
1400	*skb = NULL;
1401	entry->first_frag_time = jiffies;
1402	entry->seq = seq;
1403	entry->rx_queue = rx_queue;
1404	entry->last_frag = frag;
1405	entry->ccmp = 0;
1406	entry->extra_len = 0;
1407
1408	return entry;
1409}
1410
1411static inline struct ieee80211_fragment_entry *
1412ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
1413			  unsigned int frag, unsigned int seq,
1414			  int rx_queue, struct ieee80211_hdr *hdr)
1415{
1416	struct ieee80211_fragment_entry *entry;
1417	int i, idx;
1418
1419	idx = sdata->fragment_next;
1420	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
1421		struct ieee80211_hdr *f_hdr;
1422
1423		idx--;
1424		if (idx < 0)
1425			idx = IEEE80211_FRAGMENT_MAX - 1;
1426
1427		entry = &sdata->fragments[idx];
1428		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
1429		    entry->rx_queue != rx_queue ||
1430		    entry->last_frag + 1 != frag)
1431			continue;
1432
1433		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
1434
1435		/*
1436		 * Check ftype and addresses are equal, else check next fragment
1437		 */
1438		if (((hdr->frame_control ^ f_hdr->frame_control) &
1439		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
1440		    compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
1441		    compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
1442			continue;
1443
1444		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
1445			__skb_queue_purge(&entry->skb_list);
1446			continue;
1447		}
1448		return entry;
1449	}
1450
1451	return NULL;
1452}
1453
1454static ieee80211_rx_result debug_noinline
1455ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1456{
1457	struct ieee80211_hdr *hdr;
1458	u16 sc;
1459	__le16 fc;
1460	unsigned int frag, seq;
1461	struct ieee80211_fragment_entry *entry;
1462	struct sk_buff *skb;
1463	struct ieee80211_rx_status *status;
1464
1465	hdr = (struct ieee80211_hdr *)rx->skb->data;
1466	fc = hdr->frame_control;
1467	sc = le16_to_cpu(hdr->seq_ctrl);
1468	frag = sc & IEEE80211_SCTL_FRAG;
1469
1470	if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
1471		   (rx->skb)->len < 24 ||
1472		   is_multicast_ether_addr(hdr->addr1))) {
1473		/* not fragmented */
1474		goto out;
1475	}
1476	I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1477
1478	if (skb_linearize(rx->skb))
1479		return RX_DROP_UNUSABLE;
1480
1481	/*
1482	 *  skb_linearize() might change the skb->data and
1483	 *  previously cached variables (in this case, hdr) need to
1484	 *  be refreshed with the new data.
1485	 */
1486	hdr = (struct ieee80211_hdr *)rx->skb->data;
1487	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
1488
1489	if (frag == 0) {
1490		/* This is the first fragment of a new frame. */
1491		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1492						 rx->seqno_idx, &(rx->skb));
1493		if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1494		    ieee80211_has_protected(fc)) {
1495			int queue = rx->security_idx;
1496			/* Store CCMP PN so that we can verify that the next
1497			 * fragment has a sequential PN value. */
1498			entry->ccmp = 1;
1499			memcpy(entry->last_pn,
1500			       rx->key->u.ccmp.rx_pn[queue],
1501			       CCMP_PN_LEN);
1502		}
1503		return RX_QUEUED;
1504	}
1505
1506	/* This is a fragment for a frame that should already be pending in
1507	 * fragment cache. Add this fragment to the end of the pending entry.
1508	 */
1509	entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
1510					  rx->seqno_idx, hdr);
1511	if (!entry) {
1512		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1513		return RX_DROP_MONITOR;
1514	}
1515
1516	/* Verify that MPDUs within one MSDU have sequential PN values.
1517	 * (IEEE 802.11i, 8.3.3.4.5) */
1518	if (entry->ccmp) {
1519		int i;
1520		u8 pn[CCMP_PN_LEN], *rpn;
1521		int queue;
1522		if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1523			return RX_DROP_UNUSABLE;
1524		memcpy(pn, entry->last_pn, CCMP_PN_LEN);
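		/* increment the expected PN; it is stored most-significant
		 * byte first, so carry from the last byte upwards */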
1525		for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
1526			pn[i]++;
1527			if (pn[i])
1528				break;
1529		}
1530		queue = rx->security_idx;
1531		rpn = rx->key->u.ccmp.rx_pn[queue];
1532		if (memcmp(pn, rpn, CCMP_PN_LEN))
1533			return RX_DROP_UNUSABLE;
1534		memcpy(entry->last_pn, pn, CCMP_PN_LEN);
1535	}
1536
1537	skb_pull(rx->skb, ieee80211_hdrlen(fc));
1538	__skb_queue_tail(&entry->skb_list, rx->skb);
1539	entry->last_frag = frag;
1540	entry->extra_len += rx->skb->len;
1541	if (ieee80211_has_morefrags(fc)) {
1542		rx->skb = NULL;
1543		return RX_QUEUED;
1544	}
1545
1546	rx->skb = __skb_dequeue(&entry->skb_list);
1547	if (skb_tailroom(rx->skb) < entry->extra_len) {
1548		I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
1549		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
1550					      GFP_ATOMIC))) {
1551			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1552			__skb_queue_purge(&entry->skb_list);
1553			return RX_DROP_UNUSABLE;
1554		}
1555	}
1556	while ((skb = __skb_dequeue(&entry->skb_list))) {
1557		memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
1558		dev_kfree_skb(skb);
1559	}
1560
1561	/* Complete frame has been reassembled - process it now */
1562	status = IEEE80211_SKB_RXCB(rx->skb);
1563	status->rx_flags |= IEEE80211_RX_FRAGMENTED;
1564
1565 out:
1566	if (rx->sta)
1567		rx->sta->rx_packets++;
1568	if (is_multicast_ether_addr(hdr->addr1))
1569		rx->local->dot11MulticastReceivedFrameCount++;
1570	else
1571		ieee80211_led_rx(rx->local);
1572	return RX_CONTINUE;
1573}
1574
1575static ieee80211_rx_result debug_noinline
1576ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1577{
1578	u8 *data = rx->skb->data;
1579	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
1580
1581	if (!ieee80211_is_data_qos(hdr->frame_control))
1582		return RX_CONTINUE;
1583
1584	/* remove the qos control field, update frame type and meta-data */
1585	memmove(data + IEEE80211_QOS_CTL_LEN, data,
1586		ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1587	hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1588	/* change frame type to non QOS */
1589	hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1590
1591	return RX_CONTINUE;
1592}
1593
1594static int
1595ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1596{
1597	if (unlikely(!rx->sta ||
1598	    !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
1599		return -EACCES;
1600
1601	return 0;
1602}
1603
1604static int
1605ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1606{
1607	struct sk_buff *skb = rx->skb;
1608	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1609
1610	/*
1611	 * Pass through unencrypted frames if the hardware has
1612	 * decrypted them already.
1613	 */
1614	if (status->flag & RX_FLAG_DECRYPTED)
1615		return 0;
1616
1617	/* Drop unencrypted frames if key is set. */
1618	if (unlikely(!ieee80211_has_protected(fc) &&
1619		     !ieee80211_is_nullfunc(fc) &&
1620		     ieee80211_is_data(fc) &&
1621		     (rx->key || rx->sdata->drop_unencrypted)))
1622		return -EACCES;
1623
1624	return 0;
1625}
1626
1627static int
1628ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1629{
1630	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1631	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1632	__le16 fc = hdr->frame_control;
1633
1634	/*
1635	 * Pass through unencrypted frames if the hardware has
1636	 * decrypted them already.
1637	 */
1638	if (status->flag & RX_FLAG_DECRYPTED)
1639		return 0;
1640
1641	if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
1642		if (unlikely(!ieee80211_has_protected(fc) &&
1643			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1644			     rx->key)) {
1645			if (ieee80211_is_deauth(fc))
1646				cfg80211_send_unprot_deauth(rx->sdata->dev,
1647							    rx->skb->data,
1648							    rx->skb->len);
1649			else if (ieee80211_is_disassoc(fc))
1650				cfg80211_send_unprot_disassoc(rx->sdata->dev,
1651							      rx->skb->data,
1652							      rx->skb->len);
1653			return -EACCES;
1654		}
1655		/* BIP does not use the Protected field, so we need to check the MMIE */
1656		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
1657			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
1658			if (ieee80211_is_deauth(fc))
1659				cfg80211_send_unprot_deauth(rx->sdata->dev,
1660							    rx->skb->data,
1661							    rx->skb->len);
1662			else if (ieee80211_is_disassoc(fc))
1663				cfg80211_send_unprot_disassoc(rx->sdata->dev,
1664							      rx->skb->data,
1665							      rx->skb->len);
1666			return -EACCES;
1667		}
1668		/*
1669		 * When using MFP, Action frames are not allowed prior to
1670		 * having configured keys.
1671		 */
1672		if (unlikely(ieee80211_is_action(fc) && !rx->key &&
1673			     ieee80211_is_robust_mgmt_frame(
1674				     (struct ieee80211_hdr *) rx->skb->data)))
1675			return -EACCES;
1676	}
1677
1678	return 0;
1679}
1680
1681static int
1682__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1683{
1684	struct ieee80211_sub_if_data *sdata = rx->sdata;
1685	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1686	bool check_port_control = false;
1687	struct ethhdr *ehdr;
1688	int ret;
1689
1690	*port_control = false;
1691	if (ieee80211_has_a4(hdr->frame_control) &&
1692	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1693		return -1;
1694
1695	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1696	    !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
1697
1698		if (!sdata->u.mgd.use_4addr)
1699			return -1;
1700		else
1701			check_port_control = true;
1702	}
1703
1704	if (is_multicast_ether_addr(hdr->addr1) &&
1705	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
1706		return -1;
1707
1708	ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1709	if (ret < 0)
1710		return ret;
1711
1712	ehdr = (struct ethhdr *) rx->skb->data;
1713	if (ehdr->h_proto == rx->sdata->control_port_protocol)
1714		*port_control = true;
1715	else if (check_port_control)
1716		return -1;
1717
1718	return 0;
1719}
1720
1721/*
1722 * requires that rx->skb is a frame with ethernet header
1723 */
1724static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1725{
1726	static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1727		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
1728	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1729
1730	/*
1731	 * Allow EAPOL frames to us/the PAE group address regardless
1732	 * of whether the frame was encrypted or not.
1733	 */
1734	if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1735	    (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1736	     compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1737		return true;
1738
1739	if (ieee80211_802_1x_port_control(rx) ||
1740	    ieee80211_drop_unencrypted(rx, fc))
1741		return false;
1742
1743	return true;
1744}
1745
1746/*
1747 * requires that rx->skb is a frame with ethernet header
1748 */
1749static void
1750ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1751{
1752	struct ieee80211_sub_if_data *sdata = rx->sdata;
1753	struct net_device *dev = sdata->dev;
1754	struct sk_buff *skb, *xmit_skb;
1755	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1756	struct sta_info *dsta;
1757	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1758
1759	skb = rx->skb;
1760	xmit_skb = NULL;
1761
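	/*
	 * In AP and AP_VLAN mode, frames for stations associated to this
	 * BSS may have to be bridged back onto the wireless medium, either
	 * in addition to (multicast) or instead of (known unicast peer)
	 * delivery to the local network stack.
	 */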
1762	if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1763	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1764	    !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1765	    (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
1766	    (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
1767		if (is_multicast_ether_addr(ehdr->h_dest)) {
1768			/*
1769			 * send multicast frames both to higher layers in
1770			 * local net stack and back to the wireless medium
1771			 */
1772			xmit_skb = skb_copy(skb, GFP_ATOMIC);
1773			if (!xmit_skb && net_ratelimit())
1774				printk(KERN_DEBUG "%s: failed to clone "
1775				       "multicast frame\n", dev->name);
1776		} else {
1777			dsta = sta_info_get(sdata, skb->data);
1778			if (dsta) {
1779				/*
1780				 * The destination station is associated to
1781				 * this AP (in this VLAN), so send the frame
1782				 * directly to it and do not pass it to local
1783				 * net stack.
1784				 */
1785				xmit_skb = skb;
1786				skb = NULL;
1787			}
1788		}
1789	}
1790
1791	if (skb) {
1792		int align __maybe_unused;
1793
1794#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1795		/*
1796		 * 'align' will only take the values 0 or 2 here
1797		 * since all frames are required to be aligned
1798		 * to 2-byte boundaries when being passed to
1799		 * mac80211. That also explains why the payload is
1800		 * moved back by up to two bytes below.
1801		 */
1802		align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1803		if (align) {
1804			if (WARN_ON(skb_headroom(skb) < 3)) {
1805				dev_kfree_skb(skb);
1806				skb = NULL;
1807			} else {
1808				u8 *data = skb->data;
1809				size_t len = skb_headlen(skb);
1810				skb->data -= align;
1811				memmove(skb->data, data, len);
1812				skb_set_tail_pointer(skb, len);
1813			}
1814		}
1815#endif
1816
1817		if (skb) {
1818			/* deliver to local stack */
1819			skb->protocol = eth_type_trans(skb, dev);
1820			memset(skb->cb, 0, sizeof(skb->cb));
1821			netif_receive_skb(skb);
1822		}
1823	}
1824
1825	if (xmit_skb) {
1826		/* send to wireless media */
1827		xmit_skb->protocol = htons(ETH_P_802_3);
1828		skb_reset_network_header(xmit_skb);
1829		skb_reset_mac_header(xmit_skb);
1830		dev_queue_xmit(xmit_skb);
1831	}
1832}
1833
1834static ieee80211_rx_result debug_noinline
1835ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1836{
1837	struct net_device *dev = rx->sdata->dev;
1838	struct sk_buff *skb = rx->skb;
1839	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1840	__le16 fc = hdr->frame_control;
1841	struct sk_buff_head frame_list;
1842	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1843
1844	if (unlikely(!ieee80211_is_data(fc)))
1845		return RX_CONTINUE;
1846
1847	if (unlikely(!ieee80211_is_data_present(fc)))
1848		return RX_DROP_MONITOR;
1849
1850	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
1851		return RX_CONTINUE;
1852
1853	if (ieee80211_has_a4(hdr->frame_control) &&
1854	    rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1855	    !rx->sdata->u.vlan.sta)
1856		return RX_DROP_UNUSABLE;
1857
1858	if (is_multicast_ether_addr(hdr->addr1) &&
1859	    ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1860	      rx->sdata->u.vlan.sta) ||
1861	     (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1862	      rx->sdata->u.mgd.use_4addr)))
1863		return RX_DROP_UNUSABLE;
1864
1865	skb->dev = dev;
1866	__skb_queue_head_init(&frame_list);
1867
1868	if (skb_linearize(skb))
1869		return RX_DROP_UNUSABLE;
1870
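	/* split the A-MSDU into individual 802.3 frames */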
1871	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1872				 rx->sdata->vif.type,
1873				 rx->local->hw.extra_tx_headroom, true);
1874
1875	while (!skb_queue_empty(&frame_list)) {
1876		rx->skb = __skb_dequeue(&frame_list);
1877
1878		if (!ieee80211_frame_allowed(rx, fc)) {
1879			dev_kfree_skb(rx->skb);
1880			continue;
1881		}
1882		dev->stats.rx_packets++;
1883		dev->stats.rx_bytes += rx->skb->len;
1884
1885		ieee80211_deliver_skb(rx);
1886	}
1887
1888	return RX_QUEUED;
1889}
1890
1891#ifdef CONFIG_MAC80211_MESH
1892static ieee80211_rx_result
1893ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1894{
1895	struct ieee80211_hdr *hdr;
1896	struct ieee80211s_hdr *mesh_hdr;
1897	unsigned int hdrlen;
1898	struct sk_buff *skb = rx->skb, *fwd_skb;
1899	struct ieee80211_local *local = rx->local;
1900	struct ieee80211_sub_if_data *sdata = rx->sdata;
1901	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1902	u16 q;
1903
1904	hdr = (struct ieee80211_hdr *) skb->data;
1905	hdrlen = ieee80211_hdrlen(hdr->frame_control);
1906	mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
1907
1908	/* frame is in the RMC (recent multicast cache), don't forward */
1909	if (ieee80211_is_data(hdr->frame_control) &&
1910	    is_multicast_ether_addr(hdr->addr1) &&
1911	    mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
1912		return RX_DROP_MONITOR;
1913
1914	if (!ieee80211_is_data(hdr->frame_control))
1915		return RX_CONTINUE;
1916
1917	if (!mesh_hdr->ttl)
1918		/* illegal frame */
1919		return RX_DROP_MONITOR;
1920
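	/*
	 * Address extension present: learn or refresh the proxy path for
	 * the proxied (external) address behind the mesh proxy that sent
	 * the frame.
	 */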
1921	if (mesh_hdr->flags & MESH_FLAGS_AE) {
1922		struct mesh_path *mppath;
1923		char *proxied_addr;
1924		char *mpp_addr;
1925
1926		if (is_multicast_ether_addr(hdr->addr1)) {
1927			mpp_addr = hdr->addr3;
1928			proxied_addr = mesh_hdr->eaddr1;
1929		} else {
1930			mpp_addr = hdr->addr4;
1931			proxied_addr = mesh_hdr->eaddr2;
1932		}
1933
1934		rcu_read_lock();
1935		mppath = mpp_path_lookup(proxied_addr, sdata);
1936		if (!mppath) {
1937			mpp_path_add(proxied_addr, mpp_addr, sdata);
1938		} else {
1939			spin_lock_bh(&mppath->state_lock);
1940			if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
1941				memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
1942			spin_unlock_bh(&mppath->state_lock);
1943		}
1944		rcu_read_unlock();
1945	}
1946
1947	/* Frame has reached destination.  Don't forward */
1948	if (!is_multicast_ether_addr(hdr->addr1) &&
1949	    compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1950		return RX_CONTINUE;
1951
1952	q = ieee80211_select_queue_80211(local, skb, hdr);
1953	if (ieee80211_queue_stopped(&local->hw, q)) {
1954		IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1955					     dropped_frames_congestion);
1956		return RX_DROP_MONITOR;
1957	}
1958	skb_set_queue_mapping(skb, q);
1959	mesh_hdr->ttl--;
1960
1961	if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
1962		if (!mesh_hdr->ttl)
1963			IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1964						     dropped_frames_ttl);
1965		else {
1966			struct ieee80211_hdr *fwd_hdr;
1967			struct ieee80211_tx_info *info;
1968
1969			fwd_skb = skb_copy(skb, GFP_ATOMIC);
1970
1971			if (!fwd_skb && net_ratelimit())
1972				printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1973						   sdata->name);
1974			if (!fwd_skb)
1975				goto out;
1976
1977			fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1978			info = IEEE80211_SKB_CB(fwd_skb);
1979			memset(info, 0, sizeof(*info));
1980			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1981			info->control.vif = &rx->sdata->vif;
1982			info->control.jiffies = jiffies;
1983			if (is_multicast_ether_addr(fwd_hdr->addr1)) {
1984				IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1985								fwded_mcast);
1986				memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1987			} else {
1988				int err;
1989				err = mesh_nexthop_lookup(fwd_skb, sdata);
1990				/* Failed to immediately resolve next hop:
1991				 * forwarded frame was dropped or will be added
1992				 * later to the pending skb queue.  */
1993				if (err)
1994					return RX_DROP_MONITOR;
1995
1996				IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1997								fwded_unicast);
1998			}
1999			IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
2000						     fwded_frames);
2001			ieee80211_add_pending_skb(local, fwd_skb);
2002		}
2003	}
2004
2005 out:
2006	if (is_multicast_ether_addr(hdr->addr1) ||
2007	    sdata->dev->flags & IFF_PROMISC)
2008		return RX_CONTINUE;
2009	else
2010		return RX_DROP_MONITOR;
2011}
2012#endif
2013
2014static ieee80211_rx_result debug_noinline
2015ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2016{
2017	struct ieee80211_sub_if_data *sdata = rx->sdata;
2018	struct ieee80211_local *local = rx->local;
2019	struct net_device *dev = sdata->dev;
2020	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2021	__le16 fc = hdr->frame_control;
2022	bool port_control;
2023	int err;
2024
2025	if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2026		return RX_CONTINUE;
2027
2028	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2029		return RX_DROP_MONITOR;
2030
2031	/*
2032	 * Send unexpected-4addr-frame event to hostapd. For older versions,
2033	 * also drop the frame to cooked monitor interfaces.
2034	 */
2035	if (ieee80211_has_a4(hdr->frame_control) &&
2036	    sdata->vif.type == NL80211_IFTYPE_AP) {
2037		if (rx->sta &&
2038		    !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2039			cfg80211_rx_unexpected_4addr_frame(
2040				rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2041		return RX_DROP_MONITOR;
2042	}
2043
2044	err = __ieee80211_data_to_8023(rx, &port_control);
2045	if (unlikely(err))
2046		return RX_DROP_UNUSABLE;
2047
2048	if (!ieee80211_frame_allowed(rx, fc))
2049		return RX_DROP_MONITOR;
2050
2051	if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2052	    unlikely(port_control) && sdata->bss) {
2053		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2054				     u.ap);
2055		dev = sdata->dev;
2056		rx->sdata = sdata;
2057	}
2058
2059	rx->skb->dev = dev;
2060
2061	dev->stats.rx_packets++;
2062	dev->stats.rx_bytes += rx->skb->len;
2063
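	/* unicast RX while not scanning or off-channel defers dynamic PS */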
2064	if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2065	    !is_multicast_ether_addr(
2066		    ((struct ethhdr *)rx->skb->data)->h_dest) &&
2067	    (!local->scanning &&
2068	     !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
2069		mod_timer(&local->dynamic_ps_timer, jiffies +
2070			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2071	}
2072
2073	ieee80211_deliver_skb(rx);
2074
2075	return RX_QUEUED;
2076}
2077
2078static ieee80211_rx_result debug_noinline
2079ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
2080{
2081	struct ieee80211_local *local = rx->local;
2082	struct ieee80211_hw *hw = &local->hw;
2083	struct sk_buff *skb = rx->skb;
2084	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2085	struct tid_ampdu_rx *tid_agg_rx;
2086	u16 start_seq_num;
2087	u16 tid;
2088
2089	if (likely(!ieee80211_is_ctl(bar->frame_control)))
2090		return RX_CONTINUE;
2091
2092	if (ieee80211_is_back_req(bar->frame_control)) {
2093		struct {
2094			__le16 control, start_seq_num;
2095		} __packed bar_data;
2096
2097		if (!rx->sta)
2098			return RX_DROP_MONITOR;
2099
2100		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2101				  &bar_data, sizeof(bar_data)))
2102			return RX_DROP_MONITOR;
2103
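		/* the TID is carried in bits 12-15 of the BAR control field */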
2104		tid = le16_to_cpu(bar_data.control) >> 12;
2105
2106		tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2107		if (!tid_agg_rx)
2108			return RX_DROP_MONITOR;
2109
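		/* the SSN occupies bits 4-15 of the starting sequence control */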
2110		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2111
2112		/* reset session timer */
2113		if (tid_agg_rx->timeout)
2114			mod_timer(&tid_agg_rx->session_timer,
2115				  TU_TO_EXP_TIME(tid_agg_rx->timeout));
2116
2117		spin_lock(&tid_agg_rx->reorder_lock);
2118		/* release stored frames up to start of BAR */
2119		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
2120		spin_unlock(&tid_agg_rx->reorder_lock);
2121
2122		kfree_skb(skb);
2123		return RX_QUEUED;
2124	}
2125
2126	/*
2127	 * After this point, we only want management frames,
2128	 * so we can drop all remaining control frames to
2129	 * cooked monitor interfaces.
2130	 */
2131	return RX_DROP_MONITOR;
2132}
2133
2134static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2135					   struct ieee80211_mgmt *mgmt,
2136					   size_t len)
2137{
2138	struct ieee80211_local *local = sdata->local;
2139	struct sk_buff *skb;
2140	struct ieee80211_mgmt *resp;
2141
2142	if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
2143		/* Not addressed to our own unicast address */
2144		return;
2145	}
2146
2147	if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
2148	    compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
2149		/* Not from the current AP or not associated yet. */
2150		return;
2151	}
2152
2153	if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2154		/* Too short SA Query request frame */
2155		return;
2156	}
2157
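	/* build an SA Query Response echoing the request's transaction ID */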
2158	skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2159	if (skb == NULL)
2160		return;
2161
2162	skb_reserve(skb, local->hw.extra_tx_headroom);
2163	resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
2164	memset(resp, 0, 24);
2165	memcpy(resp->da, mgmt->sa, ETH_ALEN);
2166	memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2167	memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2168	resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2169					  IEEE80211_STYPE_ACTION);
2170	skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2171	resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2172	resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2173	memcpy(resp->u.action.u.sa_query.trans_id,
2174	       mgmt->u.action.u.sa_query.trans_id,
2175	       WLAN_SA_QUERY_TR_ID_LEN);
2176
2177	ieee80211_tx_skb(sdata, skb);
2178}
2179
2180static ieee80211_rx_result debug_noinline
2181ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2182{
2183	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2184	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2185
2186	/*
2187	 * From here on, look only at management frames.
2188	 * Data and control frames are already handled,
2189	 * and unknown (reserved) frames are useless.
2190	 */
2191	if (rx->skb->len < 24)
2192		return RX_DROP_MONITOR;
2193
2194	if (!ieee80211_is_mgmt(mgmt->frame_control))
2195		return RX_DROP_MONITOR;
2196
2197	if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2198	    ieee80211_is_beacon(mgmt->frame_control) &&
2199	    !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2200		struct ieee80211_rx_status *status;
2201
2202		status = IEEE80211_SKB_RXCB(rx->skb);
2203		cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2204					    rx->skb->data, rx->skb->len,
2205					    status->freq, GFP_ATOMIC);
2206		rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2207	}
2208
2209	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2210		return RX_DROP_MONITOR;
2211
2212	if (ieee80211_drop_unencrypted_mgmt(rx))
2213		return RX_DROP_UNUSABLE;
2214
2215	return RX_CONTINUE;
2216}
2217
2218static ieee80211_rx_result debug_noinline
2219ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2220{
2221	struct ieee80211_local *local = rx->local;
2222	struct ieee80211_sub_if_data *sdata = rx->sdata;
2223	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2224	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2225	int len = rx->skb->len;
2226
2227	if (!ieee80211_is_action(mgmt->frame_control))
2228		return RX_CONTINUE;
2229
2230	/* drop too small frames */
2231	if (len < IEEE80211_MIN_ACTION_SIZE)
2232		return RX_DROP_UNUSABLE;
2233
2234	if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
2235		return RX_DROP_UNUSABLE;
2236
2237	if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
2238		return RX_DROP_UNUSABLE;
2239
2240	switch (mgmt->u.action.category) {
2241	case WLAN_CATEGORY_BACK:
2242		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2243		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2244		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2245		    sdata->vif.type != NL80211_IFTYPE_AP)
2246			break;
2247
2248		/* verify action_code is present */
2249		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2250			break;
2251
2252		switch (mgmt->u.action.u.addba_req.action_code) {
2253		case WLAN_ACTION_ADDBA_REQ:
2254			if (len < (IEEE80211_MIN_ACTION_SIZE +
2255				   sizeof(mgmt->u.action.u.addba_req)))
2256				goto invalid;
2257			break;
2258		case WLAN_ACTION_ADDBA_RESP:
2259			if (len < (IEEE80211_MIN_ACTION_SIZE +
2260				   sizeof(mgmt->u.action.u.addba_resp)))
2261				goto invalid;
2262			break;
2263		case WLAN_ACTION_DELBA:
2264			if (len < (IEEE80211_MIN_ACTION_SIZE +
2265				   sizeof(mgmt->u.action.u.delba)))
2266				goto invalid;
2267			break;
2268		default:
2269			goto invalid;
2270		}
2271
2272		goto queue;
2273	case WLAN_CATEGORY_SPECTRUM_MGMT:
2274		if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
2275			break;
2276
2277		if (sdata->vif.type != NL80211_IFTYPE_STATION)
2278			break;
2279
2280		/* verify action_code is present */
2281		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
2282			break;
2283
2284		switch (mgmt->u.action.u.measurement.action_code) {
2285		case WLAN_ACTION_SPCT_MSR_REQ:
2286			if (len < (IEEE80211_MIN_ACTION_SIZE +
2287				   sizeof(mgmt->u.action.u.measurement)))
2288				break;
2289			ieee80211_process_measurement_req(sdata, mgmt, len);
2290			goto handled;
2291		case WLAN_ACTION_SPCT_CHL_SWITCH:
2292			if (len < (IEEE80211_MIN_ACTION_SIZE +
2293				   sizeof(mgmt->u.action.u.chan_switch)))
2294				break;
2295
2296			if (sdata->vif.type != NL80211_IFTYPE_STATION)
2297				break;
2298
2299			if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
2300				break;
2301
2302			goto queue;
2303		}
2304		break;
2305	case WLAN_CATEGORY_SA_QUERY:
2306		if (len < (IEEE80211_MIN_ACTION_SIZE +
2307			   sizeof(mgmt->u.action.u.sa_query)))
2308			break;
2309
2310		switch (mgmt->u.action.u.sa_query.action) {
2311		case WLAN_ACTION_SA_QUERY_REQUEST:
2312			if (sdata->vif.type != NL80211_IFTYPE_STATION)
2313				break;
2314			ieee80211_process_sa_query_req(sdata, mgmt, len);
2315			goto handled;
2316		}
2317		break;
2318	case WLAN_CATEGORY_SELF_PROTECTED:
2319		switch (mgmt->u.action.u.self_prot.action_code) {
2320		case WLAN_SP_MESH_PEERING_OPEN:
2321		case WLAN_SP_MESH_PEERING_CLOSE:
2322		case WLAN_SP_MESH_PEERING_CONFIRM:
2323			if (!ieee80211_vif_is_mesh(&sdata->vif))
2324				goto invalid;
2325			if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
2326				/* userspace handles this frame */
2327				break;
2328			goto queue;
2329		case WLAN_SP_MGK_INFORM:
2330		case WLAN_SP_MGK_ACK:
2331			if (!ieee80211_vif_is_mesh(&sdata->vif))
2332				goto invalid;
2333			break;
2334		}
2335		break;
2336	case WLAN_CATEGORY_MESH_ACTION:
2337		if (!ieee80211_vif_is_mesh(&sdata->vif))
2338			break;
2339		if (mesh_action_is_path_sel(mgmt) &&
2340		    !mesh_path_sel_is_hwmp(sdata))
2341			break;
2342		goto queue;
2343	}
2344
2345	return RX_CONTINUE;
2346
2347 invalid:
2348	status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
2349	/* will return in the next handlers */
2350	return RX_CONTINUE;
2351
2352 handled:
2353	if (rx->sta)
2354		rx->sta->rx_packets++;
2355	dev_kfree_skb(rx->skb);
2356	return RX_QUEUED;
2357
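	/* defer the frame to the interface's work queue for further processing */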
2358 queue:
2359	rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2360	skb_queue_tail(&sdata->skb_queue, rx->skb);
2361	ieee80211_queue_work(&local->hw, &sdata->work);
2362	if (rx->sta)
2363		rx->sta->rx_packets++;
2364	return RX_QUEUED;
2365}
2366
2367static ieee80211_rx_result debug_noinline
2368ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2369{
2370	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2371
2372	/* skip known-bad action frames and return them in the next handler */
2373	if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
2374		return RX_CONTINUE;
2375
2376	/*
2377	 * Getting here means the kernel doesn't know how to handle
2378	 * it, but maybe userspace does ... include returned frames
2379	 * so userspace can register for those to know whether ones
2380	 * it transmitted were processed or returned.
2381	 */
2382
2383	if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
2384			     rx->skb->data, rx->skb->len,
2385			     GFP_ATOMIC)) {
2386		if (rx->sta)
2387			rx->sta->rx_packets++;
2388		dev_kfree_skb(rx->skb);
2389		return RX_QUEUED;
2390	}
2391
2393	return RX_CONTINUE;
2394}
2395
2396static ieee80211_rx_result debug_noinline
2397ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2398{
2399	struct ieee80211_local *local = rx->local;
2400	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2401	struct sk_buff *nskb;
2402	struct ieee80211_sub_if_data *sdata = rx->sdata;
2403	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2404
2405	if (!ieee80211_is_action(mgmt->frame_control))
2406		return RX_CONTINUE;
2407
2408	/*
2409	 * For AP mode, hostapd is responsible for handling any action
2410	 * frames that we didn't handle, including returning unknown
2411	 * ones. For all other modes we will return them to the sender,
2412	 * setting the 0x80 bit in the action category, as required by
2413	 * 802.11-2007 7.3.1.11.
2414	 * Newer versions of hostapd shall also use the management frame
2415	 * registration mechanisms, but older ones still use cooked
2416	 * monitor interfaces so push all frames there.
2417	 */
2418	if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
2419	    (sdata->vif.type == NL80211_IFTYPE_AP ||
2420	     sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2421		return RX_DROP_MONITOR;
2422
2423	/* do not return rejected action frames */
2424	if (mgmt->u.action.category & 0x80)
2425		return RX_DROP_UNUSABLE;
2426
2427	nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2428			       GFP_ATOMIC);
2429	if (nskb) {
2430		struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2431
2432		nmgmt->u.action.category |= 0x80;
2433		memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2434		memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2435
2436		memset(nskb->cb, 0, sizeof(nskb->cb));
2437
2438		ieee80211_tx_skb(rx->sdata, nskb);
2439	}
2440	dev_kfree_skb(rx->skb);
2441	return RX_QUEUED;
2442}
2443
2444static ieee80211_rx_result debug_noinline
2445ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2446{
2447	struct ieee80211_sub_if_data *sdata = rx->sdata;
2448	ieee80211_rx_result rxs;
2449	struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2450	__le16 stype;
2451
2452	rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2453	if (rxs != RX_CONTINUE)
2454		return rxs;
2455
2456	stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2457
2458	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2459	    sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2460	    sdata->vif.type != NL80211_IFTYPE_STATION)
2461		return RX_DROP_MONITOR;
2462
2463	switch (stype) {
2464	case cpu_to_le16(IEEE80211_STYPE_BEACON):
2465	case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2466		/* process for all: mesh, mlme, ibss */
2467		break;
2468	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2469	case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2470		if (is_multicast_ether_addr(mgmt->da) &&
2471		    !is_broadcast_ether_addr(mgmt->da))
2472			return RX_DROP_MONITOR;
2473
2474		/* process only for station */
2475		if (sdata->vif.type != NL80211_IFTYPE_STATION)
2476			return RX_DROP_MONITOR;
2477		break;
2478	case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2479	case cpu_to_le16(IEEE80211_STYPE_AUTH):
2480		/* process only for ibss */
2481		if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2482			return RX_DROP_MONITOR;
2483		break;
2484	default:
2485		return RX_DROP_MONITOR;
2486	}
2487
2488	/* queue up frame and kick off work to process it */
2489	rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2490	skb_queue_tail(&sdata->skb_queue, rx->skb);
2491	ieee80211_queue_work(&rx->local->hw, &sdata->work);
2492	if (rx->sta)
2493		rx->sta->rx_packets++;
2494
2495	return RX_QUEUED;
2496}
2497
2498/* TODO: use IEEE80211_RX_FRAGMENTED */
2499static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2500					struct ieee80211_rate *rate)
2501{
2502	struct ieee80211_sub_if_data *sdata;
2503	struct ieee80211_local *local = rx->local;
2504	struct ieee80211_rtap_hdr {
2505		struct ieee80211_radiotap_header hdr;
2506		u8 flags;
2507		u8 rate_or_pad;
2508		__le16 chan_freq;
2509		__le16 chan_flags;
2510	} __packed *rthdr;
2511	struct sk_buff *skb = rx->skb, *skb2;
2512	struct net_device *prev_dev = NULL;
2513	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2514
2515	/*
2516	 * If cooked monitor has been processed already, then
2517	 * don't do it again. If not, set the flag.
2518	 */
2519	if (rx->flags & IEEE80211_RX_CMNTR)
2520		goto out_free_skb;
2521	rx->flags |= IEEE80211_RX_CMNTR;
2522
2523	/* If there are no cooked monitor interfaces, just free the SKB */
2524	if (!local->cooked_mntrs)
2525		goto out_free_skb;
2526
2527	if (skb_headroom(skb) < sizeof(*rthdr) &&
2528	    pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
2529		goto out_free_skb;
2530
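	/* prepend a minimal radiotap header: flags, rate (if known), channel */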
2531	rthdr = (void *)skb_push(skb, sizeof(*rthdr));
2532	memset(rthdr, 0, sizeof(*rthdr));
2533	rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
2534	rthdr->hdr.it_present =
2535		cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
2536			    (1 << IEEE80211_RADIOTAP_CHANNEL));
2537
2538	if (rate) {
2539		rthdr->rate_or_pad = rate->bitrate / 5;
2540		rthdr->hdr.it_present |=
2541			cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
2542	}
2543	rthdr->chan_freq = cpu_to_le16(status->freq);
2544
2545	if (status->band == IEEE80211_BAND_5GHZ)
2546		rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
2547						IEEE80211_CHAN_5GHZ);
2548	else
2549		rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
2550						IEEE80211_CHAN_2GHZ);
2551
2552	skb_set_mac_header(skb, 0);
2553	skb->ip_summed = CHECKSUM_UNNECESSARY;
2554	skb->pkt_type = PACKET_OTHERHOST;
2555	skb->protocol = htons(ETH_P_802_2);
2556
2557	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2558		if (!ieee80211_sdata_running(sdata))
2559			continue;
2560
2561		if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
2562		    !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
2563			continue;
2564
2565		if (prev_dev) {
2566			skb2 = skb_clone(skb, GFP_ATOMIC);
2567			if (skb2) {
2568				skb2->dev = prev_dev;
2569				netif_receive_skb(skb2);
2570			}
2571		}
2572
2573		prev_dev = sdata->dev;
2574		sdata->dev->stats.rx_packets++;
2575		sdata->dev->stats.rx_bytes += skb->len;
2576	}
2577
2578	if (prev_dev) {
2579		skb->dev = prev_dev;
2580		netif_receive_skb(skb);
2581		return;
2582	}
2583
2584 out_free_skb:
2585	dev_kfree_skb(skb);
2586}
2587
2588static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2589					 ieee80211_rx_result res)
2590{
2591	switch (res) {
2592	case RX_DROP_MONITOR:
2593		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2594		if (rx->sta)
2595			rx->sta->rx_dropped++;
2596		/* fall through */
2597	case RX_CONTINUE: {
2598		struct ieee80211_rate *rate = NULL;
2599		struct ieee80211_supported_band *sband;
2600		struct ieee80211_rx_status *status;
2601
2602		status = IEEE80211_SKB_RXCB(rx->skb);
2603
2604		sband = rx->local->hw.wiphy->bands[status->band];
2605		if (!(status->flag & RX_FLAG_HT))
2606			rate = &sband->bitrates[status->rate_idx];
2607
2608		ieee80211_rx_cooked_monitor(rx, rate);
2609		break;
2610		}
2611	case RX_DROP_UNUSABLE:
2612		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2613		if (rx->sta)
2614			rx->sta->rx_dropped++;
2615		dev_kfree_skb(rx->skb);
2616		break;
2617	case RX_QUEUED:
2618		I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2619		break;
2620	}
2621}
2622
2623static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
2624{
2625	ieee80211_rx_result res = RX_DROP_MONITOR;
2626	struct sk_buff *skb;
2627
2628#define CALL_RXH(rxh)			\
2629	do {				\
2630		res = rxh(rx);		\
2631		if (res != RX_CONTINUE)	\
2632			goto rxh_next;  \
2633	} while (0);
2634
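	/*
	 * Only one context runs the RX handlers at a time; if another one
	 * is already active it will also process any frames we have queued
	 * on rx_skb_queue, so just bail out here.
	 */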
2635	spin_lock(&rx->local->rx_skb_queue.lock);
2636	if (rx->local->running_rx_handler)
2637		goto unlock;
2638
2639	rx->local->running_rx_handler = true;
2640
2641	while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
2642		spin_unlock(&rx->local->rx_skb_queue.lock);
2643
2644		/*
2645		 * all the other fields remain valid across frames
2646		 * that belong to the same A-MPDU since they are on the
2647		 * same TID from the same station
2648		 */
2649		rx->skb = skb;
2650
2651		CALL_RXH(ieee80211_rx_h_decrypt)
2652		CALL_RXH(ieee80211_rx_h_check_more_data)
2653		CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
2654		CALL_RXH(ieee80211_rx_h_sta_process)
2655		CALL_RXH(ieee80211_rx_h_defragment)
2656		CALL_RXH(ieee80211_rx_h_michael_mic_verify)
2657		/* must be after MMIC verify so header is counted in MPDU mic */
2658#ifdef CONFIG_MAC80211_MESH
2659		if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2660			CALL_RXH(ieee80211_rx_h_mesh_fwding);
2661#endif
2662		CALL_RXH(ieee80211_rx_h_remove_qos_control)
2663		CALL_RXH(ieee80211_rx_h_amsdu)
2664		CALL_RXH(ieee80211_rx_h_data)
2665		CALL_RXH(ieee80211_rx_h_ctrl)
2666		CALL_RXH(ieee80211_rx_h_mgmt_check)
2667		CALL_RXH(ieee80211_rx_h_action)
2668		CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2669		CALL_RXH(ieee80211_rx_h_action_return)
2670		CALL_RXH(ieee80211_rx_h_mgmt)
2671
2672 rxh_next:
2673		ieee80211_rx_handlers_result(rx, res);
2674		spin_lock(&rx->local->rx_skb_queue.lock);
2675#undef CALL_RXH
2676	}
2677
2678	rx->local->running_rx_handler = false;
2679
2680 unlock:
2681	spin_unlock(&rx->local->rx_skb_queue.lock);
2682}
2683
2684static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
2685{
2686	ieee80211_rx_result res = RX_DROP_MONITOR;
2687
2688#define CALL_RXH(rxh)			\
2689	do {				\
2690		res = rxh(rx);		\
2691		if (res != RX_CONTINUE)	\
2692			goto rxh_next;  \
2693	} while (0);
2694
2695	CALL_RXH(ieee80211_rx_h_passive_scan)
2696	CALL_RXH(ieee80211_rx_h_check)
2697
2698	ieee80211_rx_reorder_ampdu(rx);
2699
2700	ieee80211_rx_handlers(rx);
2701	return;
2702
2703 rxh_next:
2704	ieee80211_rx_handlers_result(rx, res);
2705
2706#undef CALL_RXH
2707}
2708
2709/*
2710 * This function makes calls into the RX path, therefore
2711 * it has to be invoked under RCU read lock.
2712 */
2713void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2714{
2715	struct ieee80211_rx_data rx = {
2716		.sta = sta,
2717		.sdata = sta->sdata,
2718		.local = sta->local,
2719		/* This is OK -- must be QoS data frame */
2720		.security_idx = tid,
2721		.seqno_idx = tid,
2722		.flags = 0,
2723	};
2724	struct tid_ampdu_rx *tid_agg_rx;
2725
2726	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2727	if (!tid_agg_rx)
2728		return;
2729
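	/* release frames stuck in the reorder buffer, then run the handlers */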
2730	spin_lock(&tid_agg_rx->reorder_lock);
2731	ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
2732	spin_unlock(&tid_agg_rx->reorder_lock);
2733
2734	ieee80211_rx_handlers(&rx);
2735}
2736
2737/* main receive path */
2738
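/*
 * Check whether this interface should handle the frame at all and clear
 * IEEE80211_RX_RA_MATCH if it is only accepted because of promiscuous
 * mode or scanning; returns 0 to skip the frame for this interface.
 */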
2739static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2740				struct ieee80211_hdr *hdr)
2741{
2742	struct ieee80211_sub_if_data *sdata = rx->sdata;
2743	struct sk_buff *skb = rx->skb;
2744	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2745	u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
2746	int multicast = is_multicast_ether_addr(hdr->addr1);
2747
2748	switch (sdata->vif.type) {
2749	case NL80211_IFTYPE_STATION:
2750		if (!bssid && !sdata->u.mgd.use_4addr)
2751			return 0;
2752		if (!multicast &&
2753		    compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2754			if (!(sdata->dev->flags & IFF_PROMISC) ||
2755			    sdata->u.mgd.use_4addr)
2756				return 0;
2757			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2758		}
2759		break;
2760	case NL80211_IFTYPE_ADHOC:
2761		if (!bssid)
2762			return 0;
2763		if (ieee80211_is_beacon(hdr->frame_control)) {
2764			return 1;
2765		} else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2767			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
2768				return 0;
2769			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2770		} else if (!multicast &&
2771			   compare_ether_addr(sdata->vif.addr,
2772					      hdr->addr1) != 0) {
2773			if (!(sdata->dev->flags & IFF_PROMISC))
2774				return 0;
2775			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2776		} else if (!rx->sta) {
2777			int rate_idx;
2778			if (status->flag & RX_FLAG_HT)
2779				rate_idx = 0; /* TODO: HT rates */
2780			else
2781				rate_idx = status->rate_idx;
2782			rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2783					hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2784		}
2785		break;
2786	case NL80211_IFTYPE_MESH_POINT:
2787		if (!multicast &&
2788		    compare_ether_addr(sdata->vif.addr,
2789				       hdr->addr1) != 0) {
2790			if (!(sdata->dev->flags & IFF_PROMISC))
2791				return 0;
2792
2793			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2794		}
2795		break;
2796	case NL80211_IFTYPE_AP_VLAN:
2797	case NL80211_IFTYPE_AP:
2798		if (!bssid) {
2799			if (compare_ether_addr(sdata->vif.addr,
2800					       hdr->addr1))
2801				return 0;
2802		} else if (!ieee80211_bssid_match(bssid,
2803					sdata->vif.addr)) {
2804			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
2805			    !ieee80211_is_beacon(hdr->frame_control) &&
2806			    !(ieee80211_is_action(hdr->frame_control) &&
2807			      sdata->vif.p2p))
2808				return 0;
2809			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2810		}
2811		break;
2812	case NL80211_IFTYPE_WDS:
2813		if (bssid || !ieee80211_is_data(hdr->frame_control))
2814			return 0;
2815		if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2816			return 0;
2817		break;
2818	default:
2819		/* should never get here */
2820		WARN_ON(1);
2821		break;
2822	}
2823
2824	return 1;
2825}
2826
2827/*
2828 * This function returns whether or not the SKB was
2829 * destined for RX processing, which, if consume is
2830 * true, is equivalent to whether or not the skb
2831 * was consumed.
2832 */
2833static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2834					    struct sk_buff *skb, bool consume)
2835{
2836	struct ieee80211_local *local = rx->local;
2837	struct ieee80211_sub_if_data *sdata = rx->sdata;
2838	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2839	struct ieee80211_hdr *hdr = (void *)skb->data;
2840	int prepares;
2841
2842	rx->skb = skb;
2843	status->rx_flags |= IEEE80211_RX_RA_MATCH;
2844	prepares = prepare_for_handlers(rx, hdr);
2845
2846	if (!prepares)
2847		return false;
2848
2849	if (!consume) {
2850		skb = skb_copy(skb, GFP_ATOMIC);
2851		if (!skb) {
2852			if (net_ratelimit())
2853				wiphy_debug(local->hw.wiphy,
2854					"failed to copy skb for %s\n",
2855					sdata->name);
2856			return true;
2857		}
2858
2859		rx->skb = skb;
2860	}
2861
2862	ieee80211_invoke_rx_handlers(rx);
2863	return true;
2864}
2865
2866/*
2867 * This is the actual Rx frames handler. As it belongs to the Rx path it must
2868 * be called with rcu_read_lock protection.
2869 */
2870static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2871					 struct sk_buff *skb)
2872{
2873	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2874	struct ieee80211_local *local = hw_to_local(hw);
2875	struct ieee80211_sub_if_data *sdata;
2876	struct ieee80211_hdr *hdr;
2877	__le16 fc;
2878	struct ieee80211_rx_data rx;
2879	struct ieee80211_sub_if_data *prev;
2880	struct sta_info *sta, *tmp, *prev_sta;
2881	int err = 0;
2882
2883	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
2884	memset(&rx, 0, sizeof(rx));
2885	rx.skb = skb;
2886	rx.local = local;
2887
2888	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
2889		local->dot11ReceivedFragmentCount++;
2890
2891	if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2892		     test_bit(SCAN_SW_SCANNING, &local->scanning)))
2893		status->rx_flags |= IEEE80211_RX_IN_SCAN;
2894
2895	if (ieee80211_is_mgmt(fc))
2896		err = skb_linearize(skb);
2897	else
2898		err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
2899
2900	if (err) {
2901		dev_kfree_skb(skb);
2902		return;
2903	}
2904
2905	hdr = (struct ieee80211_hdr *)skb->data;
2906	ieee80211_parse_qos(&rx);
2907	ieee80211_verify_alignment(&rx);
2908
2909	if (ieee80211_is_data(fc)) {
2910		prev_sta = NULL;
2911
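		/*
		 * Deliver the frame to every interface that has a station
		 * entry for the transmitter; all but the last match get a
		 * copy, the last one may consume the original skb.
		 */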
2912		for_each_sta_info_rx(local, hdr->addr2, sta, tmp) {
2913			if (!prev_sta) {
2914				prev_sta = sta;
2915				continue;
2916			}
2917
2918			rx.sta = prev_sta;
2919			rx.sdata = prev_sta->sdata;
2920			ieee80211_prepare_and_rx_handle(&rx, skb, false);
2921
2922			prev_sta = sta;
2923		}
2924
2925		if (prev_sta) {
2926			rx.sta = prev_sta;
2927			rx.sdata = prev_sta->sdata;
2928
2929			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2930				return;
2931			goto out;
2932		}
2933	}
2934
2935	prev = NULL;
2936
2937	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2938		if (!ieee80211_sdata_running(sdata))
2939			continue;
2940
2941		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2942		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2943			continue;
2944
2945		/*
2946		 * frame is destined for this interface; the last matching
2947		 * interface is handled after the loop so the original SKB
2948		 * can be consumed there rather than copied once more
2949		 */
2950
2951		if (!prev) {
2952			prev = sdata;
2953			continue;
2954		}
2955
2956		rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2957		rx.sdata = prev;
2958		ieee80211_prepare_and_rx_handle(&rx, skb, false);
2959
2960		prev = sdata;
2961	}
2962
2963	if (prev) {
2964		rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
2965		rx.sdata = prev;
2966
2967		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
2968			return;
2969	}
2970
2971 out:
2972	dev_kfree_skb(skb);
2973}
2974
2975/*
2976 * This is the receive path handler. It is called by a low level driver when an
2977 * 802.11 MPDU is received from the hardware.
2978 */
2979void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2980{
2981	struct ieee80211_local *local = hw_to_local(hw);
2982	struct ieee80211_rate *rate = NULL;
2983	struct ieee80211_supported_band *sband;
2984	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2985
2986	WARN_ON_ONCE(softirq_count() == 0);
2987
2988	if (WARN_ON(status->band < 0 ||
2989		    status->band >= IEEE80211_NUM_BANDS))
2990		goto drop;
2991
2992	sband = local->hw.wiphy->bands[status->band];
2993	if (WARN_ON(!sband))
2994		goto drop;
2995
2996	/*
2997	 * If we're suspending, it is possible although not too likely
2998	 * that we'd be receiving frames after having already partially
2999	 * quiesced the stack. We can't process such frames then since
3000	 * that might, for example, cause stations to be added or other
3001	 * driver callbacks be invoked.
3002	 */
3003	if (unlikely(local->quiescing || local->suspended))
3004		goto drop;
3005
3006	/*
3007	 * The same happens when we're not even started,
3008	 * but that's worth a warning.
3009	 */
3010	if (WARN_ON(!local->started))
3011		goto drop;
3012
3013	if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
3014		/*
3015		 * Validate the rate, unless a PLCP error means that
3016		 * we probably can't have a valid rate here anyway.
3017		 */
3018
3019		if (status->flag & RX_FLAG_HT) {
3020			/*
3021			 * rate_idx is MCS index, which can be [0-76]
3022			 * as documented on:
3023			 *
3024			 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
3025			 *
3026			 * Anything else would be some sort of driver or
3027			 * hardware error. The driver should catch hardware
3028			 * errors.
3029			 */
3030			if (WARN((status->rate_idx < 0 ||
3031				 status->rate_idx > 76),
3032				 "Rate marked as an HT rate but passed "
3033				 "status->rate_idx is not "
3034				 "an MCS index [0-76]: %d (0x%02x)\n",
3035				 status->rate_idx,
3036				 status->rate_idx))
3037				goto drop;
3038		} else {
3039			if (WARN_ON(status->rate_idx < 0 ||
3040				    status->rate_idx >= sband->n_bitrates))
3041				goto drop;
3042			rate = &sband->bitrates[status->rate_idx];
3043		}
3044	}
3045
3046	status->rx_flags = 0;
3047
3048	/*
3049	 * key references and virtual interfaces are protected using RCU
3050	 * and this requires that we are in a read-side RCU section during
3051	 * receive processing
3052	 */
3053	rcu_read_lock();
3054
3055	/*
3056	 * Frames with failed FCS/PLCP checksum are not returned,
3057	 * all other frames are returned without radiotap header
3058	 * if it was previously present.
3059	 * Also, frames with less than 16 bytes are dropped.
3060	 */
3061	skb = ieee80211_rx_monitor(local, skb, rate);
3062	if (!skb) {
3063		rcu_read_unlock();
3064		return;
3065	}
3066
3067	ieee80211_tpt_led_trig_rx(local,
3068			((struct ieee80211_hdr *)skb->data)->frame_control,
3069			skb->len);
3070	__ieee80211_rx_handle_packet(hw, skb);
3071
3072	rcu_read_unlock();
3073
3074	return;
3075 drop:
3076	kfree_skb(skb);
3077}
3078EXPORT_SYMBOL(ieee80211_rx);
3079
3080/* This is a version of the rx handler that can be called from hard irq
3081 * context. Post the skb on the queue and schedule the tasklet */
3082void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
3083{
3084	struct ieee80211_local *local = hw_to_local(hw);
3085
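	/* the RX status lives in skb->cb, so it has to fit in there */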
3086	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
3087
3088	skb->pkt_type = IEEE80211_RX_MSG;
3089	skb_queue_tail(&local->skb_queue, skb);
3090	tasklet_schedule(&local->tasklet);
3091}
3092EXPORT_SYMBOL(ieee80211_rx_irqsafe);
3093