/* wme.c revision e8a0464cc950972824e2e128028ae3db666ec1ed */
1/*
2 * Copyright 2004, Instant802 Networks, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/netdevice.h>
10#include <linux/skbuff.h>
11#include <linux/module.h>
12#include <linux/if_arp.h>
13#include <linux/types.h>
14#include <net/ip.h>
15#include <net/pkt_sched.h>
16
17#include <net/mac80211.h>
18#include "ieee80211_i.h"
19#include "wme.h"
20
/* maximum number of hardware queues we support. */
#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
/* current number of hardware queues we support. */
#define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)

/*
 * Default mapping in classifier to work with default
 * queue setup.
 *
 * Indexed by 802.1d user priority (0-7); the value is the
 * access-category queue index used for that priority.
 */
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
31
/* per-qdisc private state for the ieee80211 root qdisc */
struct ieee80211_sched_data
{
	/* bitmap of hardware queues currently in use (one bit per queue) */
	unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
	/* user-attached classifier chain, if any (see wme_classop_find_tcf) */
	struct tcf_proto *filter_list;
	/* one child qdisc per hardware queue */
	struct Qdisc *queues[QD_MAX_QUEUES];
	/* per-queue lists of frames handed back for requeueing */
	struct sk_buff_head requeued[QD_MAX_QUEUES];
};

/* LLC/SNAP header announcing an IPv4 payload (EtherType 0x0800) */
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};
41
/* given a data frame determine the 802.1p/1d tag to use */
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
	struct iphdr *ip;
	int dscp;
	int offset;

	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct tcf_result res = { -1, 0 };

	/* if there is a user set filter list, call out to that */
	if (q->filter_list) {
		tc_classify(skb, q->filter_list, &res);
		if (res.class != -1)
			return res.class;
	}

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.
	 * This is used to allow 802.1d priority to be passed directly in
	 * from VLAN tags, etc. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	/* check there is a valid IP header present */
	offset = ieee80211_get_hdrlen_from_skb(skb);
	if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
	    memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
		return 0;

	ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));

	dscp = ip->tos & 0xfc;	/* upper six TOS bits = DSCP */
	if (dscp & 0x1c)	/* non-precedence codepoint: fall back to BE */
		return 0;
	return dscp >> 5;	/* top three bits become the 802.1d tag */
}
79
80
81static inline int wme_downgrade_ac(struct sk_buff *skb)
82{
83	switch (skb->priority) {
84	case 6:
85	case 7:
86		skb->priority = 5; /* VO -> VI */
87		return 0;
88	case 4:
89	case 5:
90		skb->priority = 3; /* VI -> BE */
91		return 0;
92	case 0:
93	case 3:
94		skb->priority = 2; /* BE -> BK */
95		return 0;
96	default:
97		return -1;
98	}
99}
100
101
/* positive return value indicates which queue to use
 * negative return value indicates to drop the frame */
static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data(hdr->frame_control)) {
		/* management frames go on AC_VO queue, but are sent
		* without QoS control fields */
		return 0;
	}

	/* placeholder, never taken: injected frames would carry their
	 * own AC from radiotap */
	if (0 /* injected */) {
		/* use AC from radiotap */
	}

	if (!ieee80211_is_data_qos(hdr->frame_control)) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* in case we are a client verify acm is not set for this ac */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* No AC with lower priority has acm=0, drop packet. */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}
139
140
/*
 * Root enqueue: pick the hardware queue for the frame, fill in the QoS
 * control field for QoS data frames, and hand the skb to the per-queue
 * child qdisc.  Frames flagged IEEE80211_TX_CTL_REQUEUE came back from
 * the driver and go straight onto the matching requeued[] list instead.
 */
static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct Qdisc *qdisc;
	struct sta_info *sta;
	int err, queue;
	u8 tid;

	if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
		queue = skb_get_queue_mapping(skb);
		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			/* redirect to the aggregation queue only while it
			 * is valid and still allocated in the pool */
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}
		rcu_read_unlock();
		skb_queue_tail(&q->requeued[queue], skb);
		qd->q.qlen++;
		return 0;
	}

	queue = classify80211(skb, qd);

	/* clamp to a valid non-aggregation queue; a negative (drop)
	 * verdict from classify80211 is handled further below */
	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

	/* now we know the 1d priority, fill in the QoS header if there is one
	 */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		u8 ack_policy = 0;
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p++ = ack_policy | tid;
		*p = 0;

		rcu_read_lock();

		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			/* same aggregation-queue override as in the
			 * requeue path above */
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}

		rcu_read_unlock();
	}

	if (unlikely(queue < 0)) {
			kfree_skb(skb);
			err = NET_XMIT_DROP;
	} else {
		skb_set_queue_mapping(skb, queue);
		qdisc = q->queues[queue];
		err = qdisc->enqueue(skb, qdisc);
		if (err == NET_XMIT_SUCCESS) {
			qd->q.qlen++;
			qd->bstats.bytes += skb->len;
			qd->bstats.packets++;
			return NET_XMIT_SUCCESS;
		}
	}
	qd->qstats.drops++;
	return err;
}
226
227
228/* TODO: clean up the cases where master_hard_start_xmit
229 * returns non 0 - it shouldn't ever do that. Once done we
230 * can remove this function */
231static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
232{
233	struct ieee80211_sched_data *q = qdisc_priv(qd);
234	struct Qdisc *qdisc;
235	int err;
236
237	/* we recorded which queue to use earlier! */
238	qdisc = q->queues[skb_get_queue_mapping(skb)];
239
240	if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
241		qd->q.qlen++;
242		return 0;
243	}
244	qd->qstats.drops++;
245	return err;
246}
247
248
/* Hand the next frame up: walk the hardware queues in numeric
 * (priority) order, preferring previously requeued frames over the
 * child qdisc of each queue. */
static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qdisc_dev(qd);
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb;
	struct Qdisc *qdisc;
	int queue;

	/* check all the h/w queues in numeric/priority order */
	for (queue = 0; queue < QD_NUM(hw); queue++) {
		/* see if there is room in this hardware queue */
		if (__netif_subqueue_stopped(local->mdev, queue) ||
		    !test_bit(queue, q->qdisc_pool))
			continue;

		/* there is space - try and get a frame */
		skb = skb_dequeue(&q->requeued[queue]);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}

		qdisc = q->queues[queue];
		skb = qdisc->dequeue(qdisc);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}
	}
	/* returning a NULL here when all the h/w queues are full means we
	 * never need to call netif_stop_queue in the driver */
	return NULL;
}
284
285
286static void wme_qdiscop_reset(struct Qdisc* qd)
287{
288	struct ieee80211_sched_data *q = qdisc_priv(qd);
289	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
290	struct ieee80211_hw *hw = &local->hw;
291	int queue;
292
293	/* QUESTION: should we have some hardware flush functionality here? */
294
295	for (queue = 0; queue < QD_NUM(hw); queue++) {
296		skb_queue_purge(&q->requeued[queue]);
297		qdisc_reset(q->queues[queue]);
298	}
299	qd->q.qlen = 0;
300}
301
302
303static void wme_qdiscop_destroy(struct Qdisc* qd)
304{
305	struct ieee80211_sched_data *q = qdisc_priv(qd);
306	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
307	struct ieee80211_hw *hw = &local->hw;
308	int queue;
309
310	tcf_destroy_chain(&q->filter_list);
311
312	for (queue = 0; queue < QD_NUM(hw); queue++) {
313		skb_queue_purge(&q->requeued[queue]);
314		qdisc_destroy(q->queues[queue]);
315		q->queues[queue] = &noop_qdisc;
316	}
317}
318
319
/* called whenever parameters are updated on existing qdisc */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
	/* no runtime-tunable parameters yet: accept and ignore options */
	return 0;
}
325
326
/* called during initial creation of qdisc on device */
static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qdisc_dev(qd);
	struct ieee80211_local *local;
	struct ieee80211_hw *hw;
	int err = 0, i;

	/* check that device is a mac80211 device */
	if (!dev->ieee80211_ptr ||
	    dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
		return -EINVAL;

	local = wdev_priv(dev->ieee80211_ptr);
	hw = &local->hw;

	/* only allow on master dev */
	if (dev != local->mdev)
		return -EINVAL;

	/* ensure that we are root qdisc */
	if (qd->parent != TC_H_ROOT)
		return -EINVAL;

	if (qd->flags & TCQ_F_INGRESS)
		return -EINVAL;

	/* if options were passed in, set them */
	if (opt)
		err = wme_qdiscop_tune(qd, opt);

	/* create child queues */
	for (i = 0; i < QD_NUM(hw); i++) {
		skb_queue_head_init(&q->requeued[i]);
		q->queues[i] = qdisc_create_dflt(qdisc_dev(qd), qd->dev_queue,
						 &pfifo_qdisc_ops,
						 qd->handle);
		if (!q->queues[i]) {
			/* park the noop qdisc so the slot is never NULL;
			 * enqueues to it will simply fail */
			q->queues[i] = &noop_qdisc;
			printk(KERN_ERR "%s child qdisc %i creation failed\n",
			       dev->name, i);
		}
	}

	/* non-aggregation queues: reserve/mark as used */
	for (i = 0; i < local->hw.queues; i++)
		set_bit(i, q->qdisc_pool);

	return err;
}
378
static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
	/* no qdisc-level attributes are exposed; -1 signals the caller
	 * that there is nothing to dump */
	return -1;
}
383
384
385static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
386			     struct Qdisc *new, struct Qdisc **old)
387{
388	struct ieee80211_sched_data *q = qdisc_priv(qd);
389	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
390	struct ieee80211_hw *hw = &local->hw;
391	unsigned long queue = arg - 1;
392
393	if (queue >= QD_NUM(hw))
394		return -EINVAL;
395
396	if (!new)
397		new = &noop_qdisc;
398
399	sch_tree_lock(qd);
400	*old = q->queues[queue];
401	q->queues[queue] = new;
402	qdisc_reset(*old);
403	sch_tree_unlock(qd);
404
405	return 0;
406}
407
408
409static struct Qdisc *
410wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
411{
412	struct ieee80211_sched_data *q = qdisc_priv(qd);
413	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
414	struct ieee80211_hw *hw = &local->hw;
415	unsigned long queue = arg - 1;
416
417	if (queue >= QD_NUM(hw))
418		return NULL;
419
420	return q->queues[queue];
421}
422
423
424static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
425{
426	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
427	struct ieee80211_hw *hw = &local->hw;
428	unsigned long queue = TC_H_MIN(classid);
429
430	if (queue - 1 >= QD_NUM(hw))
431		return 0;
432
433	return queue;
434}
435
436
/* binding a filter to a class is just a validity check on the classid */
static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}
442
443
/* classes are not reference counted; nothing to release */
static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}
447
448
449static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
450			      struct nlattr **tca, unsigned long *arg)
451{
452	unsigned long cl = *arg;
453	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
454	struct ieee80211_hw *hw = &local->hw;
455
456	if (cl - 1 > QD_NUM(hw))
457		return -ENOENT;
458
459	/* TODO: put code to program hardware queue parameters here,
460	 * to allow programming from tc command line */
461
462	return 0;
463}
464
465
466/* we don't support deleting hardware queues
467 * when we add WMM-SA support - TSPECs may be deleted here */
468static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
469{
470	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
471	struct ieee80211_hw *hw = &local->hw;
472
473	if (cl - 1 > QD_NUM(hw))
474		return -ENOENT;
475	return 0;
476}
477
478
479static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
480				  struct sk_buff *skb, struct tcmsg *tcm)
481{
482	struct ieee80211_sched_data *q = qdisc_priv(qd);
483	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
484	struct ieee80211_hw *hw = &local->hw;
485
486	if (cl - 1 > QD_NUM(hw))
487		return -ENOENT;
488	tcm->tcm_handle = TC_H_MIN(cl);
489	tcm->tcm_parent = qd->handle;
490	tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
491	return 0;
492}
493
494
/* Iterate over all classes (one per hardware queue), honouring the
 * walker's skip/count/stop protocol. */
static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
{
	struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	if (arg->stop)
		return;

	for (queue = 0; queue < QD_NUM(hw); queue++) {
		/* skip over the first arg->skip classes */
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		/* we should return classids for our internal queues here
		 * as well as the external ones */
		if (arg->fn(qd, queue+1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
518
519
520static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
521						unsigned long cl)
522{
523	struct ieee80211_sched_data *q = qdisc_priv(qd);
524
525	if (cl)
526		return NULL;
527
528	return &q->filter_list;
529}
530
531
/* this qdisc is classful (i.e. has classes, some of which may have leaf qdiscs attached)
 * - these are the operations on the classes */
static const struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,

	/* class ids double as (hardware queue index + 1); get/put do
	 * no reference counting */
	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,

	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,

	.dump = wme_classop_dump_class,
};
551
552
/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops __read_mostly =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),

	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */

	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,

	.dump = wme_qdiscop_dump,
};
573
574
/* Create a wme qdisc and install it as the device's sleeping root
 * qdisc, under the tree lock. */
void ieee80211_install_qdisc(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, txq,
				  &wme_qdisc_ops, TC_H_ROOT);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &txq->qdisc_list);
	txq->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}
595
596
597int ieee80211_qdisc_installed(struct net_device *dev)
598{
599	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
600
601	return txq->qdisc_sleeping->ops == &wme_qdisc_ops;
602}
603
604
/* register the wme qdisc with the traffic-control core */
int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}
609
610
/* unregister the wme qdisc from the traffic-control core */
void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}
615
/*
 * Reserve a free aggregation (A-MPDU) hardware queue for @sta/@tid.
 * Returns 0 on success, -EPERM when the hardware has no aggregation
 * queues at all, -EAGAIN when all of them are currently in use.
 */
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			struct sta_info *sta, u16 tid)
{
	int i;
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
	struct ieee80211_sched_data *q =
			qdisc_priv(txq->qdisc_sleeping);
	DECLARE_MAC_BUF(mac);

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	if (!local->hw.ampdu_queues)
		return -EPERM;

	/* try to get a Qdisc from the pool */
	for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
		if (!test_and_set_bit(i, q->qdisc_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* IF there are already pending packets
			 * on this tid first we need to drain them
			 * on the previous queue
			 * since HT is strict in order */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %s pool=0x%lX\n",
					i, tid, print_mac(mac, sta->addr),
					q->qdisc_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}
653
/**
 * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	struct ieee80211_hw *hw = &local->hw;
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
	struct ieee80211_sched_data *q =
		qdisc_priv(txq->qdisc_sleeping);
	int agg_queue = sta->tid_to_tx_q[tid];

	/* return the qdisc to the pool */
	clear_bit(agg_queue, q->qdisc_pool);
	/* QD_NUM(hw) is out of range and so acts as a "no tx queue for
	 * this tid" sentinel (see the range checks in enqueue) */
	sta->tid_to_tx_q[tid] = QD_NUM(hw);

	/* either push the pending frames back through the root qdisc
	 * for reclassification, or throw them away */
	if (requeue)
		ieee80211_requeue(local, agg_queue);
	else
		q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
}
676
677void ieee80211_requeue(struct ieee80211_local *local, int queue)
678{
679	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, 0);
680	struct Qdisc *root_qd = txq->qdisc_sleeping;
681	struct ieee80211_sched_data *q = qdisc_priv(root_qd);
682	struct Qdisc *qdisc = q->queues[queue];
683	struct sk_buff *skb = NULL;
684	u32 len;
685
686	if (!qdisc || !qdisc->dequeue)
687		return;
688
689	for (len = qdisc->q.qlen; len > 0; len--) {
690		skb = qdisc->dequeue(qdisc);
691		root_qd->q.qlen--;
692		/* packet will be classified again and */
693		/* skb->packet_data->queue will be overridden if needed */
694		if (skb)
695			wme_qdiscop_enqueue(skb, root_qd);
696	}
697}
698