wme.c revision 238f74a227fd7de8ea1bc66dcbbd36cf9920d1cb
/*
 * Copyright 2004, Instant802 Networks, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/types.h>
#include <net/ip.h>
#include <net/pkt_sched.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wme.h"

/* maximum number of hardware queues we support. */
#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
/* current number of hardware queues we support. */
#define QD_NUM(hw) ((hw)->queues + (hw)->ampdu_queues)

/*
 * Default mapping in classifier to work with default
 * queue setup.
 */
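/* Maps an 802.1d user priority (array index) to a hardware queue number;
 * queue 0 (AC_VO) is the highest priority and is served first by the
 * dequeue loop below, queue 3 (AC_BK) is the lowest. */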
const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

struct ieee80211_sched_data
{
	unsigned long qdisc_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
	struct tcf_proto *filter_list;
	struct Qdisc *queues[QD_MAX_QUEUES];
	struct sk_buff_head requeued[QD_MAX_QUEUES];
};

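/* LLC/SNAP header (DSAP/SSAP 0xAA, UI control, zero OUI) followed by the
 * IPv4 EtherType 0x0800 - the encapsulation expected in front of an IP
 * payload inside an 802.11 data frame */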
static const char llc_ip_hdr[8] = {0xAA, 0xAA, 0x3, 0, 0, 0, 0x08, 0};

/* given a data frame determine the 802.1p/1d tag to use */
static inline unsigned classify_1d(struct sk_buff *skb, struct Qdisc *qd)
{
	struct iphdr *ip;
	int dscp;
	int offset;

	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct tcf_result res = { -1, 0 };

	/* if there is a user set filter list, call out to that */
	if (q->filter_list) {
		tc_classify(skb, q->filter_list, &res);
		if (res.class != -1)
			return res.class;
	}

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority.
	 * This is used to allow 802.1d priority to be passed directly in
	 * from VLAN tags, etc. */
	if (skb->priority >= 256 && skb->priority <= 263)
		return skb->priority - 256;

	/* check there is a valid IP header present */
	offset = ieee80211_get_hdrlen_from_skb(skb);
	if (skb->len < offset + sizeof(llc_ip_hdr) + sizeof(*ip) ||
	    memcmp(skb->data + offset, llc_ip_hdr, sizeof(llc_ip_hdr)))
		return 0;

	ip = (struct iphdr *) (skb->data + offset + sizeof(llc_ip_hdr));

	dscp = ip->tos & 0xfc;
	if (dscp & 0x1c)
		return 0;
	return dscp >> 5;
}


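/* Downgrade the frame's user priority to the next lower access category
 * (VO -> VI -> BE -> BK); returns -1 once the frame is already in BK and
 * cannot be downgraded any further. */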
static inline int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 2; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}


/* positive return value indicates which queue to use
 * negative return value indicates to drop the frame */
static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data(hdr->frame_control)) {
		/* management frames go on AC_VO queue, but are sent
		 * without QoS control fields */
		return 0;
	}

	if (0 /* injected */) {
		/* use AC from radiotap */
	}

	if (!ieee80211_is_data_qos(hdr->frame_control)) {
		skb->priority = 0; /* required for correct WPA/11i MIC */
		return ieee802_1d_to_ac[skb->priority];
	}

	/* use the data classifier to determine what 802.1d tag the
	 * data frame has */
	skb->priority = classify_1d(skb, qd);

	/* in case we are a client, verify ACM is not set for this AC */
	while (unlikely(local->wmm_acm & BIT(skb->priority))) {
		if (wme_downgrade_ac(skb)) {
			/* No AC with lower priority has acm=0, drop packet. */
			return -1;
		}
	}

	/* look up which queue to use for frames with this 1d tag */
	return ieee802_1d_to_ac[skb->priority];
}


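/* Root enqueue: requeued frames (IEEE80211_TX_CTL_REQUEUE) go straight to
 * the per-queue requeued list; everything else is classified, has its QoS
 * control field filled in, is diverted to a per-STA/TID aggregation queue
 * if one is allocated, and is finally enqueued on the chosen child qdisc. */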
static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct Qdisc *qdisc;
	struct sta_info *sta;
	int err, queue;
	u8 tid;

	if (info->flags & IEEE80211_TX_CTL_REQUEUE) {
		queue = skb_get_queue_mapping(skb);
		rcu_read_lock();
		sta = sta_info_get(local, hdr->addr1);
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}
		rcu_read_unlock();
		skb_queue_tail(&q->requeued[queue], skb);
		qd->q.qlen++;
		return 0;
	}

	queue = classify80211(skb, qd);

	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

	/* now we know the 1d priority, fill in the QoS header if there is one */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		u8 ack_policy = 0;
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (local->wifi_wme_noack_test)
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p++ = ack_policy | tid;
		*p = 0;

		rcu_read_lock();

		sta = sta_info_get(local, hdr->addr1);
		if (sta) {
			int ampdu_queue = sta->tid_to_tx_q[tid];
			if ((ampdu_queue < QD_NUM(hw)) &&
			    test_bit(ampdu_queue, q->qdisc_pool)) {
				queue = ampdu_queue;
				info->flags |= IEEE80211_TX_CTL_AMPDU;
			} else {
				info->flags &= ~IEEE80211_TX_CTL_AMPDU;
			}
		}

		rcu_read_unlock();
	}

	if (unlikely(queue < 0)) {
		kfree_skb(skb);
		err = NET_XMIT_DROP;
	} else {
		skb_set_queue_mapping(skb, queue);
		qdisc = q->queues[queue];
		err = qdisc->enqueue(skb, qdisc);
		if (err == NET_XMIT_SUCCESS) {
			qd->q.qlen++;
			qd->bstats.bytes += skb->len;
			qd->bstats.packets++;
			return NET_XMIT_SUCCESS;
		}
	}
	qd->qstats.drops++;
	return err;
}


/* TODO: clean up the cases where master_hard_start_xmit
 * returns non-zero - it shouldn't ever do that. Once done we
 * can remove this function */
static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct Qdisc *qdisc;
	int err;

	/* we recorded which queue to use earlier! */
	qdisc = q->queues[skb_get_queue_mapping(skb)];

	if ((err = qdisc->ops->requeue(skb, qdisc)) == 0) {
		qd->q.qlen++;
		return 0;
	}
	qd->qstats.drops++;
	return err;
}


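/* Root dequeue: walk the hardware queues in priority order (lowest number
 * first), skip queues that are stopped or not in the qdisc pool, and serve
 * requeued frames before asking the child qdisc for a new one. */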
static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb;
	struct Qdisc *qdisc;
	int queue;

	/* check all the h/w queues in numeric/priority order */
	for (queue = 0; queue < QD_NUM(hw); queue++) {
		/* see if there is room in this hardware queue */
		if (__netif_subqueue_stopped(local->mdev, queue) ||
		    !test_bit(queue, q->qdisc_pool))
			continue;

		/* there is space - try and get a frame */
		skb = skb_dequeue(&q->requeued[queue]);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}

		qdisc = q->queues[queue];
		skb = qdisc->dequeue(qdisc);
		if (skb) {
			qd->q.qlen--;
			return skb;
		}
	}
	/* returning a NULL here when all the h/w queues are full means we
	 * never need to call netif_stop_queue in the driver */
	return NULL;
}


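/* Purge the per-queue requeued lists, reset every child qdisc and zero the
 * root queue length. */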
static void wme_qdiscop_reset(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	/* QUESTION: should we have some hardware flush functionality here? */

	for (queue = 0; queue < QD_NUM(hw); queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_reset(q->queues[queue]);
	}
	qd->q.qlen = 0;
}


static void wme_qdiscop_destroy(struct Qdisc* qd)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	tcf_destroy_chain(&q->filter_list);

	for (queue = 0; queue < QD_NUM(hw); queue++) {
		skb_queue_purge(&q->requeued[queue]);
		qdisc_destroy(q->queues[queue]);
		q->queues[queue] = &noop_qdisc;
	}
}


/* called whenever parameters are updated on existing qdisc */
static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
{
	return 0;
}


/* called during initial creation of qdisc on device */
static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct net_device *dev = qd->dev;
	struct ieee80211_local *local;
	struct ieee80211_hw *hw;
	int err = 0, i;

	/* check that device is a mac80211 device */
	if (!dev->ieee80211_ptr ||
	    dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
		return -EINVAL;

	local = wdev_priv(dev->ieee80211_ptr);
	hw = &local->hw;

	/* only allow on master dev */
	if (dev != local->mdev)
		return -EINVAL;

	/* ensure that we are root qdisc */
	if (qd->parent != TC_H_ROOT)
		return -EINVAL;

	if (qd->flags & TCQ_F_INGRESS)
		return -EINVAL;

	/* if options were passed in, set them */
	if (opt)
		err = wme_qdiscop_tune(qd, opt);

	/* create child queues */
	for (i = 0; i < QD_NUM(hw); i++) {
		skb_queue_head_init(&q->requeued[i]);
		q->queues[i] = qdisc_create_dflt(qd->dev, &pfifo_qdisc_ops,
						 qd->handle);
		if (!q->queues[i]) {
			q->queues[i] = &noop_qdisc;
			printk(KERN_ERR "%s child qdisc %i creation failed\n",
			       dev->name, i);
		}
	}

	/* non-aggregation queues: reserve/mark as used */
	for (i = 0; i < local->hw.queues; i++)
		set_bit(i, q->qdisc_pool);

	return err;
}

static int wme_qdiscop_dump(struct Qdisc *qd, struct sk_buff *skb)
{
	return -1;
}


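/* Replace the child qdisc of class 'arg' (a 1-based queue number); the old
 * child is reset under the qdisc tree lock and handed back via 'old'. */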
static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
			     struct Qdisc *new, struct Qdisc **old)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= QD_NUM(hw))
		return -EINVAL;

	if (!new)
		new = &noop_qdisc;

	sch_tree_lock(qd);
	*old = q->queues[queue];
	q->queues[queue] = new;
	qdisc_reset(*old);
	sch_tree_unlock(qd);

	return 0;
}


static struct Qdisc *
wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = arg - 1;

	if (queue >= QD_NUM(hw))
		return NULL;

	return q->queues[queue];
}


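/* Class ids map 1:1 onto hardware queues: TC_H_MIN(classid) is the 1-based
 * queue number, and 0 means "no such class". */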
static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	unsigned long queue = TC_H_MIN(classid);

	if (queue - 1 >= QD_NUM(hw))
		return 0;

	return queue;
}


static unsigned long wme_classop_bind(struct Qdisc *qd, unsigned long parent,
				      u32 classid)
{
	return wme_classop_get(qd, classid);
}


static void wme_classop_put(struct Qdisc *q, unsigned long cl)
{
}


static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
			      struct nlattr **tca, unsigned long *arg)
{
	unsigned long cl = *arg;
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > QD_NUM(hw))
		return -ENOENT;

	/* TODO: put code to program hardware queue parameters here,
	 * to allow programming from tc command line */

	return 0;
}


/* we don't support deleting hardware queues;
 * when we add WMM-SA support, TSPECs may be deleted here */
static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > QD_NUM(hw))
		return -ENOENT;
	return 0;
}


static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
				  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;

	if (cl - 1 > QD_NUM(hw))
		return -ENOENT;
	tcm->tcm_handle = TC_H_MIN(cl);
	tcm->tcm_parent = qd->handle;
	tcm->tcm_info = q->queues[cl-1]->handle; /* do we need this? */
	return 0;
}


static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
{
	struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
	struct ieee80211_hw *hw = &local->hw;
	int queue;

	if (arg->stop)
		return;

	for (queue = 0; queue < QD_NUM(hw); queue++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		/* we should return classids for our internal queues here
		 * as well as the external ones */
		if (arg->fn(qd, queue+1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}


static struct tcf_proto ** wme_classop_find_tcf(struct Qdisc *qd,
						unsigned long cl)
{
	struct ieee80211_sched_data *q = qdisc_priv(qd);

	if (cl)
		return NULL;

	return &q->filter_list;
}


/* this qdisc is classful (i.e. has classes, some of which may have leaf
 * qdiscs attached) - these are the operations on the classes */
static const struct Qdisc_class_ops class_ops =
{
	.graft = wme_classop_graft,
	.leaf = wme_classop_leaf,

	.get = wme_classop_get,
	.put = wme_classop_put,
	.change = wme_classop_change,
	.delete = wme_classop_delete,
	.walk = wme_classop_walk,

	.tcf_chain = wme_classop_find_tcf,
	.bind_tcf = wme_classop_bind,
	.unbind_tcf = wme_classop_put,

	.dump = wme_classop_dump_class,
};


/* queueing discipline operations */
static struct Qdisc_ops wme_qdisc_ops __read_mostly =
{
	.next = NULL,
	.cl_ops = &class_ops,
	.id = "ieee80211",
	.priv_size = sizeof(struct ieee80211_sched_data),

	.enqueue = wme_qdiscop_enqueue,
	.dequeue = wme_qdiscop_dequeue,
	.requeue = wme_qdiscop_requeue,
	.drop = NULL, /* drop not needed since we are always the root qdisc */

	.init = wme_qdiscop_init,
	.reset = wme_qdiscop_reset,
	.destroy = wme_qdiscop_destroy,
	.change = wme_qdiscop_tune,

	.dump = wme_qdiscop_dump,
};


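/* Install this qdisc as the root qdisc of the mac80211 master device,
 * assigning it directly to dev->qdisc_sleeping with a fixed handle instead
 * of going through the regular tc setup path. */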
void ieee80211_install_qdisc(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, &wme_qdisc_ops, TC_H_ROOT);
	if (!qdisc) {
		printk(KERN_ERR "%s: qdisc installation failed\n", dev->name);
		return;
	}

	/* same handle as would be allocated by qdisc_alloc_handle() */
	qdisc->handle = 0x80010000;

	qdisc_lock_tree(dev);
	list_add_tail(&qdisc->list, &dev->qdisc_list);
	dev->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}


int ieee80211_qdisc_installed(struct net_device *dev)
{
	return dev->qdisc_sleeping->ops == &wme_qdisc_ops;
}


int ieee80211_wme_register(void)
{
	return register_qdisc(&wme_qdisc_ops);
}


void ieee80211_wme_unregister(void)
{
	unregister_qdisc(&wme_qdisc_ops);
}

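/* Reserve a free aggregation queue from the pool for this STA/TID: the
 * queue is marked used in the qdisc pool, stopped, and recorded in
 * sta->tid_to_tx_q[tid]. Returns -EPERM if the hardware has no aggregation
 * queues and -EAGAIN if all of them are in use. */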
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			struct sta_info *sta, u16 tid)
{
	int i;
	struct ieee80211_sched_data *q =
			qdisc_priv(local->mdev->qdisc_sleeping);
	DECLARE_MAC_BUF(mac);

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	if (!local->hw.ampdu_queues)
		return -EPERM;

	/* try to get a Qdisc from the pool */
	for (i = local->hw.queues; i < QD_NUM(&local->hw); i++)
		if (!test_and_set_bit(i, q->qdisc_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* If there are already pending packets on this TID,
			 * we first need to drain them on the previous queue,
			 * since HT requires strict in-order delivery */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit())
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %s pool=0x%lX\n",
					i, tid, print_mac(mac, sta->addr),
					q->qdisc_pool[0]);
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}

/**
 * the caller needs to hold local->mdev->queue_lock
 */
void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
				   struct sta_info *sta, u16 tid,
				   u8 requeue)
{
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sched_data *q =
		qdisc_priv(local->mdev->qdisc_sleeping);
	int agg_queue = sta->tid_to_tx_q[tid];

	/* return the qdisc to the pool */
	clear_bit(agg_queue, q->qdisc_pool);
	sta->tid_to_tx_q[tid] = QD_NUM(hw);

	if (requeue)
		ieee80211_requeue(local, agg_queue);
	else
		q->queues[agg_queue]->ops->reset(q->queues[agg_queue]);
}

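/* Drain every frame queued on a child qdisc and push it back through the
 * root enqueue path so it is classified (and queue-mapped) again, e.g.
 * after an aggregation queue has been torn down. */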
void ieee80211_requeue(struct ieee80211_local *local, int queue)
{
	struct Qdisc *root_qd = local->mdev->qdisc_sleeping;
	struct ieee80211_sched_data *q = qdisc_priv(root_qd);
	struct Qdisc *qdisc = q->queues[queue];
	struct sk_buff *skb = NULL;
	u32 len;

	if (!qdisc || !qdisc->dequeue)
		return;

	for (len = qdisc->q.qlen; len > 0; len--) {
		skb = qdisc->dequeue(qdisc);
		root_qd->q.qlen--;
		/* packet will be classified again and
		 * skb->packet_data->queue will be overridden if needed */
		if (skb)
			wme_qdiscop_enqueue(skb, root_qd);
	}
}
690