/* net/sched/sch_gred.c at revision 0da974f4f303a6842516b764507e3c0a03f41e5a */
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: -  Bug fix with grio mode
 *		       - a better single AvgQ mode with Grio (WRED)
 *		       - A finer grained VQ dequeue based on suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 *  For all the glorious comments look at include/net/red.h
 */
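
/*
 * Illustrative configuration (exact syntax and values depend on the
 * iproute2 version; this is an example, not a reference):
 *
 *   tc qdisc add dev eth0 root gred setup DPs 3 default 2 grio
 *   tc qdisc change dev eth0 root gred DP 0 prio 8 limit 60KB \
 *	min 15KB max 45KB burst 20 avpkt 1000 bandwidth 10Mbit \
 *	probability 0.4
 *
 * Each packet's skb->tc_index selects the virtual queue (DP) it is
 * accounted against; see tc_index_to_dp() below.
 */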

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

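/* A virtual queue (VQ) is selected by the low-order bits of
 * skb->tc_index; the mask below assumes MAX_DPs is a power of two.
 * VQs configured without an explicit priority default to the middle
 * of the priority range.
 */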
#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data
{
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched
{
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_parms wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

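/* WRED mode only makes sense when several VQs share a priority:
 * return 1 if at least two distinct VQs are configured with the same
 * prio, 0 otherwise.
 */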
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be run frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

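/* In WRED mode every VQ runs RED over the qdisc-wide backlog, so the
 * average queue size is shared; otherwise each VQ only sees its own
 * backlog.
 */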
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

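/* Map a packet to its drop parameters (DP): the VQ index lives in the
 * low-order bits of skb->tc_index, typically set by a classifier such
 * as cls_tcindex or by the dsmark qdisc.
 */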
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

static inline void gred_load_wred_set(struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->parms.qavg = table->wred_set.qavg;
	q->parms.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->parms.qavg;
}

static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}

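/* Enqueue path: map the skb to its VQ (falling back to the default DP,
 * or passing the packet through if none is configured), update the
 * average queue length, and let red_action() decide between queueing,
 * ECN marking and dropping.
 */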
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		if ((q = t->tab[dp]) == NULL) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qavgs of prios < ours to get the new qavg */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg += t->tab[i]->parms.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (q->backlog + skb->len <= q->limit) {
		q->backlog += skb->len;
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

static int gred_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched *t = qdisc_priv(sch);
	struct gred_sched_data *q;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		if (net_ratelimit())
			printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
			       "for requeue, screwing up backlog.\n",
			       tc_index_to_dp(skb));
	} else {
		if (red_is_idling(&q->parms))
			red_end_of_idle_period(&q->parms);
		q->backlog += skb->len;
	}

	return qdisc_requeue(skb, sch);
}

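/* On dequeue, credit the packet back to its VQ's backlog. A VQ whose
 * backlog drains to zero enters an idle period so its average decays;
 * in WRED mode the shared state idles only once the qdisc is empty.
 */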
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x after dequeue, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= skb->len;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}

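/* ->drop() removes from the tail: the most recently queued packet is
 * sacrificed and accounted to its VQ as an "other" drop.
 */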
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = skb->len;
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x while dropping, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->parms);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

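/* (Re)configure the table itself from a struct tc_gred_sopt: number of
 * DPs, default DP, RIO/WRED mode and RED flags. VQs beyond the new DP
 * count become shadowed and are destroyed.
 */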
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}

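/* Create or update a single VQ from a struct tc_gred_qopt plus the
 * 256 byte idle-time scaling table (stab).
 */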
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}

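/* Two kinds of change requests arrive here: table-level setup carrying
 * only a TCA_GRED_DPS attribute, and per-VQ changes carrying both
 * TCA_GRED_PARMS and TCA_GRED_STAB.
 */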
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	/* Table-level change: hand gred_change_table_def() the DPS
	 * attribute itself, not the enclosing TCA_OPTIONS blob, since
	 * it interprets its payload as a struct tc_gred_sopt.
	 */
	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}

static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}

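/* Dump the table setup plus one tc_gred_qopt per possible DP; slots
 * with no VQ are flagged by reporting a DP index of MAX_DPs + i.
 */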
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message.
			 * This is how we indicate to tc that there is no
			 * VQ at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		/* In WRED mode report the shared average: read it from
		 * wred_set instead of the default DP's VQ, which may not
		 * exist.
		 */
		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");