/*
 * net/sched/sch_netem.c	Network emulator
 *
 * 		This program is free software; you can redistribute it and/or
 * 		modify it under the terms of the GNU General Public License
 * 		as published by the Free Software Foundation; either version
 * 		2 of the License.
 *
 *  		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full-blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can also be loaded from a table to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines.  It does not need to do bandwidth
	 control either, since that can be handled by using a token
	 bucket or other rate control.

     Correlated Loss Generator models

	Added generation of correlated loss according to a 4-state Markov
	chain (the GI model) and to the "Gilbert-Elliot" model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum  {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};

/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp
 * to hold a rb_node structure.
 *
 * If struct sk_buff layout is changed, the following checks will complain.
 */
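/* On 64-bit kernels struct rb_node is three pointer-sized words (24
 * bytes), exactly the room provided by next (8) + prev (8) + tstamp (8);
 * the BUILD_BUG_ONs below verify that layout assumption at compile time.
 */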
static struct rb_node *netem_rb_node(struct sk_buff *skb)
{
	BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0);
	BUILD_BUG_ON(offsetof(struct sk_buff, prev) !=
		     offsetof(struct sk_buff, next) + sizeof(skb->next));
	BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) !=
		     offsetof(struct sk_buff, prev) + sizeof(skb->prev));
	BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) +
					      sizeof(skb->prev) +
					      sizeof(skb->tstamp));
	return (struct rb_node *)&skb->next;
}

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return (struct sk_buff *)rb;
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
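 *
 * With p = (rho + 1) / 2^32 the update is effectively
 *   next = (1 - p) * uniform + p * last
 * so e.g. rho = 0xC0000000 weights the previous value by roughly 0.75.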
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Compare rnd with the transition probabilities outgoing from the
	 * current state, then decide the next state and whether the next
	 * packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3) {
			clg->state = TX_IN_BURST_PERIOD;
		} else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * A random number is compared with the transition probabilities
 * outgoing from the current state to decide the next state. A second
 * random number is compared with the loss probability of the current
 * state to decide whether the next packet is transmitted or lost.
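 *
 * Parameter encoding: a1 = p (good->bad), a2 = r (bad->good), a3 = h
 * (transmit probability while bad, so loss there is 1 - h) and
 * a4 = 1 - k (loss probability while good).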
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (also used for the GI
		 * model): the Markov 4-state loss generator returns true
		 * when the packet should be dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm: the generator
		 * returns true when the packet should be dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
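 *
 * The table approximates the inverse CDF of the desired distribution,
 * scaled by NETEM_DIST_SCALE, so the result is effectively
 * mu + sigma * t / NETEM_DIST_SCALE; sigma is split into quotient and
 * remainder so the multiplications cannot overflow, and
 * NETEM_DIST_SCALE/2 rounds the remainder term to nearest.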
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

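/* Transmission time of a packet at q->rate, including per-packet and
 * per-cell overhead.  Example: with cell_size 53 and cell_overhead 5,
 * a 100 byte packet needs ceil(100 / 53) = 2 cells, i.e. 2 * (53 + 5) =
 * 116 bytes worth of wire time.
 */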
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		skb->next = NULL;
		skb->prev = NULL;
		kfree_skb(skb);
	}
}

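/* Insert into the tfifo rbtree, ordered by time_to_send.  Ties go to
 * the right of existing entries, so packets that share a send time are
 * dequeued in FIFO order.
 */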
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(netem_rb_node(nskb), parent, p);
	rb_insert_color(netem_rb_node(nskb), &q->t_root);
	sch->q.qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int count = 1;

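	/* count is the number of copies to transmit: duplication can raise
	 * it to 2, a loss event lowers it, and zero means the packet is
	 * dropped (or CE-marked instead when ecn is set).
	 */
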
	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_reshape_fail(skb, sch);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (!skb_queue_empty(&sch->q))
				last = skb_peek_tail(&sch->q);
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * The last packet in the queue is the
				 * reference point: the time that packet
				 * still has to wait already counts toward
				 * this one's delay, so subtract it and
				 * schedule relative to its send time.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

	return NET_XMIT_SUCCESS;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);

	if (!len) {
		struct rb_node *p = rb_first(&q->t_root);

		if (p) {
			struct sk_buff *skb = netem_rb_to_skb(p);

			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			qdisc_qstats_backlog_dec(sch, skb);
			kfree_skb(skb);
		}
	}
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		qdisc_qstats_drop(sch);

	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

	if (qdisc_is_throttled(sch))
		return NULL;

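	/* Packets re-queued at the head of sch->q by reordering are sent
	 * first; otherwise take the earliest rbtree entry once its
	 * time_to_send has passed, else arm the watchdog for that time.
	 */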
tfifo_dequeue:
	skb = __skb_dequeue(&sch->q);
	if (skb) {
deliver:
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_unthrottled(sch);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* has the earliest packet's send time been reached? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						qdisc_qstats_drop(sch);
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
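 *
 * Up to NETEM_DIST_MAX entries: kmalloc is tried first (allocation
 * warnings suppressed) with a vmalloc fallback for large tables, and
 * the new table is swapped in under the qdisc root lock so readers
 * never see a partially initialised table.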
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

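/* netem options start with a struct tc_netem_qopt followed by optional
 * netlink attributes; parse the attribute tail that begins len bytes
 * (aligned) past the start of the payload.
 */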
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions:
	 * if gap is set, assume 100% reorder probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

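	/* the legacy rate attribute is only 32 bits wide: saturate it and
	 * emit the exact value in TCA_NETEM_RATE64 when it does not fit
	 */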
	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");