/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_PKT_SCHED_H
#define __LINUX_PKT_SCHED_H

#include <linux/types.h>

/* Logical priority bands, independent of any specific packet scheduler.
   Every scheduler maps them to real traffic classes if it has no more
   precise mechanism to classify packets.

   These numbers have no special meaning, though their coincidence
   with obsolete IPv6 values is not accidental :-). New IPv6 drafts
   preferred full anarchy inspired by the diffserv group.

   Note: TC_PRIO_BESTEFFORT is not the least favoured class; as a rule
   it is handled with more care than filler or even bulk traffic.
 */

#define TC_PRIO_BESTEFFORT		0
#define TC_PRIO_FILLER			1
#define TC_PRIO_BULK			2
#define TC_PRIO_INTERACTIVE_BULK	4
#define TC_PRIO_INTERACTIVE		6
#define TC_PRIO_CONTROL			7

#define TC_PRIO_MAX			15
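
/*
 * Illustrative example (not part of the UAPI): an application can place its
 * traffic into one of these logical bands through SO_PRIORITY, and classless
 * schedulers such as prio/pfifo_fast then map skb->priority through their
 * priomap (see struct tc_prio_qopt below):
 *
 *	int prio = TC_PRIO_INTERACTIVE;
 *	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
 */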

/* Generic queue statistics, available for all the elements.
   Particular schedulers may also have their own private records.
 */

struct tc_stats {
	__u64	bytes;			/* Number of enqueued bytes */
	__u32	packets;		/* Number of enqueued packets */
	__u32	drops;			/* Packets dropped because of lack of resources */
	__u32	overlimits;		/* Number of throttle events when this
					 * flow goes out of allocated bandwidth */
	__u32	bps;			/* Current flow byte rate */
	__u32	pps;			/* Current flow packet rate */
	__u32	qlen;
	__u32	backlog;
};
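
/*
 * Usage sketch (illustrative only): this legacy layout is carried in the
 * TCA_STATS attribute of RTM_NEWQDISC/RTM_NEWTCLASS dumps (newer kernels
 * additionally emit a structured TCA_STATS2 nest).  Assuming 'tb' is a
 * parsed rtattr table, user space reads it as
 *
 *	struct tc_stats st;
 *	if (tb[TCA_STATS])
 *		memcpy(&st, RTA_DATA(tb[TCA_STATS]), sizeof(st));
 */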

struct tc_estimator {
	signed char	interval;
	unsigned char	ewma_log;
};

/* "Handles"
   ---------

    All the traffic control objects have 32bit identifiers, or "handles".

    They can be considered opaque numbers from the user API viewpoint,
    but in fact they always consist of two fields: a major and a minor
    number, which the kernel interprets specially.  Applications may
    rely on this split, though it is not recommended.

    E.g. qdisc handles always have a minor number of zero, while classes
    (or flows) have a major number equal to their parent qdisc's major,
    and a minor number uniquely identifying the class inside that qdisc.

    Macros to manipulate handles:
 */

#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))

#define TC_H_UNSPEC	(0U)
#define TC_H_ROOT	(0xFFFFFFFFU)
#define TC_H_INGRESS	(0xFFFFFFF1U)
#define TC_H_CLSACT	TC_H_INGRESS

#define TC_H_MIN_PRIORITY	0xFFE0U
#define TC_H_MIN_INGRESS	0xFFF2U
#define TC_H_MIN_EGRESS		0xFFF3U
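
/*
 * Illustrative example (not part of the UAPI): the handle that iproute2
 * prints as "1:20" is built and decomposed with the macros above as
 *
 *	__u32 h   = TC_H_MAKE(0x00010000U, 0x0020U);	// class "1:20"
 *	__u32 maj = TC_H_MAJ(h) >> 16;			// 1
 *	__u32 min = TC_H_MIN(h);			// 0x20
 *
 * while a qdisc handle such as "1:" is simply TC_H_MAKE(0x00010000U, 0).
 */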

/* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */
enum tc_link_layer {
	TC_LINKLAYER_UNAWARE, /* Indicates an unaware (old) iproute2 utility */
	TC_LINKLAYER_ETHERNET,
	TC_LINKLAYER_ATM,
};
#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */

struct tc_ratespec {
	unsigned char	cell_log;
	__u8		linklayer; /* lower 4 bits */
	unsigned short	overhead;
	short		cell_align;
	unsigned short	mpu;
	__u32		rate;
};

#define TC_RTAB_SIZE	1024
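
/*
 * TC_RTAB_SIZE is the size in bytes of the rate table that user space sends
 * next to a tc_ratespec (TCA_TBF_RTAB, TCA_HTB_RTAB, ...): 256 __u32 slots,
 * where, roughly, slot i holds the transmit time of a packet of up to
 * (i + 1) << cell_log bytes at 'rate'.  A minimal sketch of how such a table
 * could be filled, assuming times in microseconds (real tools use the
 * kernel's tick units and link-layer size adjustments):
 *
 *	// given a filled struct tc_ratespec r:
 *	__u32 rtab[TC_RTAB_SIZE / sizeof(__u32)];
 *	unsigned int i, sz;
 *
 *	for (i = 0; i < TC_RTAB_SIZE / sizeof(__u32); i++) {
 *		sz = (i + 1) << r.cell_log;
 *		if (sz < r.mpu)
 *			sz = r.mpu;
 *		rtab[i] = (__u32)(1000000ULL * sz / r.rate);
 *	}
 */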

struct tc_sizespec {
	unsigned char	cell_log;
	unsigned char	size_log;
	short		cell_align;
	int		overhead;
	unsigned int	linklayer;
	unsigned int	mpu;
	unsigned int	mtu;
	unsigned int	tsize;
};

enum {
	TCA_STAB_UNSPEC,
	TCA_STAB_BASE,
	TCA_STAB_DATA,
	__TCA_STAB_MAX
};

#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)

/* FIFO section */

struct tc_fifo_qopt {
	__u32	limit;	/* Queue length: bytes for bfifo, packets for pfifo */
};
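
/*
 * Usage sketch (illustrative only): the *_qopt structures in this file are
 * not used on their own; they travel as the payload of the TCA_OPTIONS
 * attribute in RTM_NEWQDISC/RTM_NEWTCLASS netlink messages, next to a
 * TCA_KIND string naming the scheduler.  For the trivial pfifo case:
 *
 *	struct tc_fifo_qopt opt = { .limit = 1000 };	// 1000 packets
 *
 *	// build nlmsghdr + struct tcmsg (tcm_ifindex, tcm_parent, tcm_handle),
 *	// then append TCA_KIND = "pfifo" and TCA_OPTIONS = opt, e.g. with
 *	// iproute2's addattr_l() or libmnl's mnl_attr_put().
 */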

/* PRIO section */

#define TCQ_PRIO_BANDS	16
#define TCQ_MIN_PRIO_BANDS 2

struct tc_prio_qopt {
	int	bands;			/* Number of bands */
	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
};
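
/*
 * Illustrative example: the classic 3-band configuration (the same priomap
 * that pfifo_fast uses) looks like
 *
 *	struct tc_prio_qopt q = {
 *		.bands   = 3,
 *		.priomap = { 1, 2, 2, 2, 1, 2, 0, 0,
 *			     1, 1, 1, 1, 1, 1, 1, 1 },
 *	};
 *
 * i.e. TC_PRIO_INTERACTIVE/TC_PRIO_CONTROL go to band 0, best effort to
 * band 1, bulk and filler to band 2.
 */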

/* MULTIQ section */

struct tc_multiq_qopt {
	__u16	bands;			/* Number of bands */
	__u16	max_bands;		/* Maximum number of queues */
};

/* PLUG section */

#define TCQ_PLUG_BUFFER                0
#define TCQ_PLUG_RELEASE_ONE           1
#define TCQ_PLUG_RELEASE_INDEFINITE    2
#define TCQ_PLUG_LIMIT                 3

struct tc_plug_qopt {
	/* TCQ_PLUG_BUFFER: Insert a plug into the queue and
	 *  buffer any incoming packets.
	 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from the queue head
	 *   up to the beginning of the next plug.
	 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from the queue.
	 *   Stop buffering packets until the next TCQ_PLUG_BUFFER
	 *   command is received (just act as a pass-through queue).
	 * TCQ_PLUG_LIMIT: Increase/decrease the queue size.
	 */
	int             action;
	__u32           limit;
};
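
/*
 * Usage sketch (illustrative only): user space drives the plug qdisc by
 * sending one action per change request, e.g.
 *
 *	struct tc_plug_qopt q = { .action = TCQ_PLUG_BUFFER };
 *	// later: q.action = TCQ_PLUG_RELEASE_ONE;  to drain up to the plug
 *	// or:    q.action = TCQ_PLUG_LIMIT; q.limit = 65536;  to resize
 */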

/* TBF section */

struct tc_tbf_qopt {
	struct tc_ratespec rate;
	struct tc_ratespec peakrate;
	__u32		limit;
	__u32		buffer;
	__u32		mtu;
};

enum {
	TCA_TBF_UNSPEC,
	TCA_TBF_PARMS,
	TCA_TBF_RTAB,
	TCA_TBF_PTAB,
	TCA_TBF_RATE64,
	TCA_TBF_PRATE64,
	TCA_TBF_BURST,
	TCA_TBF_PBURST,
	TCA_TBF_PAD,
	__TCA_TBF_MAX,
};

#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
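
/*
 * Illustrative note (an assumption about typical usage, not a spec):
 * iproute2 fills tc_tbf_qopt roughly as
 *	rate.rate = rate in bytes per second,
 *	buffer    = time (in the kernel's tick units) needed to send the
 *	            configured burst at that rate,
 *	limit     = queue limit in bytes,
 * with peakrate/mtu only used when a peak rate is configured.  The
 * TCA_TBF_RATE64/TCA_TBF_PRATE64 attributes carry 64bit rates once the
 * 32bit 'rate' field (roughly 34 Gbit/s max) would overflow.
 */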


/* TEQL section */

/* TEQL does not require any parameters */

/* SFQ section */

struct tc_sfq_qopt {
	unsigned	quantum;	/* Bytes per round allocated to flow */
	int		perturb_period;	/* Period of hash perturbation */
	__u32		limit;		/* Maximal packets in queue */
	unsigned	divisor;	/* Hash divisor */
	unsigned	flows;		/* Maximal number of flows */
};
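
/*
 * Illustrative example, in the spirit of the usual defaults (values are
 * assumptions, not mandated by this header):
 *
 *	struct tc_sfq_qopt q = {
 *		.quantum        = 1514,	// one full Ethernet frame per round
 *		.perturb_period = 10,	// re-key the hash every 10 seconds
 *		.limit          = 127,
 *		.divisor        = 1024,
 *		.flows          = 127,
 *	};
 */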

struct tc_sfqred_stats {
	__u32		prob_drop;	/* Early drops, below max threshold */
	__u32		forced_drop;	/* Early drops, after max threshold */
	__u32		prob_mark;	/* Marked packets, below max threshold */
	__u32		forced_mark;	/* Marked packets, after max threshold */
	__u32		prob_mark_head;	/* Marked packets, below max threshold */
	__u32		forced_mark_head;/* Marked packets, after max threshold */
};

struct tc_sfq_qopt_v1 {
	struct tc_sfq_qopt v0;
	unsigned int	depth;		/* max number of packets per flow */
	unsigned int	headdrop;
/* SFQRED parameters */
	__u32		limit;		/* HARD maximal flow queue length (bytes) */
	__u32		qth_min;	/* Min average length threshold (bytes) */
	__u32		qth_max;	/* Max average length threshold (bytes) */
	unsigned char	Wlog;		/* log(W)		*/
	unsigned char	Plog;		/* log(P_max/(qth_max-qth_min))	*/
	unsigned char	Scell_log;	/* cell size for idle damping */
	unsigned char	flags;
	__u32		max_P;		/* probability, high resolution */
/* SFQRED stats */
	struct tc_sfqred_stats stats;
};


struct tc_sfq_xstats {
	__s32		allot;
};

/* RED section */

enum {
	TCA_RED_UNSPEC,
	TCA_RED_PARMS,
	TCA_RED_STAB,
	TCA_RED_MAX_P,
	__TCA_RED_MAX,
};

#define TCA_RED_MAX (__TCA_RED_MAX - 1)

struct tc_red_qopt {
	__u32		limit;		/* HARD maximal queue length (bytes)	*/
	__u32		qth_min;	/* Min average length threshold (bytes) */
	__u32		qth_max;	/* Max average length threshold (bytes) */
	unsigned char	Wlog;		/* log(W)		*/
	unsigned char	Plog;		/* log(P_max/(qth_max-qth_min))	*/
	unsigned char	Scell_log;	/* cell size for idle damping */
	unsigned char	flags;
#define TC_RED_ECN		1
#define TC_RED_HARDDROP		2
#define TC_RED_ADAPTATIVE	4
};
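
/*
 * Worked example (illustrative only): Wlog is the negative log2 of the EWMA
 * weight, i.e. the average queue is tracked roughly as
 *
 *	avg += (backlog - avg) >> Wlog;
 *
 * so Wlog = 9 means a weight of 1/512.  qth_min/qth_max bound the region in
 * which packets are probabilistically dropped or, with TC_RED_ECN, marked,
 * and TCA_RED_MAX_P carries the probability reached at qth_max in
 * high-resolution fixed point.
 */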

struct tc_red_xstats {
	__u32		early;		/* Early drops */
	__u32		pdrop;		/* Drops due to queue limits */
	__u32		other;		/* Drops due to drop() calls */
	__u32		marked;		/* Marked packets */
};

/* GRED section */

#define MAX_DPs 16

enum {
	TCA_GRED_UNSPEC,
	TCA_GRED_PARMS,
	TCA_GRED_STAB,
	TCA_GRED_DPS,
	TCA_GRED_MAX_P,
	TCA_GRED_LIMIT,
	__TCA_GRED_MAX,
};

#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)

struct tc_gred_qopt {
	__u32		limit;        /* HARD maximal queue length (bytes)    */
	__u32		qth_min;      /* Min average length threshold (bytes) */
	__u32		qth_max;      /* Max average length threshold (bytes) */
	__u32		DP;           /* up to 2^32 DPs */
	__u32		backlog;
	__u32		qave;
	__u32		forced;
	__u32		early;
	__u32		other;
	__u32		pdrop;
	__u8		Wlog;         /* log(W)               */
	__u8		Plog;         /* log(P_max/(qth_max-qth_min)) */
	__u8		Scell_log;    /* cell size for idle damping */
	__u8		prio;         /* prio of this VQ */
	__u32		packets;
	__u32		bytesin;
};

/* gred setup */
struct tc_gred_sopt {
	__u32		DPs;
	__u32		def_DP;
	__u8		grio;
	__u8		flags;
	__u16		pad1;
};

/* CHOKe section */

enum {
	TCA_CHOKE_UNSPEC,
	TCA_CHOKE_PARMS,
	TCA_CHOKE_STAB,
	TCA_CHOKE_MAX_P,
	__TCA_CHOKE_MAX,
};

#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)

struct tc_choke_qopt {
	__u32		limit;		/* Hard queue length (packets)	*/
	__u32		qth_min;	/* Min average threshold (packets) */
	__u32		qth_max;	/* Max average threshold (packets) */
	unsigned char	Wlog;		/* log(W)		*/
	unsigned char	Plog;		/* log(P_max/(qth_max-qth_min))	*/
	unsigned char	Scell_log;	/* cell size for idle damping */
	unsigned char	flags;		/* see RED flags */
};

struct tc_choke_xstats {
	__u32		early;		/* Early drops */
	__u32		pdrop;		/* Drops due to queue limits */
	__u32		other;		/* Drops due to drop() calls */
	__u32		marked;		/* Marked packets */
	__u32		matched;	/* Drops due to flow match */
};

/* HTB section */
#define TC_HTB_NUMPRIO		8
#define TC_HTB_MAXDEPTH		8
#define TC_HTB_PROTOVER		3 /* the same as HTB and TC's major */

struct tc_htb_opt {
	struct tc_ratespec	rate;
	struct tc_ratespec	ceil;
	__u32	buffer;
	__u32	cbuffer;
	__u32	quantum;
	__u32	level;		/* out only */
	__u32	prio;
};

struct tc_htb_glob {
	__u32 version;		/* to match HTB/TC */
	__u32 rate2quantum;	/* bps->quantum divisor */
	__u32 defcls;		/* default class number */
	__u32 debug;		/* debug flags */

	/* stats */
	__u32 direct_pkts;	/* count of non shaped packets */
};
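
/*
 * Illustrative example (assumed, typical iproute2-style initialisation):
 *
 *	struct tc_htb_glob g = {
 *		.version      = TC_HTB_PROTOVER,
 *		.rate2quantum = 10,	// quantum = rate / r2q unless given
 *		.defcls       = 0x20,	// hypothetical default class "1:20"
 *	};
 *
 * sent as the TCA_HTB_INIT attribute when creating the htb qdisc.
 */
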
enum {
	TCA_HTB_UNSPEC,
	TCA_HTB_PARMS,
	TCA_HTB_INIT,
	TCA_HTB_CTAB,
	TCA_HTB_RTAB,
	TCA_HTB_DIRECT_QLEN,
	TCA_HTB_RATE64,
	TCA_HTB_CEIL64,
	TCA_HTB_PAD,
	__TCA_HTB_MAX,
};

#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)

struct tc_htb_xstats {
	__u32 lends;
	__u32 borrows;
	__u32 giants;	/* oversized packets (rate will not be accurate) */
	__u32 tokens;
	__u32 ctokens;
};

/* HFSC section */

struct tc_hfsc_qopt {
	__u16	defcls;		/* default class */
};

struct tc_service_curve {
	__u32	m1;		/* slope of the first segment in bps */
	__u32	d;		/* x-projection of the first segment in us */
	__u32	m2;		/* slope of the second segment in bps */
};
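
/*
 * Worked example (illustrative only): "give a 1500 byte packet at most
 * 10 ms of delay, then 1 Mbit/s" is expressed as a two-piece curve:
 *
 *	m1 = 1500 bytes / 10 ms		(initial slope)
 *	d  = 10000			(the 10 ms knee, in us)
 *	m2 = 1 Mbit/s			(long-term slope)
 *
 * using the unit conventions given in the field comments above.
 */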

struct tc_hfsc_stats {
	__u64	work;		/* total work done */
	__u64	rtwork;		/* work done by real-time criteria */
	__u32	period;		/* current period */
	__u32	level;		/* class level in hierarchy */
};

enum {
	TCA_HFSC_UNSPEC,
	TCA_HFSC_RSC,
	TCA_HFSC_FSC,
	TCA_HFSC_USC,
	__TCA_HFSC_MAX,
};

#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)


/* CBQ section */

#define TC_CBQ_MAXPRIO		8
#define TC_CBQ_MAXLEVEL		8
#define TC_CBQ_DEF_EWMA		5

struct tc_cbq_lssopt {
	unsigned char	change;
	unsigned char	flags;
#define TCF_CBQ_LSS_BOUNDED	1
#define TCF_CBQ_LSS_ISOLATED	2
	unsigned char	ewma_log;
	unsigned char	level;
#define TCF_CBQ_LSS_FLAGS	1
#define TCF_CBQ_LSS_EWMA	2
#define TCF_CBQ_LSS_MAXIDLE	4
#define TCF_CBQ_LSS_MINIDLE	8
#define TCF_CBQ_LSS_OFFTIME	0x10
#define TCF_CBQ_LSS_AVPKT	0x20
	__u32		maxidle;
	__u32		minidle;
	__u32		offtime;
	__u32		avpkt;
};

struct tc_cbq_wrropt {
	unsigned char	flags;
	unsigned char	priority;
	unsigned char	cpriority;
	unsigned char	__reserved;
	__u32		allot;
	__u32		weight;
};

struct tc_cbq_ovl {
	unsigned char	strategy;
#define	TC_CBQ_OVL_CLASSIC	0
#define	TC_CBQ_OVL_DELAY	1
#define	TC_CBQ_OVL_LOWPRIO	2
#define	TC_CBQ_OVL_DROP		3
#define	TC_CBQ_OVL_RCLASSIC	4
	unsigned char	priority2;
	__u16		pad;
	__u32		penalty;
};

struct tc_cbq_police {
	unsigned char	police;
	unsigned char	__res1;
	unsigned short	__res2;
};

struct tc_cbq_fopt {
	__u32		split;
	__u32		defmap;
	__u32		defchange;
};

struct tc_cbq_xstats {
	__u32		borrows;
	__u32		overactions;
	__s32		avgidle;
	__s32		undertime;
};

enum {
	TCA_CBQ_UNSPEC,
	TCA_CBQ_LSSOPT,
	TCA_CBQ_WRROPT,
	TCA_CBQ_FOPT,
	TCA_CBQ_OVL_STRATEGY,
	TCA_CBQ_RATE,
	TCA_CBQ_RTAB,
	TCA_CBQ_POLICE,
	__TCA_CBQ_MAX,
};

#define TCA_CBQ_MAX	(__TCA_CBQ_MAX - 1)

/* dsmark section */

enum {
	TCA_DSMARK_UNSPEC,
	TCA_DSMARK_INDICES,
	TCA_DSMARK_DEFAULT_INDEX,
	TCA_DSMARK_SET_TC_INDEX,
	TCA_DSMARK_MASK,
	TCA_DSMARK_VALUE,
	__TCA_DSMARK_MAX,
};

#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)

/* ATM section */

enum {
	TCA_ATM_UNSPEC,
	TCA_ATM_FD,		/* file/socket descriptor */
	TCA_ATM_PTR,		/* pointer to descriptor - later */
	TCA_ATM_HDR,		/* LL header */
	TCA_ATM_EXCESS,		/* excess traffic class (0 for CLP) */
	TCA_ATM_ADDR,		/* PVC address (for output only) */
	TCA_ATM_STATE,		/* VC state (ATM_VS_*; for output only) */
	__TCA_ATM_MAX,
};

#define TCA_ATM_MAX	(__TCA_ATM_MAX - 1)

/* Network emulator */

enum {
	TCA_NETEM_UNSPEC,
	TCA_NETEM_CORR,
	TCA_NETEM_DELAY_DIST,
	TCA_NETEM_REORDER,
	TCA_NETEM_CORRUPT,
	TCA_NETEM_LOSS,
	TCA_NETEM_RATE,
	TCA_NETEM_ECN,
	TCA_NETEM_RATE64,
	TCA_NETEM_PAD,
	TCA_NETEM_LATENCY64,
	TCA_NETEM_JITTER64,
	TCA_NETEM_SLOT,
	__TCA_NETEM_MAX,
};

#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
struct tc_netem_qopt {
	__u32	latency;	/* added delay (us) */
	__u32	limit;		/* fifo limit (packets) */
	__u32	loss;		/* random packet loss (0=none ~0=100%) */
	__u32	gap;		/* re-ordering gap (0 for none) */
	__u32	duplicate;	/* random packet dup (0=none ~0=100%) */
	__u32	jitter;		/* random jitter in latency (us) */
};
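
/*
 * As noted above, the loss/duplicate probabilities are scaled so that 0
 * means 0% and ~0U means 100% (the correlation fields below are assumed to
 * follow the same convention).  Illustrative conversion:
 *
 *	static __u32 netem_percent(double pct)
 *	{
 *		return (__u32)(pct / 100.0 * 0xFFFFFFFFu);	// 1% ~= 42949672
 *	}
 */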

struct tc_netem_corr {
	__u32	delay_corr;	/* delay correlation */
	__u32	loss_corr;	/* packet loss correlation */
	__u32	dup_corr;	/* duplicate correlation */
};

struct tc_netem_reorder {
	__u32	probability;
	__u32	correlation;
};

struct tc_netem_corrupt {
	__u32	probability;
	__u32	correlation;
};

struct tc_netem_rate {
	__u32	rate;	/* byte/s */
	__s32	packet_overhead;
	__u32	cell_size;
	__s32	cell_overhead;
};

struct tc_netem_slot {
	__s64	min_delay; /* nsec */
	__s64	max_delay;
	__s32	max_packets;
	__s32	max_bytes;
};

enum {
	NETEM_LOSS_UNSPEC,
	NETEM_LOSS_GI,		/* General Intuitive - 4 state model */
	NETEM_LOSS_GE,		/* Gilbert-Elliot model */
	__NETEM_LOSS_MAX
};
#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)

/* State transition probabilities for 4 state model */
struct tc_netem_gimodel {
	__u32	p13;
	__u32	p31;
	__u32	p32;
	__u32	p14;
	__u32	p23;
};

/* Gilbert-Elliot models */
struct tc_netem_gemodel {
	__u32	p;
	__u32	r;
	__u32	h;
	__u32	k1;
};

#define NETEM_DIST_SCALE	8192
#define NETEM_DIST_MAX		16384

/* DRR */

enum {
	TCA_DRR_UNSPEC,
	TCA_DRR_QUANTUM,
	__TCA_DRR_MAX
};

#define TCA_DRR_MAX	(__TCA_DRR_MAX - 1)

struct tc_drr_stats {
	__u32	deficit;
};

/* MQPRIO */
#define TC_QOPT_BITMASK 15
#define TC_QOPT_MAX_QUEUE 16

enum {
	TC_MQPRIO_HW_OFFLOAD_NONE,	/* no offload requested */
	TC_MQPRIO_HW_OFFLOAD_TCS,	/* offload TCs, no queue counts */
	__TC_MQPRIO_HW_OFFLOAD_MAX
};

#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)

enum {
	TC_MQPRIO_MODE_DCB,
	TC_MQPRIO_MODE_CHANNEL,
	__TC_MQPRIO_MODE_MAX
};

#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)

enum {
	TC_MQPRIO_SHAPER_DCB,
	TC_MQPRIO_SHAPER_BW_RATE,	/* Add new shapers below */
	__TC_MQPRIO_SHAPER_MAX
};

#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)

struct tc_mqprio_qopt {
	__u8	num_tc;
	__u8	prio_tc_map[TC_QOPT_BITMASK + 1];
	__u8	hw;
	__u16	count[TC_QOPT_MAX_QUEUE];
	__u16	offset[TC_QOPT_MAX_QUEUE];
};
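
/*
 * Illustrative example (hypothetical 8-queue NIC): four traffic classes with
 * two hardware queues each, priorities 0-3 mapped 1:1 and everything else
 * going to TC 0:
 *
 *	struct tc_mqprio_qopt q = {
 *		.num_tc      = 4,
 *		.prio_tc_map = { 0, 1, 2, 3, 0, 0, 0, 0,
 *				 0, 0, 0, 0, 0, 0, 0, 0 },
 *		.hw          = TC_MQPRIO_HW_OFFLOAD_TCS,
 *		.count       = { 2, 2, 2, 2 },
 *		.offset      = { 0, 2, 4, 6 },
 *	};
 */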

#define TC_MQPRIO_F_MODE		0x1
#define TC_MQPRIO_F_SHAPER		0x2
#define TC_MQPRIO_F_MIN_RATE		0x4
#define TC_MQPRIO_F_MAX_RATE		0x8

enum {
	TCA_MQPRIO_UNSPEC,
	TCA_MQPRIO_MODE,
	TCA_MQPRIO_SHAPER,
	TCA_MQPRIO_MIN_RATE64,
	TCA_MQPRIO_MAX_RATE64,
	__TCA_MQPRIO_MAX,
};

#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)

/* SFB */

enum {
	TCA_SFB_UNSPEC,
	TCA_SFB_PARMS,
	__TCA_SFB_MAX,
};

#define TCA_SFB_MAX (__TCA_SFB_MAX - 1)

/*
 * Note: increment, decrement are Q0.16 fixed-point values.
 */
struct tc_sfb_qopt {
	__u32 rehash_interval;	/* delay between hash move, in ms */
	__u32 warmup_time;	/* double buffering warmup time in ms (warmup_time < rehash_interval) */
	__u32 max;		/* max len of qlen_min */
	__u32 bin_size;		/* maximum queue length per bin */
	__u32 increment;	/* probability increment, (d1 in Blue) */
	__u32 decrement;	/* probability decrement, (d2 in Blue) */
	__u32 limit;		/* max SFB queue length */
	__u32 penalty_rate;	/* inelastic flows are rate limited to 'rate' pps */
	__u32 penalty_burst;
};
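
/*
 * Illustrative example: with the Q0.16 convention above, the classic Blue
 * parameters d1 = 0.0005 and d2 = 0.00005 would be encoded as
 *
 *	increment = (__u32)(0.0005  * 65536);	// ~32
 *	decrement = (__u32)(0.00005 * 65536);	// ~3
 */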

struct tc_sfb_xstats {
	__u32 earlydrop;
	__u32 penaltydrop;
	__u32 bucketdrop;
	__u32 queuedrop;
	__u32 childdrop; /* drops in child qdisc */
	__u32 marked;
	__u32 maxqlen;
	__u32 maxprob;
	__u32 avgprob;
};

#define SFB_MAX_PROB 0xFFFF

/* QFQ */
enum {
	TCA_QFQ_UNSPEC,
	TCA_QFQ_WEIGHT,
	TCA_QFQ_LMAX,
	__TCA_QFQ_MAX
};

#define TCA_QFQ_MAX	(__TCA_QFQ_MAX - 1)

struct tc_qfq_stats {
	__u32 weight;
	__u32 lmax;
};

/* CODEL */

enum {
	TCA_CODEL_UNSPEC,
	TCA_CODEL_TARGET,
	TCA_CODEL_LIMIT,
	TCA_CODEL_INTERVAL,
	TCA_CODEL_ECN,
	TCA_CODEL_CE_THRESHOLD,
	__TCA_CODEL_MAX
};

#define TCA_CODEL_MAX	(__TCA_CODEL_MAX - 1)
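
/*
 * Illustrative note: TCA_CODEL_TARGET, TCA_CODEL_INTERVAL and
 * TCA_CODEL_CE_THRESHOLD are expressed in microseconds and TCA_CODEL_LIMIT
 * in packets, so the classic CoDel defaults correspond to roughly
 *
 *	TCA_CODEL_TARGET   = 5000	// 5 ms
 *	TCA_CODEL_INTERVAL = 100000	// 100 ms
 */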

struct tc_codel_xstats {
	__u32	maxpacket; /* largest packet we've seen so far */
	__u32	count;	   /* how many drops we've done since the last time we
			    * entered dropping state
			    */
	__u32	lastcount; /* count at entry to dropping state */
	__u32	ldelay;    /* in-queue delay seen by most recently dequeued packet */
	__s32	drop_next; /* time to drop next packet */
	__u32	drop_overlimit; /* number of times the max qdisc packet limit was hit */
	__u32	ecn_mark;  /* number of packets we ECN marked instead of dropped */
	__u32	dropping;  /* are we in dropping state ? */
	__u32	ce_mark;   /* number of CE marked packets because of ce_threshold */
};

/* FQ_CODEL */

enum {
	TCA_FQ_CODEL_UNSPEC,
	TCA_FQ_CODEL_TARGET,
	TCA_FQ_CODEL_LIMIT,
	TCA_FQ_CODEL_INTERVAL,
	TCA_FQ_CODEL_ECN,
	TCA_FQ_CODEL_FLOWS,
	TCA_FQ_CODEL_QUANTUM,
	TCA_FQ_CODEL_CE_THRESHOLD,
	TCA_FQ_CODEL_DROP_BATCH_SIZE,
	TCA_FQ_CODEL_MEMORY_LIMIT,
	__TCA_FQ_CODEL_MAX
};

#define TCA_FQ_CODEL_MAX	(__TCA_FQ_CODEL_MAX - 1)

enum {
	TCA_FQ_CODEL_XSTATS_QDISC,
	TCA_FQ_CODEL_XSTATS_CLASS,
};

struct tc_fq_codel_qd_stats {
	__u32	maxpacket;	/* largest packet we've seen so far */
	__u32	drop_overlimit; /* number of times the max qdisc
				 * packet limit was hit
				 */
	__u32	ecn_mark;	/* number of packets we ECN marked
				 * instead of being dropped
				 */
	__u32	new_flow_count; /* number of times packets
				 * created a 'new flow'
				 */
	__u32	new_flows_len;	/* count of flows in new list */
	__u32	old_flows_len;	/* count of flows in old list */
	__u32	ce_mark;	/* packets above ce_threshold */
	__u32	memory_usage;	/* in bytes */
	__u32	drop_overmemory;
};

struct tc_fq_codel_cl_stats {
	__s32	deficit;
	__u32	ldelay;		/* in-queue delay seen by most recently
				 * dequeued packet
				 */
	__u32	count;
	__u32	lastcount;
	__u32	dropping;
	__s32	drop_next;
};

struct tc_fq_codel_xstats {
	__u32	type;
	union {
		struct tc_fq_codel_qd_stats qdisc_stats;
		struct tc_fq_codel_cl_stats class_stats;
	};
};

/* FQ */

enum {
	TCA_FQ_UNSPEC,

	TCA_FQ_PLIMIT,		/* limit of total number of packets in queue */

	TCA_FQ_FLOW_PLIMIT,	/* limit of packets per flow */

	TCA_FQ_QUANTUM,		/* RR quantum */

	TCA_FQ_INITIAL_QUANTUM,	/* RR quantum for new flow */

	TCA_FQ_RATE_ENABLE,	/* enable/disable rate limiting */

	TCA_FQ_FLOW_DEFAULT_RATE, /* obsolete, do not use */

	TCA_FQ_FLOW_MAX_RATE,	/* per flow max rate */

	TCA_FQ_BUCKETS_LOG,	/* log2(number of buckets) */

	TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */

	TCA_FQ_ORPHAN_MASK,	/* mask applied to orphaned skb hashes */

	TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */

	__TCA_FQ_MAX
};

#define TCA_FQ_MAX	(__TCA_FQ_MAX - 1)
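
/*
 * Illustrative note, in the spirit of the kernel's defaults (values are
 * assumptions, not mandated by this header): plimit of 10000 packets,
 * flow_plimit of 100 packets, quantum of 2 MTUs, initial_quantum of 10 MTUs
 * and buckets_log 10 (1024 buckets); TCA_FQ_FLOW_MAX_RATE and
 * TCA_FQ_LOW_RATE_THRESHOLD are expressed in bytes per second.
 */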

struct tc_fq_qd_stats {
	__u64	gc_flows;
	__u64	highprio_packets;
	__u64	tcp_retrans;
	__u64	throttled;
	__u64	flows_plimit;
	__u64	pkts_too_long;
	__u64	allocation_errors;
	__s64	time_next_delayed_flow;
	__u32	flows;
	__u32	inactive_flows;
	__u32	throttled_flows;
	__u32	unthrottle_latency_ns;
};

/* Heavy-Hitter Filter */

enum {
	TCA_HHF_UNSPEC,
	TCA_HHF_BACKLOG_LIMIT,
	TCA_HHF_QUANTUM,
	TCA_HHF_HH_FLOWS_LIMIT,
	TCA_HHF_RESET_TIMEOUT,
	TCA_HHF_ADMIT_BYTES,
	TCA_HHF_EVICT_TIMEOUT,
	TCA_HHF_NON_HH_WEIGHT,
	__TCA_HHF_MAX
};

#define TCA_HHF_MAX	(__TCA_HHF_MAX - 1)

struct tc_hhf_xstats {
	__u32	drop_overlimit; /* number of times the max qdisc packet
				 * limit was hit
				 */
	__u32	hh_overlimit;   /* number of times the heavy-hitter limit was hit */
	__u32	hh_tot_count;   /* number of captured heavy-hitters so far */
	__u32	hh_cur_count;   /* number of current heavy-hitters */
};

/* PIE */
enum {
	TCA_PIE_UNSPEC,
	TCA_PIE_TARGET,
	TCA_PIE_LIMIT,
	TCA_PIE_TUPDATE,
	TCA_PIE_ALPHA,
	TCA_PIE_BETA,
	TCA_PIE_ECN,
	TCA_PIE_BYTEMODE,
	__TCA_PIE_MAX
};
#define TCA_PIE_MAX   (__TCA_PIE_MAX - 1)

struct tc_pie_xstats {
	__u32 prob;		/* current probability */
	__u32 delay;		/* current delay in ms */
	__u32 avg_dq_rate;	/* current average dq_rate in bits/pie_time */
	__u32 packets_in;	/* total number of packets enqueued */
	__u32 dropped;		/* packets dropped due to pie_action */
	__u32 overlimit;	/* dropped due to lack of space in queue */
	__u32 maxq;		/* maximum queue size */
	__u32 ecn_mark;		/* packets marked with ECN */
};

/* CBS */
struct tc_cbs_qopt {
	__u8 offload;
	__u8 _pad[3];
	__s32 hicredit;
	__s32 locredit;
	__s32 idleslope;
	__s32 sendslope;
};
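
/*
 * Illustrative example of the 802.1Qav credit-based shaper parameters
 * (hypothetical values for reserving 20 Mbit/s on a 1 Gbit/s port with
 * 1500 byte frames):
 *
 *	struct tc_cbs_qopt q = {
 *		.offload   = 0,		// shape in software
 *		.hicredit  = 30,	// bytes
 *		.locredit  = -1470,	// bytes
 *		.idleslope = 20000,	// kbit/s gained while blocked
 *		.sendslope = -980000,	// idleslope - port rate, in kbit/s
 *	};
 */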

enum {
	TCA_CBS_UNSPEC,
	TCA_CBS_PARMS,
	__TCA_CBS_MAX,
};

#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)

#endif /* __LINUX_PKT_SCHED_H */