/*
 *
 *   YeAH TCP
 *
 * For further details, see "YeAH-TCP: Yet Another Highspeed TCP"
 * (Baiocchi, Castellani, Vacirca; PFLDnet 2007), archived at:
 *   https://web.archive.org/web/20080316215752/http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
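 *
 * Once this module is loaded, the algorithm can be selected at run time,
 * e.g. via "sysctl -w net.ipv4.tcp_congestion_control=yeah" or the
 * TCP_CONGESTION socket option.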
 *
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

#include "tcp_vegas.h"

#define TCP_YEAH_ALPHA       80 /* number of packets queued at the bottleneck */
#define TCP_YEAH_GAMMA        1 /* fraction of queue to be removed per RTT */
#define TCP_YEAH_DELTA        3 /* log2 of the minimum fraction of cwnd to be removed on loss */
#define TCP_YEAH_EPSILON      1 /* log2 of the maximum fraction to be removed on early decongestion */
#define TCP_YEAH_PHY          8 /* maximum tolerated RTT increase: baseRTT / TCP_YEAH_PHY */
#define TCP_YEAH_RHO         16 /* minimum number of consecutive RTTs to consider competition on loss */
#define TCP_YEAH_ZETA        50 /* minimum number of state switches to reset reno_count */
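
/* TCP_YEAH_DELTA and TCP_YEAH_EPSILON are log2 shift counts: on loss at
 * least cwnd >> TCP_YEAH_DELTA = cwnd/8 is removed, while a precautionary
 * decongestion removes at most cwnd >> TCP_YEAH_EPSILON = cwnd/2.
 * E.g. with cwnd = 800 these bounds are 100 and 400 segments.
 */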

#define TCP_SCALABLE_AI_CNT	 100U	/* STCP: grow cwnd by 1 per min(cwnd, 100) acked segments */

/* YeAH variables */
struct yeah {
	struct vegas vegas;	/* must be first */

	/* YeAH */
	u32 lastQ;		/* bottleneck backlog (pkts) measured last RTT */
	u32 doing_reno_now;	/* consecutive RTTs spent in "slow" (Reno) mode */

	u32 reno_count;		/* estimate of a competing Reno flow's cwnd */
	u32 fast_count;		/* consecutive RTTs spent in "fast" (STCP) mode */

	u32 pkts_acked;		/* segments covered by the last ACK */
};

static void tcp_yeah_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	tcp_vegas_init(sk);

	yeah->doing_reno_now = 0;
	yeah->lastQ = 0;

	yeah->reno_count = 2;

	/* Ensure the multiplicative-decrease arithmetic cannot overflow.
	 * This is somewhat pedantic, since I don't think we will see a
	 * cwnd this large. :)
	 */
	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff / 128);
}

static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

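	/* Remember how many segments the last ACK covered; the Scalable
	 * additive increase consumes this.  Sample only in the Open state,
	 * so ACKs seen during loss recovery do not inflate the count.
	 */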
	if (icsk->icsk_ca_state == TCP_CA_Open)
		yeah->pkts_acked = pkts_acked;

	tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
}

static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);
	else if (!yeah->doing_reno_now) {
		/* Scalable */
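		/* STCP-style additive increase: cwnd gains one segment per
		 * min(cwnd, TCP_SCALABLE_AI_CNT) segments acked.  E.g. at
		 * cwnd = 1000 that is roughly 1000/100 = 10 segments per
		 * RTT, versus Reno's single segment.
		 */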

		tp->snd_cwnd_cnt += yeah->pkts_acked;
		if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		}

		yeah->pkts_acked = 1;
	} else {
		/* Reno: one segment per cwnd's worth of ACKed data */
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
	}

	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
	 *
	 * These are so named because they represent the approximate values
	 * of snd_una and snd_nxt at the beginning of the current RTT. More
	 * precisely, they represent the amount of data sent during the RTT.
	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
	 * we will calculate that (v_beg_snd_nxt - v_vegas.beg_snd_una) outstanding
	 * bytes of data have been ACKed during the course of the RTT, giving
	 * an "actual" rate of:
	 *
	 *     (v_beg_snd_nxt - v_vegas.beg_snd_una) / (rtt duration)
	 *
	 * Unfortunately, v_vegas.beg_snd_una is not exactly equal to snd_una,
	 * because delayed ACKs can cover more than one segment, so they
	 * don't line up exactly with the boundaries of RTTs.
	 *
	 * Another unfortunate fact of life is that delayed ACKs delay the
	 * advance of the left edge of our send window, so that the number
	 * of bytes we send in an RTT is often less than our cwnd will allow.
	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
	 */

	if (after(ack, yeah->vegas.beg_snd_nxt)) {
		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total,
		 * then that means we're getting only 1 ACK per RTT, which
		 * means they're almost certainly delayed ACKs.
		 * If we have 3 samples, we should be OK.
		 */

		if (yeah->vegas.cntRTT > 2) {
			u32 rtt, queue;
			u64 bw;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = yeah->vegas.minRTT;

			/* Compute the number of packets currently queued at
			 * the bottleneck: Q = cwnd * (rtt - baseRTT) / rtt.
			 * Avoid doing a full 64 bit divide.
			 */
			bw = tp->snd_cwnd;
			bw *= rtt - yeah->vegas.baseRTT;
			do_div(bw, rtt);
			queue = bw;
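
			/* For illustration (numbers are hypothetical): with
			 * cwnd = 100, baseRTT = 100 ms and rtt = 110 ms,
			 * queue = 100 * 10 / 110 = 9 packets are estimated
			 * to be sitting in the bottleneck buffer.
			 */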

			/* Decongest when either the estimated backlog
			 * exceeds TCP_YEAH_ALPHA packets or the RTT has grown
			 * past baseRTT by more than baseRTT / TCP_YEAH_PHY.
			 */
			if (queue > TCP_YEAH_ALPHA ||
			    rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
				if (queue > TCP_YEAH_ALPHA &&
				    tp->snd_cwnd > yeah->reno_count) {
					u32 reduction = min(queue / TCP_YEAH_GAMMA,
							    tp->snd_cwnd >> TCP_YEAH_EPSILON);

					tp->snd_cwnd -= reduction;

					tp->snd_cwnd = max(tp->snd_cwnd,
							   yeah->reno_count);

					tp->snd_ssthresh = tp->snd_cwnd;
				}

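				/* reno_count tracks the cwnd a competing
				 * Reno flow would have: seed it with cwnd/2
				 * on first entering slow mode, then grow it
				 * by one segment per RTT, as Reno would.
				 */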
				if (yeah->reno_count <= 2)
					yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
				else
					yeah->reno_count++;

				yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
							   0xffffffU);
			} else {
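				/* The network looks uncongested: count "fast"
				 * RTTs, and after TCP_YEAH_ZETA of them in a
				 * row conclude no Reno flow is competing and
				 * reset reno_count.
				 */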
				yeah->fast_count++;

				if (yeah->fast_count > TCP_YEAH_ZETA) {
					yeah->reno_count = 2;
					yeah->fast_count = 0;
				}

				yeah->doing_reno_now = 0;
			}

			yeah->lastQ = queue;
		}

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
		yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
		yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;

		/* Wipe the slate clean for the next RTT. */
		yeah->vegas.cntRTT = 0;
		yeah->vegas.minRTT = 0x7fffffff;
	}
}

static u32 tcp_yeah_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);
	u32 reduction;

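	/* On loss: if we have spent fewer than TCP_YEAH_RHO consecutive
	 * RTTs in Reno mode, assume there is no Reno competition and
	 * decongest by the measured backlog, clamped between
	 * cwnd >> TCP_YEAH_DELTA and cwnd/2; otherwise halve as Reno does.
	 */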
	if (yeah->doing_reno_now < TCP_YEAH_RHO) {
		reduction = yeah->lastQ;

		reduction = min(reduction, max(tp->snd_cwnd>>1, 2U));

		reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
	} else {
		reduction = max(tp->snd_cwnd>>1, 2U);
	}

	yeah->fast_count = 0;
	yeah->reno_count = max(yeah->reno_count>>1, 2U);

	return tp->snd_cwnd - reduction;
}
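
/* YeAH reuses the Vegas state, cwnd_event and get_info hooks, since it
 * builds on Vegas's per-RTT sampling machinery.
 */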

static struct tcp_congestion_ops tcp_yeah __read_mostly = {
	.init		= tcp_yeah_init,
	.ssthresh	= tcp_yeah_ssthresh,
	.cong_avoid	= tcp_yeah_cong_avoid,
	.set_state	= tcp_vegas_state,
	.cwnd_event	= tcp_vegas_cwnd_event,
	.get_info	= tcp_vegas_get_info,
	.pkts_acked	= tcp_yeah_pkts_acked,

	.owner		= THIS_MODULE,
	.name		= "yeah",
};

static int __init tcp_yeah_register(void)
{
	BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_yeah);
}

static void __exit tcp_yeah_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_yeah);
}

module_init(tcp_yeah_register);
module_exit(tcp_yeah_unregister);

MODULE_AUTHOR("Angelo P. Castellani");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("YeAH TCP");