tcp_bic.c revision 164891aadf1721fca4dce473bb0e0998181537c6
/*
 * Binary Increase Congestion control for TCP
 *
 *	This is from the implementation of BICTCP in
 *	Lisong Xu, Khaled Harfoush, and Injong Rhee.
 *	"Binary Increase Congestion Control for Fast, Long Distance
 *	Networks" in IEEE INFOCOM 2004
 * Available from:
 *	http://www.csc.ncsu.edu/faculty/rhee/export/bitcp.pdf
 *
 * Unless BIC is enabled and the congestion window is large,
 * this behaves the same as the original Reno.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <net/tcp.h>


#define BICTCP_BETA_SCALE 1024	/* Scale factor for beta calculation:
				 * max_cwnd = snd_cwnd * beta
				 */
#define BICTCP_B          4	/* In binary search,
				 * go to point (max+min)/N
				 */

static int fast_convergence = 1;
static int max_increment = 16;
static int low_window = 14;
static int beta = 819;		/* = 819/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh = 100;
static int smooth_part = 20;

module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(max_increment, int, 0644);
MODULE_PARM_DESC(max_increment, "Limit on increment allowed during binary search");
module_param(low_window, int, 0644);
MODULE_PARM_DESC(low_window, "lower bound on congestion window (for TCP friendliness)");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "beta for multiplicative decrease");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(smooth_part, int, 0644);
MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wmax-B to Wmax");


/* BIC TCP Parameters */
struct bictcp {
	u32	cnt;		/* increase cwnd by 1 after this number of ACKs */
	u32	last_max_cwnd;	/* last maximum snd_cwnd */
	u32	loss_cwnd;	/* congestion window at last loss */
	u32	last_cwnd;	/* the last snd_cwnd */
	u32	last_time;	/* time when last_cwnd was updated */
	u32	epoch_start;	/* beginning of an epoch */
#define ACK_RATIO_SHIFT	4
	u32	delayed_ack;	/* estimate of the packets/ACKs ratio << 4 */
};
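/*
 * Editorial note (not part of the original file): the parameters and the
 * fields above use simple fixed-point arithmetic.  With the default
 * beta = 819 the multiplicative decrease on loss is roughly
 * 819/1024 ~ 0.8, i.e. a 20% backoff instead of Reno's 50%.
 * delayed_ack holds a packets-per-ACK estimate scaled by
 * 1 << ACK_RATIO_SHIFT, so the reset value of 2 << ACK_RATIO_SHIFT used
 * in bictcp_reset() below corresponds to the common
 * one-ACK-per-two-packets delayed ACK pattern.
 */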

static inline void bictcp_reset(struct bictcp *ca)
{
	ca->cnt = 0;
	ca->last_max_cwnd = 0;
	ca->loss_cwnd = 0;
	ca->last_cwnd = 0;
	ca->last_time = 0;
	ca->epoch_start = 0;
	ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
}

static void bictcp_init(struct sock *sk)
{
	bictcp_reset(inet_csk_ca(sk));
	if (initial_ssthresh)
		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}

/*
 * Compute congestion window to use.
 */
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
	if (ca->last_cwnd == cwnd &&
	    (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
		return;

	ca->last_cwnd = cwnd;
	ca->last_time = tcp_time_stamp;

	if (ca->epoch_start == 0) /* record the beginning of an epoch */
		ca->epoch_start = tcp_time_stamp;

	/* start off normal */
	if (cwnd <= low_window) {
		ca->cnt = cwnd;
		return;
	}

	/* binary increase */
	if (cwnd < ca->last_max_cwnd) {
		__u32	dist = (ca->last_max_cwnd - cwnd)
			/ BICTCP_B;

		if (dist > max_increment)
			/* linear increase */
			ca->cnt = cwnd / max_increment;
		else if (dist <= 1U)
			/* binary search increase */
			ca->cnt = (cwnd * smooth_part) / BICTCP_B;
		else
			/* binary search increase */
			ca->cnt = cwnd / dist;
	} else {
		/* slow start AND linear increase */
		if (cwnd < ca->last_max_cwnd + BICTCP_B)
			/* slow start */
			ca->cnt = (cwnd * smooth_part) / BICTCP_B;
		else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1))
			/* slow start */
			ca->cnt = (cwnd * (BICTCP_B-1))
				/ (cwnd - ca->last_max_cwnd);
		else
			/* linear increase */
			ca->cnt = cwnd / max_increment;
	}

	/* if in slow start or link utilization is very low */
	if (ca->loss_cwnd == 0) {
		if (ca->cnt > 20) /* increase cwnd 5% per RTT */
			ca->cnt = 20;
	}

	ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
	if (ca->cnt == 0)	/* cannot be zero */
		ca->cnt = 1;
}
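/*
 * Worked example (editorial illustration, not part of the original file):
 * with the default parameters, suppose last_max_cwnd = 200.  At
 * cwnd = 100, dist = (200 - 100) / BICTCP_B = 25 > max_increment, so
 * cnt = 100 / 16 = 6 and the window grows by roughly 16 segments per RTT
 * (linear increase).  At cwnd = 180, dist = 5 and cnt = 180 / 5 = 36, so
 * growth slows to about 5 segments per RTT as the window closes in on the
 * previous maximum (binary search increase).  These figures ignore the
 * delayed-ACK correction applied at the end of bictcp_update().
 */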
static void bictcp_cong_avoid(struct sock *sk, u32 ack,
			      u32 seq_rtt, u32 in_flight, int data_acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);
	else {
		bictcp_update(ca, tp->snd_cwnd);

		/* In dangerous area, increase slowly.
		 * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
		 */
		if (tp->snd_cwnd_cnt >= ca->cnt) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		} else
			tp->snd_cwnd_cnt++;
	}
}

/*
 * behave like Reno until low_window is reached,
 * then increase congestion window slowly
 */
static u32 bictcp_recalc_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->epoch_start = 0;	/* end of epoch */

	/* Wmax and fast convergence */
	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
			/ (2 * BICTCP_BETA_SCALE);
	else
		ca->last_max_cwnd = tp->snd_cwnd;

	ca->loss_cwnd = tp->snd_cwnd;

	if (tp->snd_cwnd <= low_window)
		return max(tp->snd_cwnd >> 1U, 2U);
	else
		return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}

static u32 bictcp_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct bictcp *ca = inet_csk_ca(sk);
	return max(tp->snd_cwnd, ca->last_max_cwnd);
}

static void bictcp_state(struct sock *sk, u8 new_state)
{
	if (new_state == TCP_CA_Loss)
		bictcp_reset(inet_csk_ca(sk));
}

/* Track delayed acknowledgment ratio using sliding window
 * ratio = (15*ratio + sample) / 16
 */
static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (cnt > 0 && icsk->icsk_ca_state == TCP_CA_Open) {
		struct bictcp *ca = inet_csk_ca(sk);
		cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
		ca->delayed_ack += cnt;
	}
}


static struct tcp_congestion_ops bictcp = {
	.init		= bictcp_init,
	.ssthresh	= bictcp_recalc_ssthresh,
	.cong_avoid	= bictcp_cong_avoid,
	.set_state	= bictcp_state,
	.undo_cwnd	= bictcp_undo_cwnd,
	.pkts_acked	= bictcp_acked,
	.owner		= THIS_MODULE,
	.name		= "bic",
};

static int __init bictcp_register(void)
{
	BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&bictcp);
}

static void __exit bictcp_unregister(void)
{
	tcp_unregister_congestion_control(&bictcp);
}

module_init(bictcp_register);
module_exit(bictcp_unregister);

MODULE_AUTHOR("Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("BIC TCP");
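For illustration only (not part of tcp_bic.c): once the module is available (for example after modprobe tcp_bic), the algorithm can be selected system-wide with sysctl net.ipv4.tcp_congestion_control=bic, or per socket through the standard TCP_CONGESTION socket option. The userspace sketch below assumes a libc whose <netinet/tcp.h> defines TCP_CONGESTION.

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	char name[16] = "";
	socklen_t len = sizeof(name);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Ask the kernel to use BIC for this socket; this fails if the
	 * "bic" congestion control module is neither built in nor loadable. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bic",
		       strlen("bic")) < 0)
		perror("setsockopt TCP_CONGESTION");

	/* Read back the congestion control algorithm actually in use. */
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("congestion control in use: %.*s\n", (int)len, name);

	close(fd);
	return 0;
}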