Searched defs:cwnd (Results 1 - 8 of 8) sorted by relevance

/net/ipv4/
tcp_highspeed.c   17 unsigned int cwnd; member in struct:hstcp_aimd_val
108 * since I don't think we will see a cwnd this large. :) */
126 * hstcp_aimd_vals[ca->ai-1].cwnd <
128 * hstcp_aimd_vals[ca->ai].cwnd
130 if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
131 while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
134 } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
135 while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
141 /* cwnd = cwnd
[all...]
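
The HighSpeed TCP hit above walks an AIMD lookup table so that ca->ai always points at the row bracketing the current snd_cwnd (the invariant quoted in lines 126-128). Below is a minimal userspace sketch of that index walk; the four-row table is only a tiny stand-in for the kernel's much larger hstcp_aimd_vals[], and its values are for illustration only.

#include <stddef.h>
#include <stdint.h>

struct aimd_row {
    uint32_t cwnd;      /* cwnd threshold for this row */
    uint32_t md;        /* multiplicative-decrease factor (scaled) */
};

/* Illustrative stand-in for hstcp_aimd_vals[] */
static const struct aimd_row aimd_table[] = {
    {  38, 128 },
    { 118, 112 },
    { 221, 104 },
    { 347,  98 },
};
static const size_t aimd_rows = sizeof(aimd_table) / sizeof(aimd_table[0]);

/* Keep ai so that: aimd_table[ai - 1].cwnd < snd_cwnd <= aimd_table[ai].cwnd */
static size_t hstcp_update_index(size_t ai, uint32_t snd_cwnd)
{
    if (snd_cwnd > aimd_table[ai].cwnd) {
        /* cwnd outgrew the current row: move the index up */
        while (snd_cwnd > aimd_table[ai].cwnd && ai < aimd_rows - 1)
            ai++;
    } else if (ai && snd_cwnd <= aimd_table[ai - 1].cwnd) {
        /* cwnd fell back to or below the previous row: move the index down */
        while (ai && snd_cwnd <= aimd_table[ai - 1].cwnd)
            ai--;
    }
    return ai;
}
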
tcp_bic.c   52 u32 cnt; /* increase cwnd by 1 after ACKs */
86 static inline void bictcp_update(struct bictcp *ca, u32 cwnd) argument
88 if (ca->last_cwnd == cwnd &&
92 ca->last_cwnd = cwnd;
99 if (cwnd <= low_window) {
100 ca->cnt = cwnd;
105 if (cwnd < ca->last_max_cwnd) {
106 __u32 dist = (ca->last_max_cwnd - cwnd)
111 ca->cnt = cwnd / max_increment;
114 ca->cnt = (cwnd * smooth_par
[all...]
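
The tcp_bic.c snippet computes ca->cnt, i.e. how many ACKs must arrive before cwnd grows by one segment. The sketch below follows the branch structure visible above; the constants (LOW_WINDOW, BICTCP_B, MAX_INCREMENT, SMOOTH_PART) are illustrative values, and the phase above the last maximum is simplified away.

#include <stdint.h>

#define LOW_WINDOW      14U   /* below this, behave like plain Reno */
#define BICTCP_B         4U   /* binary search reduction factor */
#define MAX_INCREMENT   16U   /* cap on per-RTT growth */
#define SMOOTH_PART     20U   /* slow growth close to the last maximum */

/* Return cnt: cwnd grows by one segment after this many ACKs. */
static uint32_t bictcp_cnt_sketch(uint32_t cwnd, uint32_t last_max_cwnd)
{
    if (cwnd <= LOW_WINDOW)
        return cwnd;                                /* Reno: +1 per cwnd ACKs */

    if (cwnd < last_max_cwnd) {
        uint32_t dist = (last_max_cwnd - cwnd) / BICTCP_B;

        if (dist > MAX_INCREMENT)
            return cwnd / MAX_INCREMENT;            /* far from wmax: linear increase */
        if (dist <= 1U)
            return (cwnd * SMOOTH_PART) / BICTCP_B; /* very close to wmax: grow slowly */
        return cwnd / dist;                         /* binary search toward wmax */
    }

    /* at or above the last maximum: probe upward (simplified here) */
    return cwnd;
}
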
tcp_cubic.c   78 MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
84 u32 cnt; /* increase cwnd by 1 after ACKs */
94 u32 tcp_cwnd; /* estimated tcp cwnd */
207 static inline void bictcp_update(struct bictcp *ca, u32 cwnd) argument
214 if (ca->last_cwnd == cwnd &&
218 ca->last_cwnd = cwnd;
224 ca->tcp_cwnd = cwnd; /* syn with cubic */
226 if (ca->last_max_cwnd <= cwnd) {
228 ca->bic_origin_point = cwnd;
231 * (wmax-cwnd) * (srt
[all...]
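
The tcp_cubic.c hit shows the start of a new congestion epoch: the Reno-equivalent tcp_cwnd is synced with the cubic state, and the origin point of the cubic curve is either the current cwnd (when it already meets or exceeds the last maximum, with time offset K = 0) or the last maximum with a non-zero K. The sketch below works under those assumptions; the struct fields, the cube_factor parameter, and the naive icbrt() helper are illustrative, not the kernel's fixed-point implementation.

#include <stdint.h>

struct cubic_epoch {
    uint32_t origin_point;   /* the W_max the cubic curve grows back toward */
    uint32_t K;              /* time offset to reach origin_point */
    uint32_t tcp_cwnd;       /* Reno-equivalent cwnd kept in sync with cubic */
};

/* naive integer cube root, good enough for an illustration */
static uint32_t icbrt(uint64_t x)
{
    uint32_t r = 0;
    while ((uint64_t)(r + 1) * (r + 1) * (r + 1) <= x)
        r++;
    return r;
}

static struct cubic_epoch cubic_start_epoch(uint32_t cwnd, uint32_t last_max_cwnd,
                                            uint64_t cube_factor)
{
    struct cubic_epoch e;

    e.tcp_cwnd = cwnd;                  /* "syn with cubic", as in the snippet */

    if (last_max_cwnd <= cwnd) {
        /* already at/above the old maximum: the current cwnd becomes the
         * new origin and probing starts immediately */
        e.K = 0;
        e.origin_point = cwnd;
    } else {
        /* otherwise K grows with the cubic root of the distance to W_max;
         * cube_factor stands in for the constant C and the scaling */
        e.K = icbrt(cube_factor * (last_max_cwnd - cwnd));
        e.origin_point = last_max_cwnd;
    }
    return e;
}
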
tcp_output.c  137 * This is the first part of cwnd validation mechanism. */
143 u32 cwnd = tp->snd_cwnd; local
148 restart_cwnd = min(restart_cwnd, cwnd);
150 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
151 cwnd >>= 1;
152 tp->snd_cwnd = max(cwnd, restart_cwnd);
1409 u32 in_flight, cwnd; local
1417 cwnd = tp->snd_cwnd;
1418 if (in_flight < cwnd)
1419 return (cwnd
[all...]
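
The tcp_output.c snippet is the cwnd-validation restart path: after an idle period, cwnd is halved once per elapsed RTO but never pushed below the restart window. A standalone sketch of that loop follows; names and types are illustrative, whereas the kernel works in jiffies against icsk_rto.

#include <stdint.h>

static uint32_t tcp_cwnd_restart_sketch(uint32_t snd_cwnd, uint32_t restart_cwnd,
                                        int64_t idle_time, int64_t rto)
{
    uint32_t cwnd = snd_cwnd;

    if (restart_cwnd > cwnd)
        restart_cwnd = cwnd;            /* restart window cannot exceed cwnd */

    /* one halving for every full RTO the connection sat idle */
    while ((idle_time -= rto) > 0 && cwnd > restart_cwnd)
        cwnd >>= 1;

    return cwnd > restart_cwnd ? cwnd : restart_cwnd;   /* max(cwnd, restart_cwnd) */
}
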
tcp_input.c  237 /* Better not delay acks, sender can have a very low cwnd */
715 * to do with delayed acks, because at cwnd>2 true delack timeout
735 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); local
737 if (!cwnd)
738 cwnd = TCP_INIT_CWND;
739 return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
2075 * with the same cwnd?
2370 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
2473 /* The cwnd reduction in CWR and Recovery use the PRR algorithm
2478 * cwnd reduction
[all...]
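
The tcp_input.c hit at line 735 picks the initial congestion window: a per-route RTAX_INITCWND metric if one is set, otherwise the stack default, clamped by snd_cwnd_clamp. A small sketch of that selection; INIT_CWND_DEFAULT stands in for TCP_INIT_CWND and its value here is only illustrative.

#include <stdint.h>

#define INIT_CWND_DEFAULT 10U   /* stand-in for TCP_INIT_CWND */

static uint32_t tcp_init_cwnd_sketch(uint32_t route_initcwnd, uint32_t snd_cwnd_clamp)
{
    uint32_t cwnd = route_initcwnd;     /* per-route override, 0 if unset */

    if (!cwnd)
        cwnd = INIT_CWND_DEFAULT;       /* fall back to the default initial window */

    /* never exceed the clamp: min_t(__u32, cwnd, snd_cwnd_clamp) */
    return cwnd < snd_cwnd_clamp ? cwnd : snd_cwnd_clamp;
}
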
/net/dccp/ccids/
ccid2.c   91 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
93 * acceptable since this causes starvation/deadlock whenever cwnd < 2.
148 /* adjust pipe, cwnd etc */
164 /* if we were blocked before, we may now send cwnd=1 packet */
182 * ccid2_update_used_window - Track how much of cwnd is actually used
197 /* don't reduce cwnd below the initial window (IW) */
216 u32 cwnd = hc->tx_cwnd, restart_cwnd, local
219 hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
221 /* don't reduce cwnd belo
[all...]
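
The ccid2.c matches touch two details of DCCP CCID-2: the Ack Ratio is kept at or below ceil(cwnd/2) so the sender is never starved, and on a restart ssthresh is not allowed to drop below three quarters of the old cwnd ((cwnd >> 1) + (cwnd >> 2)). Both calculations are sketched below with illustrative names.

#include <stdint.h>

/* Clamp Ack Ratio to ceil(cwnd / 2), avoiding starvation when cwnd is small. */
static uint32_t ccid2_clamp_ack_ratio(uint32_t ack_ratio, uint32_t cwnd)
{
    uint32_t limit = (cwnd + 1) / 2;
    return ack_ratio > limit ? limit : ack_ratio;
}

/* On restart, keep ssthresh at no less than 3/4 of the old cwnd. */
static uint32_t ccid2_restart_ssthresh(uint32_t ssthresh, uint32_t cwnd)
{
    uint32_t floor_val = (cwnd >> 1) + (cwnd >> 2);
    return ssthresh > floor_val ? ssthresh : floor_val;
}
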
/net/sctp/
transport.c  385 /* This routine updates the transport's cwnd and partial_bytes_acked
392 __u32 cwnd, ssthresh, flight_size, pba, pmtu; local
394 cwnd = transport->cwnd;
402 /* The appropriate cwnd increase algorithm is performed if, and only
407 (flight_size < cwnd))
414 if (cwnd <= ssthresh) {
416 * o When cwnd is less than or equal to ssthresh, an SCTP
418 * cwnd only if the current congestion window is being fully
421 * Only when these three conditions are met can the cwnd b
[all...]
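
The sctp/transport.c routine raises cwnd only when the cumulative TSN has advanced and the window is being fully utilized; in slow start the increase is bounded by one PMTU per SACK. The sketch below works under those assumptions and glosses over the partial_bytes_acked bookkeeping that governs the congestion-avoidance branch in the real code.

#include <stdint.h>

static uint32_t sctp_raise_cwnd_sketch(uint32_t cwnd, uint32_t ssthresh,
                                       uint32_t flight_size, uint32_t pmtu,
                                       uint32_t bytes_acked, int ctsn_advanced)
{
    /* grow only if new data was cumulatively acked and cwnd is fully used */
    if (!ctsn_advanced || flight_size < cwnd)
        return cwnd;

    if (cwnd <= ssthresh) {
        /* slow start: add min(bytes acked, one PMTU) */
        cwnd += bytes_acked < pmtu ? bytes_acked : pmtu;
    } else {
        /* congestion avoidance: roughly one PMTU per window; the real code
         * tracks this through partial_bytes_acked, elided here */
        cwnd += pmtu;
    }
    return cwnd;
}
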
/net/sunrpc/
xprt.c   79 * halved; otherwise, it is incremented by 1/cwnd when
90 #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
402 dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
403 task->tk_pid, xprt->cong, xprt->cwnd);
450 unsigned long cwnd = xprt->cwnd; local
452 if (result >= 0 && cwnd <= xprt->cong) {
453 /* The (cwnd >> 1) term makes sure
455 cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >>
[all...]
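
The sunrpc xprt.c hit is a Van Jacobson style congestion window for RPC request slots, kept in RPC_CWNDSCALE fixed-point units: a congestion-limited success adds roughly 1/cwnd, while a timeout halves the window. A hedged sketch follows; the scale and bound constants here are illustrative stand-ins, not the kernel's definitions.

#include <errno.h>

#define RPC_CWNDSCALE_SKETCH  256UL                      /* fixed-point unit */
#define RPC_MINCWND_SKETCH    RPC_CWNDSCALE_SKETCH       /* at least one slot */
#define RPC_MAXCWND_SKETCH    (RPC_CWNDSCALE_SKETCH * 64)

static unsigned long xprt_adjust_cwnd_sketch(unsigned long cwnd, unsigned long cong,
                                             int result)
{
    if (result >= 0 && cwnd <= cong) {
        /* additive increase: about one slot per full window of replies;
         * the (cwnd >> 1) term merely rounds the division */
        cwnd += (RPC_CWNDSCALE_SKETCH * RPC_CWNDSCALE_SKETCH + (cwnd >> 1)) / cwnd;
        if (cwnd > RPC_MAXCWND_SKETCH)
            cwnd = RPC_MAXCWND_SKETCH;
    } else if (result == -ETIMEDOUT) {
        /* multiplicative decrease on timeout */
        cwnd >>= 1;
        if (cwnd < RPC_MINCWND_SKETCH)
            cwnd = RPC_MINCWND_SKETCH;
    }
    return cwnd;
}
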

Completed in 174 milliseconds