Searched refs:cwnd (Results 1 - 15 of 15) sorted by relevance

/net/ipv4/
tcp_bic.c
     50      u32 cnt;      /* increase cwnd by 1 after ACKs */
     84      static inline void bictcp_update(struct bictcp *ca, u32 cwnd)   [argument]
     86          if (ca->last_cwnd == cwnd &&
     90          ca->last_cwnd = cwnd;
     97          if (cwnd <= low_window) {
     98              ca->cnt = cwnd;
    103          if (cwnd < ca->last_max_cwnd) {
    104              __u32 dist = (ca->last_max_cwnd - cwnd)
    109              ca->cnt = cwnd / max_increment;
    112              ca->cnt = (cwnd * smooth_part) ...
[all...]
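
The snippet above is the heart of BIC's window growth: ca->cnt is how many ACKs must arrive before cwnd may grow by one segment, computed by binary search toward the last known maximum. Below is a minimal userspace sketch of that idea; BICTCP_B, MAX_INCREMENT and LOW_WINDOW are illustrative stand-ins for the module parameters, not the kernel's tuned defaults.

    /* build: cc bic_sketch.c */
    #include <stdint.h>
    #include <stdio.h>

    #define BICTCP_B       4u    /* binary search coefficient (assumed) */
    #define MAX_INCREMENT  16u   /* cap on growth rate (assumed) */
    #define LOW_WINDOW     14u   /* below this, behave like plain Reno */

    /* How many ACKs before cwnd may grow by one segment. */
    static uint32_t bic_cnt(uint32_t cwnd, uint32_t last_max_cwnd)
    {
        if (cwnd <= LOW_WINDOW)
            return cwnd;                      /* Reno: +1 segment per RTT */

        if (cwnd < last_max_cwnd) {
            uint32_t dist = (last_max_cwnd - cwnd) / BICTCP_B;

            if (dist > MAX_INCREMENT)         /* far from wmax: linear, capped */
                return cwnd / MAX_INCREMENT;
            if (dist == 0)
                dist = 1;
            return cwnd / dist;               /* binary search toward wmax */
        }
        return cwnd / MAX_INCREMENT;          /* past wmax: probe cautiously */
    }

    int main(void)
    {
        for (uint32_t w = 20; w <= 100; w += 20)
            printf("cwnd=%3u wmax=100 -> one segment per %u ACKs\n",
                   w, bic_cnt(w, 100));
        return 0;
    }

The closer cwnd gets to the old maximum, the larger cnt becomes, so growth decelerates exactly where the previous loss occurred.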
tcp_highspeed.c
     16      unsigned int cwnd;   [member in struct:hstcp_aimd_val]
    107       * since I don't think we will see a cwnd this large. :) */
    125       *   hstcp_aimd_vals[ca->ai-1].cwnd <
    127       *       hstcp_aimd_vals[ca->ai].cwnd
    129          if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
    130              while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
    133          } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
    134              while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
    140          /* cwnd = cwnd + a(w) / cwnd */
[all...]
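
hstcp_aimd_vals is the RFC 3649 lookup table: each row is a cwnd threshold, and the row index determines the additive-increase step a(w) applied once per window of ACKs. A sketch of the index walk visible at lines 129-134 follows, with a truncated illustrative table; note the simplification that a(w) is stored directly in the row, whereas the kernel's roughly 70-row table stores the decrease factor and derives a(w) from the index.

    #include <stdint.h>
    #include <stdio.h>

    struct hstcp_aimd_val { uint32_t cwnd; uint32_t ai; };

    /* First few thresholds from RFC 3649; a(w) stored for simplicity. */
    static const struct hstcp_aimd_val aimd_vals[] = {
        {  38, 1 }, { 118, 2 }, { 221, 3 }, { 347, 4 }, { 495, 5 },
    };
    #define TBL_LEN (sizeof(aimd_vals) / sizeof(aimd_vals[0]))

    /* Re-seek the table index after cwnd changed: walk up while cwnd
     * exceeds the current row, down while it falls under the previous. */
    static unsigned int hstcp_reindex(unsigned int ai, uint32_t snd_cwnd)
    {
        if (snd_cwnd > aimd_vals[ai].cwnd) {
            while (snd_cwnd > aimd_vals[ai].cwnd && ai < TBL_LEN - 1)
                ai++;
        } else if (ai && snd_cwnd <= aimd_vals[ai - 1].cwnd) {
            while (ai && snd_cwnd <= aimd_vals[ai - 1].cwnd)
                ai--;
        }
        return ai;
    }

    int main(void)
    {
        unsigned int ai = hstcp_reindex(0, 250);

        /* per-RTT additive increase is then cwnd += a(w)/cwnd per ACK */
        printf("cwnd=250 -> row %u, a(w)=%u\n", ai, aimd_vals[ai].ai);
        return 0;
    }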
tcp_cubic.c
     78      MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
     84      u32 cnt;      /* increase cwnd by 1 after ACKs */
     95      u32 tcp_cwnd; /* estimated tcp cwnd */
    208      static inline void bictcp_update(struct bictcp *ca, u32 cwnd)   [argument]
    215          if (ca->last_cwnd == cwnd &&
    219          ca->last_cwnd = cwnd;
    225          ca->tcp_cwnd = cwnd;   /* syn with cubic */
    227          if (ca->last_max_cwnd <= cwnd) {
    229              ca->bic_origin_point = cwnd;
    232               * (wmax-cwnd) * (srtt ...
[all...]
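
CUBIC replaces BIC's piecewise rules with a single cubic curve centred on the last maximum: W(t) = C*(t-K)^3 + Wmax, where K is the time the curve needs to climb back to Wmax. A sketch of the real-valued function, assuming the RFC 8312 constants C = 0.4 and beta = 0.7; the kernel computes the same curve in fixed point, which is what the truncated comment at line 232 is describing.

    /* build: cc cubic_sketch.c -lm */
    #include <math.h>
    #include <stdio.h>

    /* Cubic window as a function of time since the last loss. */
    static double cubic_w(double t, double wmax)
    {
        const double C = 0.4, beta = 0.7;          /* RFC 8312 defaults */
        double K = cbrt(wmax * (1.0 - beta) / C);  /* time to regain wmax */

        return C * pow(t - K, 3.0) + wmax;
    }

    int main(void)
    {
        for (double t = 0.0; t <= 4.0; t += 1.0)
            printf("t=%.0fs  W=%.1f segments (wmax=100)\n",
                   t, cubic_w(t, 100.0));
        return 0;
    }

The curve is concave below Wmax (fast recovery that flattens near the old maximum) and convex above it (cautious, then aggressive probing), all from one formula.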
tcp_cong.c
    291       * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
    292       * returns the leftover acks to adjust cwnd in congestion avoidance mode.
    296          u32 cwnd = tp->snd_cwnd + acked;   [local]
    298          if (cwnd > tp->snd_ssthresh)
    299              cwnd = tp->snd_ssthresh + 1;
    300          tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
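
tcp_slow_start is shown here almost in full: it grows cwnd by the number of newly acked segments (Appropriate Byte Counting), caps it one past ssthresh, clamps it, and returns the ACKs it did not consume so the caller can feed them into congestion avoidance. A standalone restatement of exactly that logic:

    #include <stdint.h>
    #include <stdio.h>

    /* Returns the acked segments left over for congestion avoidance. */
    static uint32_t slow_start(uint32_t *snd_cwnd, uint32_t ssthresh,
                               uint32_t clamp, uint32_t acked)
    {
        uint32_t cwnd = *snd_cwnd + acked;

        if (cwnd > ssthresh)
            cwnd = ssthresh + 1;             /* exit slow start here */
        acked -= cwnd - *snd_cwnd;           /* leftover ACKs */
        *snd_cwnd = cwnd < clamp ? cwnd : clamp;
        return acked;
    }

    int main(void)
    {
        uint32_t cwnd = 10;
        uint32_t left = slow_start(&cwnd, 12, 64, 5);

        printf("cwnd=%u, leftover acks=%u\n", cwnd, left);  /* 13, 2 */
        return 0;
    }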
tcp_output.c
    143       * This is the first part of the cwnd validation mechanism. */
    149          u32 cwnd = tp->snd_cwnd;   [local]
    154          restart_cwnd = min(restart_cwnd, cwnd);
    156          while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
    157              cwnd >>= 1;
    158          tp->snd_cwnd = max(cwnd, restart_cwnd);
   1431      /* RFC 2861, slow part. Adjust cwnd after it was not full during one rto.
   1432       * As additional protections, we do not touch cwnd in retransmission phases,
   1458       * window, and remember whether we were cwnd-limited then.
   1556          u32 in_flight, cwnd;   [local]
[all...]
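
The first block (lines 149-158) is the cwnd-restart half of RFC 2861 validation: for every RTO that elapsed since the last send, cwnd is halved, but never below the restart window, so a long-idle connection cannot blast a stale window into the network. A sketch with times in milliseconds:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t cwnd_after_idle(uint32_t cwnd, uint32_t restart_cwnd,
                                    int64_t idle_ms, uint32_t rto_ms)
    {
        int64_t delta = idle_ms;

        if (restart_cwnd > cwnd)
            restart_cwnd = cwnd;
        /* halve once per full RTO spent idle, floor at restart window */
        while ((delta -= rto_ms) > 0 && cwnd > restart_cwnd)
            cwnd >>= 1;
        return cwnd > restart_cwnd ? cwnd : restart_cwnd;
    }

    int main(void)
    {
        /* 3.5 RTOs idle: cwnd 40 halves toward the restart window of 10 */
        printf("cwnd=%u\n", cwnd_after_idle(40, 10, 700, 200));
        return 0;
    }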
tcp_input.c
    238      /* Better not delay acks, sender can have a very low cwnd */
    750       * in flight is significantly lower than cwnd (or rwin)
    757      /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
    785       * to do with delayed acks, because at cwnd>2 true delack timeout
    805          __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);   [local]
    807          if (!cwnd)
    808              cwnd = TCP_INIT_CWND;
    809          return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
   2158       * with the same cwnd?
   2451      /* Try to undo cwnd reduction ...
[all...]
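
Lines 805-809 pick the initial congestion window: a per-route metric if the administrator configured one (ip route ... initcwnd N), otherwise the compile-time default, and never above the socket's clamp. A sketch, assuming the current default of 10 segments from RFC 6928:

    #include <stdint.h>
    #include <stdio.h>

    #define TCP_INIT_CWND 10   /* default initial window (RFC 6928) */

    /* route_initcwnd == 0 means "no per-route metric configured". */
    static uint32_t tcp_init_cwnd(uint32_t route_initcwnd,
                                  uint32_t cwnd_clamp)
    {
        uint32_t cwnd = route_initcwnd;

        if (!cwnd)
            cwnd = TCP_INIT_CWND;
        return cwnd < cwnd_clamp ? cwnd : cwnd_clamp;
    }

    int main(void)
    {
        printf("no metric            : %u\n", tcp_init_cwnd(0, 0xFFFF));
        printf("route initcwnd 32    : %u\n", tcp_init_cwnd(32, 0xFFFF));
        return 0;
    }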
/net/sctp/
transport.c
    378      /* This routine updates the transport's cwnd and partial_bytes_acked
    385          __u32 cwnd, ssthresh, flight_size, pba, pmtu;   [local]
    387          cwnd = transport->cwnd;
    395          /* The appropriate cwnd increase algorithm is performed if, and only
    400              (flight_size < cwnd))
    407          if (cwnd <= ssthresh) {
    409               * o When cwnd is less than or equal to ssthresh, an SCTP
    411               *   cwnd only if the current congestion window is being fully
    414               *   Only when these three conditions are met can the cwnd be ...
[all...]
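
The SCTP rule quoted here (RFC 4960, section 7.2.1) only grows cwnd when the window is actually full, and in slow start by at most min(bytes_acked, PMTU) per SACK. A simplified sketch of just that branch; the partial_bytes_acked congestion-avoidance machinery and the cumulative-TSN-advance check are deliberately omitted:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t sctp_cwnd_increase(uint32_t cwnd, uint32_t ssthresh,
                                       uint32_t flight_size,
                                       uint32_t bytes_acked, uint32_t pmtu)
    {
        if (flight_size < cwnd)
            return cwnd;      /* window not fully utilized: no growth */
        if (cwnd <= ssthresh) /* slow start: at most one PMTU per SACK */
            return cwnd + (bytes_acked < pmtu ? bytes_acked : pmtu);
        return cwnd;          /* congestion avoidance elided here */
    }

    int main(void)
    {
        printf("cwnd=%u\n",
               sctp_cwnd_increase(4380, 65535, 4380, 2920, 1460));
        return 0;
    }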
probe.c
     59      MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
    149              (full || sp->cwnd != lcwnd)) {
    150              lcwnd = sp->cwnd;
    168              &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh,
outqueue.c
    455          pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
    457              transport->cwnd, transport->ssthresh, transport->flight_size,
    902           * current cwnd).
   1628          pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
   1630              transport->cwnd, transport->ssthresh,
output.c
    648       * the receiver if allowed by cwnd (see rule B below). This rule
    669       * to a given transport address if it has cwnd or more bytes
    676       * ignore the value of cwnd and SHOULD NOT delay retransmission.
    679              flight_size >= transport->cwnd)
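
The transmit rule in this file: new DATA is held back while a full cwnd is already in flight, but retransmissions deliberately ignore cwnd, since delaying them can stall recovery. Reduced to a predicate:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool sctp_can_send(uint32_t flight_size, uint32_t cwnd,
                              bool is_retransmit)
    {
        if (is_retransmit)
            return true;            /* SHOULD NOT delay retransmission */
        return flight_size < cwnd;  /* new DATA needs window headroom */
    }

    int main(void)
    {
        printf("new data, full window: %d\n",
               sctp_can_send(4380, 4380, false));
        printf("rtx, full window     : %d\n",
               sctp_can_send(4380, 4380, true));
        return 0;
    }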
associola.c
    673       * o The initial cwnd before DATA transmission or after a sufficiently
    681          peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
    796           * active state and set cwnd to 1 MTU, see SCTP
    801          transport->cwnd = asoc->pathmtu;
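
Line 681 is the RFC 4960 initial window, min(4*MTU, max(2*MTU, 4380 bytes)), and line 801 is the collapse to a single MTU when a path is marked down. The formula as a standalone helper:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }
    static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

    /* Initial cwnd per RFC 4960, section 7.2.1. */
    static uint32_t sctp_initial_cwnd(uint32_t pathmtu)
    {
        return min_u32(4 * pathmtu, max_u32(2 * pathmtu, 4380));
    }

    int main(void)
    {
        printf("mtu 1500: %u\n", sctp_initial_cwnd(1500)); /* 4380  */
        printf("mtu  512: %u\n", sctp_initial_cwnd(512));  /* 2048  */
        printf("mtu 9000: %u\n", sctp_initial_cwnd(9000)); /* 18000 */
        return 0;
    }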
socket.c
    595          trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
   4261          status.sstat_primary.spinfo_cwnd = transport->cwnd;
   4321          pinfo.spinfo_cwnd = transport->cwnd;
/net/sunrpc/
xprt.c
    388          dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
    389              task->tk_pid, xprt->cong, xprt->cwnd);
    435       * halved; otherwise, it is incremented by 1/cwnd when
    444          unsigned long cwnd = xprt->cwnd;   [local]
    446          if (result >= 0 && cwnd <= xprt->cong) {
    447              /* The (cwnd >> 1) term makes sure
    449              cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
[all...]
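
This is classic Van Jacobson AIMD applied to RPC request slots rather than TCP segments: a timeout halves cwnd, a successful reply adds roughly SCALE/cwnd, i.e. one slot per window's worth of replies, and the (cwnd >> 1) term rounds so the increment never decays to zero. A sketch; RPC_CWNDSHIFT = 8 and the cap are assumptions, not values checked against the current header, and the cwnd <= cong congestion test from line 446 is dropped for brevity:

    #include <stdio.h>

    #define RPC_CWNDSHIFT  8UL                     /* assumed scale */
    #define RPC_CWNDSCALE  (1UL << RPC_CWNDSHIFT)  /* one request slot */
    #define RPC_MAXCWND    (16UL * RPC_CWNDSCALE)  /* illustrative cap */

    static unsigned long rpc_adjust_cwnd(unsigned long cwnd, int result)
    {
        if (result >= 0) {
            /* additive increase: ~1/cwnd of a slot per reply, rounded */
            cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
            if (cwnd > RPC_MAXCWND)
                cwnd = RPC_MAXCWND;
        } else {
            /* multiplicative decrease on error, floor of one slot */
            cwnd >>= 1;
            if (cwnd < RPC_CWNDSCALE)
                cwnd = RPC_CWNDSCALE;
        }
        return cwnd;
    }

    int main(void)
    {
        unsigned long cwnd = RPC_CWNDSCALE;        /* start with one slot */

        for (int i = 1; i <= 3; i++) {
            cwnd = rpc_adjust_cwnd(cwnd, 0);
            printf("reply %d : cwnd = %.2f slots\n",
                   i, (double)cwnd / RPC_CWNDSCALE);
        }
        cwnd = rpc_adjust_cwnd(cwnd, -110);        /* timeout */
        printf("timeout : cwnd = %.2f slots\n",
               (double)cwnd / RPC_CWNDSCALE);
        return 0;
    }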
/net/dccp/ccids/
ccid2.c
     91       * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
     93       * acceptable since this causes starvation/deadlock whenever cwnd < 2.
    148          /* adjust pipe, cwnd etc */
    164          /* if we were blocked before, we may now send cwnd=1 packet */
    182       * ccid2_update_used_window - Track how much of cwnd is actually used
    197          /* don't reduce cwnd below the initial window (IW) */
    216          u32 cwnd = hc->tx_cwnd, restart_cwnd,   [local]
    219          hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
    221          /* don't reduce cwnd below ...
[all...]
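
CCID-2's restart-after-idle (lines 216-221) mirrors TCP's: remember at least 3/4 of the old window in ssthresh, which is what (cwnd >> 1) + (cwnd >> 2) computes, halve cwnd once per idle RTO, and floor the result at the initial window. A sketch; the RFC 3390-derived initial window is passed in rather than computed from the MSS:

    #include <stdint.h>
    #include <stdio.h>

    struct ccid2_sketch { uint32_t tx_cwnd, tx_ssthresh; };

    static void ccid2_restart_idle(struct ccid2_sketch *hc, uint32_t iwnd,
                                   uint32_t idle_ms, uint32_t rto_ms)
    {
        uint32_t cwnd = hc->tx_cwnd;
        uint32_t restart_cwnd = cwnd < iwnd ? cwnd : iwnd;
        uint32_t ssth = (cwnd >> 1) + (cwnd >> 2);   /* 3/4 of old cwnd */
        uint32_t shift = idle_ms / rto_ms;

        if (ssth > hc->tx_ssthresh)
            hc->tx_ssthresh = ssth;
        cwnd = shift < 32 ? cwnd >> shift : 0;       /* halve per idle RTO */
        /* don't reduce cwnd below the initial window (IW) */
        hc->tx_cwnd = cwnd > restart_cwnd ? cwnd : restart_cwnd;
    }

    int main(void)
    {
        struct ccid2_sketch hc = { .tx_cwnd = 40, .tx_ssthresh = 20 };

        ccid2_restart_idle(&hc, 4, 600, 200);
        printf("cwnd=%u ssthresh=%u\n", hc.tx_cwnd, hc.tx_ssthresh); /* 5, 30 */
        return 0;
    }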
/net/sunrpc/xprtrdma/
rpc_rdma.c
    736          unsigned long cwnd;   [local]
    866          cwnd = xprt->cwnd;
    867          xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
    868          if (xprt->cwnd > cwnd)
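
RPC-over-RDMA does not probe for capacity at all: the server grants credits in each reply, and lines 866-868 simply rebuild cwnd from the credit count, waking queued requests when the window grew. A sketch of that update, reusing the RPC_CWNDSHIFT fixed-point scale assumed above:

    #include <stdint.h>
    #include <stdio.h>

    #define RPC_CWNDSHIFT 8U   /* assumed scale, as in the sunrpc sketch */

    /* Rebuild cwnd from server-granted credits; returns headroom gained. */
    static unsigned long rpcrdma_update_cwnd(unsigned long *cwnd,
                                             uint32_t credits)
    {
        unsigned long old = *cwnd;

        *cwnd = (unsigned long)credits << RPC_CWNDSHIFT;
        return *cwnd > old ? *cwnd - old : 0;
    }

    int main(void)
    {
        unsigned long cwnd = 16UL << RPC_CWNDSHIFT;

        if (rpcrdma_update_cwnd(&cwnd, 32))   /* server granted 32 credits */
            printf("window grew to %lu slots, wake pending requests\n",
                   cwnd >> RPC_CWNDSHIFT);
        return 0;
    }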

Completed in 147 milliseconds