Searched refs:backlog (Results 1 - 25 of 40) sorted by relevance

/net/core/
gen_stats.c
  228  qstats->backlog += qcpu->backlog;
  244  qstats->backlog = q->backlog;
  279  d->tc_stats.backlog = qstats.backlog;
/net/sched/
sch_gred.c
  40   u32 backlog; /* bytes on the virtualQ */   (member in struct gred_sched_data)
  118  return sch->qstats.backlog;
  120  return q->backlog;
  232  if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
  233  q->backlog += qdisc_pkt_len(skb);
  258  net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
  261  q->backlog -= qdisc_pkt_len(skb);
  264  if (!sch->qstats.backlog)
  267  if (!q->backlog)
  290  net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\
  [all...]
sch_sfq.c
  113  unsigned int backlog;   (member in struct sfq_slot)
  330  slot->backlog -= len;
  397  slot->backlog = 0; /* should already be 0 anyway... */
  404  slot->backlog);
  455  sch->qstats.backlog -= delta;
  456  slot->backlog -= delta;
  465  slot->backlog += qdisc_pkt_len(skb);
  524  slot->backlog -= qdisc_pkt_len(skb);
  575  slot->backlog = 0;
  605  slot->backlog);
  [all...]
sch_mqprio.c
  239  sch->qstats.backlog += qdisc->qstats.backlog;
  350  qstats.backlog += qdisc->qstats.backlog;
sch_plug.c
  93   if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
sch_mq.c
  115  sch->qstats.backlog += qdisc->qstats.backlog;
sch_red.c
  67   child->qstats.backlog);
  271  sch->qstats.backlog = q->qdisc->qstats.backlog;
sch_fifo.c
  24   if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
sch_pie.c
  119  if (sch->qstats.backlog < 2 * mtu)
  248  int qlen = sch->qstats.backlog; /* current queue size in bytes */
  313  u32 qlen = sch->qstats.backlog; /* queue size in bytes */
sch_sfb.c
  573  sch->qstats.backlog = q->qdisc->qstats.backlog;
sch_tbf.c
  103  u32 limit; /* Maximal length of backlog: bytes */
  456  sch->qstats.backlog = q->qdisc->qstats.backlog;
sch_hfsc.c
  706   * first child for a new parent backlog period.
  1373  cl->qstats.backlog = cl->qdisc->qstats.backlog;
  1568  sch->qstats.backlog = 0;
  1571  sch->qstats.backlog += cl->qdisc->qstats.backlog;
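
Read together, the /net/sched/ hits show one recurring pattern: a qdisc adds qdisc_pkt_len(skb) to a byte counter when it enqueues a packet, subtracts it on dequeue or drop, and hierarchical qdiscs (sch_mq, sch_mqprio, sch_hfsc) rebuild sch->qstats.backlog by summing their children's counters. Below is a minimal user-space sketch of that accounting only; struct queue, struct pkt, enqueue() and dequeued() are made up for illustration and stand in for the kernel's struct Qdisc and sk_buff handling.

/* Sketch of qstats.backlog-style byte accounting; hypothetical types only. */
#include <stdio.h>

struct pkt { unsigned int len; };           /* stand-in for sk_buff */

struct queue {
    unsigned int backlog;   /* bytes queued, like qstats.backlog */
    unsigned int qlen;      /* packets queued */
    unsigned int limit;     /* byte limit, as sch_fifo/sch_plug enforce */
};

/* Enqueue: accept only while the byte backlog stays within the limit. */
static int enqueue(struct queue *q, const struct pkt *p)
{
    if (q->backlog + p->len > q->limit)
        return -1;                          /* over the limit: drop */
    q->backlog += p->len;
    q->qlen++;
    return 0;
}

/* Dequeue/drop side of the accounting: give the bytes back. */
static void dequeued(struct queue *q, const struct pkt *p)
{
    q->backlog -= p->len;
    q->qlen--;
}

/* Parent summing children, as sch_mq/sch_mqprio/sch_hfsc do. */
static unsigned int sum_backlog(const struct queue *child, int n)
{
    unsigned int total = 0;
    for (int i = 0; i < n; i++)
        total += child[i].backlog;
    return total;
}

int main(void)
{
    struct queue q[2] = { { .limit = 3000 }, { .limit = 3000 } };
    struct pkt p = { .len = 1500 };

    enqueue(&q[0], &p);
    enqueue(&q[0], &p);
    enqueue(&q[1], &p);
    dequeued(&q[0], &p);
    printf("total backlog = %u bytes\n", sum_backlog(q, 2));  /* 3000 */
    return 0;
}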
/net/dccp/
proto.c
  234   static inline int dccp_listen_start(struct sock *sk, int backlog)   (argument)
  242   return inet_csk_listen_start(sk, backlog);
  918   int inet_dccp_listen(struct socket *sock, int backlog)   (argument)
  935   * we can only allow the backlog to be adjusted.
  942   err = dccp_listen_start(sk, backlog);
  946   sk->sk_max_ack_backlog = backlog;
  1048  * It is the last release_sock in its life. It will remove backlog.
  1061  /* Have we already been destroyed by a softirq or backlog? */
dccp.h
  319   int inet_dccp_listen(struct socket *sock, int backlog);
/net/atm/
svc.c
  281  static int svc_listen(struct socket *sock, int backlog)   (argument)
  314  sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
/net/sunrpc/
xprt.c
  10    * one is available. Otherwise, it sleeps on the backlog queue
  945   xprt->stat.bklog_u += xprt->backlog.qlen;
  967   rpc_sleep_on(&xprt->backlog, task, NULL);
  972   if (rpc_wake_up_next(&xprt->backlog) == NULL)
  984   rpc_sleep_on(&xprt->backlog, task, NULL);
  1136  * backlog queue.
  1160  * backlog queue.
  1285  rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
  1358  rpc_destroy_wait_queue(&xprt->backlog);
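
The sunrpc hits use the name differently: xprt->backlog is an RPC wait queue that a request task sleeps on when no transport slot is free (rpc_sleep_on), and from which the next waiter is woken when a slot is released (rpc_wake_up_next). As a rough user-space analog of that wait-for-a-slot pattern, here is a sketch that substitutes a POSIX counting semaphore for the RPC wait-queue API; the slot count and function names are assumptions for illustration, not the sunrpc code.

/* Sketch: block until a "transport slot" frees up; the semaphore stands in
 * for rpc_sleep_on()/rpc_wake_up_next() on xprt->backlog. */
#include <semaphore.h>
#include <stdio.h>

#define MAX_SLOTS 2                      /* pretend the transport has 2 slots */

static sem_t slots;

static void reserve_slot(int task)
{
    sem_wait(&slots);                    /* no slot free: sleep ("backlog") */
    printf("task %d got a slot\n", task);
}

static void release_slot(int task)
{
    printf("task %d released its slot\n", task);
    sem_post(&slots);                    /* wake the next waiter, if any */
}

int main(void)
{
    sem_init(&slots, 0, MAX_SLOTS);
    reserve_slot(1);
    reserve_slot(2);
    /* A third reserve_slot() here would block until a release_slot(). */
    release_slot(1);
    reserve_slot(3);
    release_slot(2);
    release_slot(3);
    sem_destroy(&slots);
    return 0;
}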
/net/llc/
af_llc.c
  500  * @backlog: Number of connections to queue.
  505  static int llc_ui_listen(struct socket *sock, int backlog)   (argument)
  520  if (!(unsigned int)backlog) /* BSDism */
  521  backlog = 1;
  522  sk->sk_max_ack_backlog = backlog;
  761  /* Well, if we have backlog, try to process it now yet. */
  801  if (copied >= target) { /* Do not sleep, just process backlog. */
/net/bluetooth/rfcomm/
sock.c
  420  static int rfcomm_sock_listen(struct socket *sock, int backlog)   (argument)
  425  BT_DBG("sk %p backlog %d", sk, backlog);
  460  sk->sk_max_ack_backlog = backlog;
  967  /* Check for backlog size */
  969  BT_DBG("backlog full %d", parent->sk_ack_backlog);
/net/bluetooth/
sco.c
  579  static int sco_sock_listen(struct socket *sock, int backlog)   (argument)
  585  BT_DBG("sk %p backlog %d", sk, backlog);
  606  sk->sk_max_ack_backlog = backlog;
l2cap_sock.c
  251   static int l2cap_sock_listen(struct socket *sock, int backlog)   (argument)
  257   BT_DBG("sk %p backlog %d", sk, backlog);
  285   sk->sk_max_ack_backlog = backlog;
  1177  /* Check for backlog size */
  1179  BT_DBG("backlog full %d", parent->sk_ack_backlog);
/net/nfc/
llcp_sock.c
  197  static int llcp_sock_listen(struct socket *sock, int backlog)   (argument)
  202  pr_debug("sk %p backlog %d\n", sk, backlog);
  212  sk->sk_max_ack_backlog = backlog;
/net/rxrpc/
af_rxrpc.c
  188  static int rxrpc_listen(struct socket *sock, int backlog)   (argument)
  194  _enter("%p,%d", rx, backlog);
  209  sk->sk_max_ack_backlog = backlog;
/net/ipv4/
af_inet.c
  205  int inet_listen(struct socket *sock, int backlog)   (argument)
  222  * we can only allow the backlog to be adjusted.
  235  err = fastopen_init_queue(sk, backlog);
  245  err = inet_csk_listen_start(sk, backlog);
  249  sk->sk_max_ack_backlog = backlog;
/net/
Kconfig
  303  backlog reaches netdev_max_backlog. If a few out of many active flows
/net/phonet/
socket.c
  404  static int pn_socket_listen(struct socket *sock, int backlog)   (argument)
  422  sk->sk_max_ack_backlog = backlog;
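
Most of the remaining hits are per-protocol listen() implementations (dccp, atm, llc, bluetooth, nfc, rxrpc, ipv4, phonet) that share the same shape: check the socket state, normalize the requested backlog (af_llc.c turns 0 into 1, svc.c falls back to ATM_BACKLOG_DEFAULT), and store the result in sk->sk_max_ack_backlog, which the bluetooth accept paths then compare against sk_ack_backlog when logging "backlog full". A condensed sketch of that shared shape, using hypothetical types and a hypothetical proto_listen() rather than any one kernel implementation:

/* Sketch of the shared listen() shape; fake_socket/fake_sock/proto_listen()
 * are made up, not the kernel's struct socket / struct sock. */
#include <stdio.h>

enum ss_state { SS_UNCONNECTED, SS_LISTENING };

struct fake_sock {
    int sk_max_ack_backlog;   /* limit on pending, not-yet-accepted connections */
    int sk_ack_backlog;       /* current number of pending connections */
};

struct fake_socket {
    enum ss_state state;
    struct fake_sock sk;
};

static int proto_listen(struct fake_socket *sock, int backlog)
{
    if (sock->state != SS_UNCONNECTED && sock->state != SS_LISTENING)
        return -1;            /* wrong state: refuse */
    if (backlog == 0)
        backlog = 1;          /* the "BSDism" noted in llc_ui_listen() */
    /* If already listening, only the limit is adjusted (cf. af_inet.c). */
    sock->sk.sk_max_ack_backlog = backlog;
    sock->state = SS_LISTENING;
    return 0;
}

/* Accept path: the "backlog full" check the bluetooth code logs;
 * the limit is treated as a hard cap in this sketch. */
static int backlog_full(const struct fake_sock *parent)
{
    return parent->sk_ack_backlog >= parent->sk_max_ack_backlog;
}

int main(void)
{
    struct fake_socket s = { .state = SS_UNCONNECTED };

    proto_listen(&s, 0);
    printf("max_ack_backlog = %d\n", s.sk.sk_max_ack_backlog);  /* 1 */
    s.sk.sk_ack_backlog = 1;
    printf("backlog full: %d\n", backlog_full(&s.sk));          /* 1 */
    return 0;
}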
