Searched refs:skb (Results 1 - 25 of 881) sorted by relevance

/net/ipv6/
xfrm6_output.c
23 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, argument
26 return ip6_find_1stfragopt(skb, prevhdr);
30 static int xfrm6_local_dontfrag(struct sk_buff *skb) argument
33 struct sock *sk = skb->sk;
47 static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu) argument
50 struct sock *sk = skb->sk;
53 fl6.daddr = ipv6_hdr(skb)->daddr;
58 void xfrm6_local_error(struct sk_buff *skb, u32 mtu) argument
62 struct sock *sk = skb->sk;
64 hdr = skb
71 xfrm6_tunnel_check_size(struct sk_buff *skb) argument
95 xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb) argument
108 xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) argument
122 xfrm6_output_finish(struct sk_buff *skb) argument
134 __xfrm6_output(struct sk_buff *skb) argument
168 xfrm6_output(struct sock *sk, struct sk_buff *skb) argument
[all...]
tcpv6_offload.c
19 struct sk_buff *skb)
22 if (!NAPI_GRO_CB(skb)->flush &&
23 skb_gro_checksum_validate(skb, IPPROTO_TCP,
25 NAPI_GRO_CB(skb)->flush = 1;
29 return tcp_gro_receive(head, skb);
32 static int tcp6_gro_complete(struct sk_buff *skb, int thoff) argument
34 const struct ipv6hdr *iph = ipv6_hdr(skb);
35 struct tcphdr *th = tcp_hdr(skb);
37 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
39 skb_shinfo(skb)
18 tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) argument
44 tcp6_gso_segment(struct sk_buff *skb, netdev_features_t features) argument
[all...]
udp_offload.c
20 static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, argument
33 mss = skb_shinfo(skb)->gso_size;
34 if (unlikely(skb->len <= mss))
37 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
39 int type = skb_shinfo(skb)->gso_type;
53 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
59 if (skb->encapsulation && skb_shinfo(skb)->gso_type &
61 segs = skb_udp_tunnel_segment(skb, feature
123 udp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) argument
151 udp6_gro_complete(struct sk_buff *skb, int nhoff) argument
[all...]
xfrm6_mode_transport.c
22 static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) argument
28 iph = ipv6_hdr(skb);
30 hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
31 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
32 skb_set_network_header(skb, -x->props.header_len);
33 skb->transport_header = skb->network_header + hdr_len;
34 __skb_pull(skb, hdr_len);
35 memmove(ipv6_hdr(skb), ip
47 xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) argument
[all...]
ip6_input.c
50 int ip6_rcv_finish(struct sk_buff *skb) argument
52 if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
55 ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
57 ipprot->early_demux(skb);
59 if (!skb_dst(skb))
60 ip6_route_input(skb);
62 return dst_input(skb);
65 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) argument
70 struct net *net = dev_net(skb
201 ip6_input_finish(struct sk_buff *skb) argument
277 ip6_input(struct sk_buff *skb) argument
283 ip6_mc_input(struct sk_buff *skb) argument
[all...]
/net/ipv4/
xfrm4_output.c
21 static int xfrm4_tunnel_check_size(struct sk_buff *skb) argument
25 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
28 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
31 mtu = dst_mtu(skb_dst(skb));
32 if (skb->len > mtu) {
33 if (skb->sk)
34 xfrm_local_error(skb, mtu);
36 icmp_send(skb, ICMP_DEST_UNREACH,
44 int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb) argument
57 xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) argument
71 xfrm4_output_finish(struct sk_buff *skb) argument
83 __xfrm4_output(struct sk_buff *skb) argument
97 xfrm4_output(struct sock *sk, struct sk_buff *skb) argument
104 xfrm4_local_error(struct sk_buff *skb, u32 mtu) argument
[all...]
xfrm4_mode_transport.c
21 static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) argument
23 struct iphdr *iph = ip_hdr(skb);
26 skb_set_network_header(skb, -x->props.header_len);
27 skb->mac_header = skb->network_header +
29 skb->transport_header = skb->network_header + ihl;
30 __skb_pull(skb, ihl);
31 memmove(skb_network_header(skb), iph, ihl);
39 * On entry, skb
43 xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) argument
[all...]
xfrm4_input.c
20 int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb) argument
22 return xfrm4_extract_header(skb);
25 static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) argument
27 if (skb_dst(skb) == NULL) {
28 const struct iphdr *iph = ip_hdr(skb);
30 if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
31 iph->tos, skb->dev))
34 return dst_input(skb);
36 kfree_skb(skb);
40 int xfrm4_transport_finish(struct sk_buff *skb, in argument
67 xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) argument
153 xfrm4_rcv(struct sk_buff *skb) argument
[all...]
ip_forward.c
42 static bool ip_may_fragment(const struct sk_buff *skb) argument
44 return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
45 skb->ignore_df;
48 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) argument
50 if (skb->len <= mtu)
53 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
60 static int ip_forward_finish(struct sk_buff *skb) argument
62 struct ip_options *opt = &(IPCB(skb)->opt);
64 IP_INC_STATS_BH(dev_net(skb_dst(skb)
73 ip_forward(struct sk_buff *skb) argument
[all...]
tcp_offload.c
17 static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, argument
20 while (skb) {
22 skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
23 skb_shinfo(skb)->tskey = ts_seq;
27 skb = skb->next;
32 struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, argument
35 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
38 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
39 const struct iphdr *iph = ip_hdr(skb);
54 tcp_gso_segment(struct sk_buff *skb, netdev_features_t features) argument
178 tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) argument
274 tcp_gro_complete(struct sk_buff *skb) argument
291 tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) argument
304 tcp4_gro_complete(struct sk_buff *skb, int thoff) argument
[all...]
gre_offload.c
18 static struct sk_buff *gre_gso_segment(struct sk_buff *skb, argument
25 u16 mac_offset = skb->mac_header;
26 int mac_len = skb->mac_len;
27 __be16 protocol = skb->protocol;
31 if (unlikely(skb_shinfo(skb)->gso_type &
42 if (!skb->encapsulation)
45 if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
48 greh = (struct gre_base_hdr *)skb_transport_header(skb);
50 ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
118 gre_gro_receive(struct sk_buff **head, struct sk_buff *skb) argument
226 gre_gro_complete(struct sk_buff *skb, int nhoff) argument
[all...]
xfrm4_mode_tunnel.c
18 static inline void ipip_ecn_decapsulate(struct sk_buff *skb) argument
20 struct iphdr *inner_iph = ipip_hdr(skb);
22 if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
30 static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) argument
32 struct dst_entry *dst = skb_dst(skb);
36 skb_set_network_header(skb, -x->props.header_len);
37 skb->mac_header = skb->network_header +
39 skb->transport_header = skb
71 xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) argument
[all...]
/net/bridge/
br_forward.c
25 struct sk_buff *skb,
27 struct sk_buff *skb));
31 const struct sk_buff *skb)
33 return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
34 br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
38 int br_dev_queue_push_xmit(struct sk_buff *skb) argument
41 if (nf_bridge_maybe_copy_header(skb) ||
42 !is_skb_forwardable(skb->dev, skb)) {
43 kfree_skb(skb);
30 should_deliver(const struct net_bridge_port *p, const struct sk_buff *skb) argument
54 br_forward_finish(struct sk_buff *skb) argument
62 __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) argument
84 __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) argument
106 br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) argument
118 br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) argument
132 deliver_clone(const struct net_bridge_port *prev, struct sk_buff *skb, void (*__packet_hook)(const struct net_bridge_port *p, struct sk_buff *skb)) argument
149 maybe_deliver( struct net_bridge_port *prev, struct net_bridge_port *p, struct sk_buff *skb, void (*__packet_hook)(const struct net_bridge_port *p, struct sk_buff *skb)) argument
172 br_flood(struct net_bridge *br, struct sk_buff *skb, struct sk_buff *skb0, void (*__packet_hook)(const struct net_bridge_port *p, struct sk_buff *skb), bool unicast) argument
208 br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast) argument
214 br_flood_forward(struct net_bridge *br, struct sk_buff *skb, struct sk_buff *skb2, bool unicast) argument
222 br_multicast_flood(struct net_bridge_mdb_entry *mdst, struct sk_buff *skb, struct sk_buff *skb0, void (*__packet_hook)( const struct net_bridge_port *p, struct sk_buff *skb)) argument
271 br_multicast_deliver(struct net_bridge_mdb_entry *mdst, struct sk_buff *skb) argument
278 br_multicast_forward(struct net_bridge_mdb_entry *mdst, struct sk_buff *skb, struct sk_buff *skb2) argument
[all...]
br_netfilter.c
45 #define skb_origaddr(skb) (((struct bridge_skb_cb *) \
46 (skb->nf_bridge->data))->daddr.ipv4)
47 #define store_orig_dstaddr(skb) (skb_origaddr(skb) = ip_hdr(skb)->daddr)
48 #define dnat_took_place(skb) (skb_origaddr(skb) != ip_hdr(skb)->daddr)
67 #define IS_IP(skb) \
68 (!vlan_tx_tag_present(skb)
76 vlan_proto(const struct sk_buff *skb) argument
98 pppoe_proto(const struct sk_buff *skb) argument
130 nf_bridge_alloc(struct sk_buff *skb) argument
139 nf_bridge_unshare(struct sk_buff *skb) argument
156 nf_bridge_push_encap_header(struct sk_buff *skb) argument
164 nf_bridge_pull_encap_header(struct sk_buff *skb) argument
172 nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb) argument
180 nf_bridge_save_header(struct sk_buff *skb) argument
193 br_parse_ip_options(struct sk_buff *skb) argument
244 br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) argument
276 br_nf_pre_routing_finish_bridge(struct sk_buff *skb) argument
354 br_nf_pre_routing_finish(struct sk_buff *skb) argument
433 brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev) argument
448 setup_pre_routing(struct sk_buff *skb) argument
471 check_hbh_len(struct sk_buff *skb) argument
527 br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) argument
577 br_nf_pre_routing(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) argument
636 br_nf_local_in(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) argument
647 br_nf_forward_finish(struct sk_buff *skb) argument
675 br_nf_forward_ip(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) argument
729 br_nf_forward_arp(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) argument
766 br_nf_dev_queue_xmit(struct sk_buff *skb) argument
789 br_nf_dev_queue_xmit(struct sk_buff *skb) argument
796 br_nf_post_routing(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) argument
842 ip_sabotage_in(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) argument
[all...]
/net/rose/
rose_loopback.c
35 int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh) argument
39 skbn = skb_clone(skb, GFP_ATOMIC);
41 kfree_skb(skb);
68 struct sk_buff *skb; local
75 while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
76 if (skb->len < ROSE_MIN_LEN) {
77 kfree_skb(skb);
80 lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
81 frametype = skb
116 struct sk_buff *skb; local
[all...]
/net/dsa/
tag_trailer.c
16 static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) argument
24 dev->stats.tx_bytes += skb->len;
33 if (skb->len < 60)
34 padlen = 60 - skb->len;
36 nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
38 kfree_skb(skb);
44 skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
45 skb_set_transport_header(nskb, skb_transport_header(skb) - skb
68 trailer_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) argument
[all...]
tag_brcm.c
61 static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev) argument
67 dev->stats.tx_bytes += skb->len;
69 if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
72 skb_push(skb, BRCM_TAG_LEN);
74 memmove(skb->data, skb->data + BRCM_TAG_LEN, 2 * ETH_ALEN);
77 brcm_tag = skb->data + 2 * ETH_ALEN;
83 ((skb->priority << BRCM_IG_TC_SHIFT) & BRCM_IG_TC_MASK);
93 skb->dev = p->parent->dst->master_netdev;
94 dev_queue_xmit(skb);
103 brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) argument
[all...]
tag_edsa.c
19 static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev) argument
25 dev->stats.tx_bytes += skb->len;
33 if (skb->protocol == htons(ETH_P_8021Q)) {
34 if (skb_cow_head(skb, DSA_HLEN) < 0)
36 skb_push(skb, DSA_HLEN);
38 memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
43 edsa_header = skb->data + 2 * ETH_ALEN;
59 if (skb_cow_head(skb, EDSA_HLEN) < 0)
61 skb_push(skb, EDSA_HLE
91 edsa_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) argument
[all...]
tag_dsa.c
18 static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev) argument
24 dev->stats.tx_bytes += skb->len;
31 if (skb->protocol == htons(ETH_P_8021Q)) {
32 if (skb_cow_head(skb, 0) < 0)
38 dsa_header = skb->data + 2 * ETH_ALEN;
50 if (skb_cow_head(skb, DSA_HLEN) < 0)
52 skb_push(skb, DSA_HLEN);
54 memmove(skb->data, skb->data + DSA_HLEN, 2 * ETH_ALEN);
59 dsa_header = skb
78 dsa_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) argument
[all...]
/net/llc/
llc_output.c
25 * @skb: Address of the frame to initialize its MAC header
32 int llc_mac_hdr_init(struct sk_buff *skb, argument
37 switch (skb->dev->type) {
40 rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
41 skb->len);
54 * @skb: packet to send
65 int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, argument
69 llc_pdu_header_init(skb, LLC_PDU_TYPE_U, sap->laddr.lsap,
71 llc_pdu_init_as_ui_cmd(skb);
[all...]
llc_s_ac.c
32 * @skb: the event to forward
37 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb) argument
39 llc_sap_rtn_pdu(sap, skb);
46 * @skb: the event to send
52 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) argument
54 struct llc_sap_state_ev *ev = llc_sap_ev(skb);
57 llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
59 llc_pdu_init_as_ui_cmd(skb);
60 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
62 rc = dev_queue_xmit(skb);
75 llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) argument
97 llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb) argument
129 llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb) argument
143 llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb) argument
177 llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb) argument
190 llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb) argument
204 llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb) argument
[all...]
/net/core/
timestamping.c
26 static unsigned int classify(const struct sk_buff *skb) argument
28 if (likely(skb->dev && skb->dev->phydev &&
29 skb->dev->phydev->drv))
30 return ptp_classify_raw(skb);
35 void skb_clone_tx_timestamp(struct sk_buff *skb) argument
41 if (!skb->sk)
44 type = classify(skb);
48 phydev = skb->dev->phydev;
50 clone = skb_clone_sk(skb);
58 skb_defer_rx_timestamp(struct sk_buff *skb) argument
[all...]
/net/x25/
H A Dx25_dev.c17 * 2000-09-04 Henner Eisen Prevent freeing a dangling skb.
31 static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) argument
37 if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
40 frametype = skb->data[2];
41 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
48 x25_link_control(skb, nb, frametype);
58 skb_reset_transport_header(skb);
61 queued = x25_process_rx_frame(sk, skb);
63 queued = !sk_add_backlog(sk, skb, s
99 x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) argument
153 struct sk_buff *skb; local
182 struct sk_buff *skb; local
206 x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb) argument
[all...]
/net/lapb/
lapb_subr.c
53 struct sk_buff *skb; local
63 skb = skb_dequeue(&lapb->ack_queue);
64 kfree_skb(skb);
71 struct sk_buff *skb, *skb_prev = NULL; local
78 while ((skb = skb_dequeue(&lapb->ack_queue)) != NULL) {
80 skb_queue_head(&lapb->write_queue, skb);
82 skb_append(skb_prev, skb, &lapb->write_queue);
83 skb_prev = skb;
111 int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb, argument
118 skb
231 struct sk_buff *skb; local
267 struct sk_buff *skb; local
[all...]
/net/ieee802154/
ieee802154.h
52 int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info);
53 int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb);
54 int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info);
55 int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info);
62 int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info);
63 int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info);
64 int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info);
65 int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info);
66 int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info);
67 int ieee802154_list_iface(struct sk_buff *skb, struc
[all...]

Completed in 204 milliseconds
