Cross-reference of the identifier nskb under net/. Each entry lists one file's hits; the leading numbers are source line numbers, and "..." marks where the index truncated a hit.

/net/llc/
llc_station.c
    52: struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, ...
    55: if (!nskb)
    60: llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP);
    61: llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 127);
    62: rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
    65: dev_queue_xmit(nskb);
    69: kfree_skb(nskb);
    78: struct sk_buff *nskb;
    82: nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
    84: if (!nskb)

llc_c_ac.c
    202: struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
    204: if (nskb) {
    207: llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, ...
    209: llc_pdu_init_as_disc_cmd(nskb, 1);
    210: rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac);
    213: llc_conn_send_pdu(sk, nskb);
    219: kfree_skb(nskb);
    227: struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
    229: if (nskb) {
    234: llc_pdu_header_init(nskb, LLC_PDU_TYPE_...
    Further hits of the same shape: llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, ...) at 253, 311, 431, 777, and 806; llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0) at 466, 490, 514, 538, 562, 586, 622, 646, 670, 695, 719, 743, and 963; bare nskb declarations at 277 and 338.

llc_s_ac.c
    101: struct sk_buff *nskb;
    106: nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, ...
    108: if (!nskb)
    110: llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, ...
    112: llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 0);
    113: rc = llc_mac_hdr_init(nskb, mac_sa, mac_da);
    115: rc = dev_queue_xmit(nskb);
    146: struct sk_buff *nskb;
    156: nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
    157: if (!nskb)

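Taken together, the three llc entries trace one pattern: allocate a fresh frame with llc_alloc_frame(), fill in the LLC PDU header, build the MAC header, then either dev_queue_xmit() the frame (station and SAP code) or hand it to llc_conn_send_pdu() (connection code), freeing it only when header setup fails. A minimal sketch of the station-side XID response, assuming mac_da and dsap were already decoded from the incoming frame and using a zero data size (the truncated hit at llc_station.c:52 hides the real value):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/llc.h>
#include <net/llc_pdu.h>

static int xid_rsp_sketch(struct sk_buff *skb, u8 *mac_da, u8 dsap)
{
        int rc = 1;
        struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev,
                                               LLC_PDU_TYPE_U, 0);

        if (!nskb)
                goto out;
        /* NULL source SAP, response PDU, class-2 XID, rx window 127 */
        llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP);
        llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 127);
        rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da);
        if (rc)
                goto free;
        dev_queue_xmit(nskb);           /* consumes nskb */
        goto out;
free:
        kfree_skb(nskb);
out:
        return rc;
}

dev_queue_xmit() consumes the skb on success and failure alike, which is why only the llc_mac_hdr_init() error path frees nskb explicitly.
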
/net/dsa/
tag_trailer.c
    20: struct sk_buff *nskb;
    37: nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
    38: if (nskb == NULL) {
    42: skb_reserve(nskb, NET_IP_ALIGN);
    44: skb_reset_mac_header(nskb);
    45: skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
    46: skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
    47: skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
    51: u8 *pad = skb_put(nskb, padlen);
    55: trailer = skb_put(nskb, ...

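The trailer tag cannot be added to a full skb in place, so the driver builds a new one: copy the payload (finalizing any pending checksum via skb_copy_and_csum_dev()), pad runt frames, then append the 4-byte tag. A hedged sketch; the function name is hypothetical and the trailer bytes are left zeroed as placeholders for the real egress-port encoding:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static struct sk_buff *trailer_rebuild_sketch(struct sk_buff *skb, int padlen)
{
        struct sk_buff *nskb;
        u8 *trailer;

        nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
        if (!nskb)
                return NULL;
        skb_reserve(nskb, NET_IP_ALIGN);

        /* keep header offsets consistent with the original frame */
        skb_reset_mac_header(nskb);
        skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
        skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
        skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));

        if (padlen) {
                u8 *pad = skb_put(nskb, padlen);
                memset(pad, 0, padlen);         /* pad runts before tagging */
        }

        trailer = skb_put(nskb, 4);
        memset(trailer, 0, 4);  /* placeholder: real tag encodes the port */

        kfree_skb(skb);         /* assumes we own the original */
        return nskb;
}
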
/net/ipv4/netfilter/
ipt_REJECT.c
    38: struct sk_buff *nskb;
    65: nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + ...
    67: if (!nskb)
    70: skb_reserve(nskb, LL_MAX_HEADER);
    72: skb_reset_network_header(nskb);
    73: niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
    84: skb_reset_transport_header(nskb);
    85: tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
    103: nskb->ip_summed = CHECKSUM_PARTIAL;
    104: nskb ...

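The REJECT target builds the RST from scratch rather than mutating the offending packet: reserve LL_MAX_HEADER of headroom, then skb_put() the IP and TCP headers one after the other (the ip6t_REJECT.c entry below does the same with hh_len + dst->header_len headroom). A reduced sketch, assuming oiph/oth point at the offending packet's headers; sequence numbers, checksums, and routing are condensed to comments:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/string.h>

static struct sk_buff *tcp_reset_sketch(const struct iphdr *oiph,
                                        const struct tcphdr *oth)
{
        struct sk_buff *nskb;
        struct iphdr *niph;
        struct tcphdr *tcph;

        nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
                         LL_MAX_HEADER, GFP_ATOMIC);
        if (!nskb)
                return NULL;
        skb_reserve(nskb, LL_MAX_HEADER); /* link-layer header fits in front */

        skb_reset_network_header(nskb);
        niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
        memset(niph, 0, sizeof(*niph));
        niph->version  = 4;
        niph->ihl      = sizeof(struct iphdr) / 4;
        niph->protocol = IPPROTO_TCP;
        niph->saddr    = oiph->daddr;   /* reply goes back where it came from */
        niph->daddr    = oiph->saddr;

        skb_reset_transport_header(nskb);
        tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
        memset(tcph, 0, sizeof(*tcph));
        tcph->source = oth->dest;
        tcph->dest   = oth->source;
        tcph->doff   = sizeof(struct tcphdr) / 4;
        tcph->rst    = 1;

        /* the real code also fills seq/ack_seq, computes both checksums
         * (CHECKSUM_PARTIAL in the hits above), then routes and sends nskb */
        return nskb;
}
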
/net/ipv6/netfilter/
ip6t_REJECT.c
    44: struct sk_buff *nskb;
    114: nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) ...
    118: if (!nskb) {
    124: skb_dst_set(nskb, dst);
    126: skb_reserve(nskb, hh_len + dst->header_len);
    128: skb_put(nskb, sizeof(struct ipv6hdr));
    129: skb_reset_network_header(nskb);
    130: ip6h = ipv6_hdr(nskb);
    137: skb_reset_transport_header(nskb);
    138: tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));

/net/bluetooth/cmtp/
core.c
    108: struct sk_buff *skb = session->reassembly[id], *nskb;
    115: nskb = alloc_skb(size, GFP_ATOMIC);
    116: if (!nskb) {
    122: skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len);
    124: memcpy(skb_put(nskb, count), buf, count);
    126: session->reassembly[id] = nskb;
    211: struct sk_buff *skb, *nskb;
    217: nskb = alloc_skb(session->mtu, GFP_ATOMIC);
    218: if (!nskb) {
    226: tail = session->mtu - nskb ...

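CMTP reassembles a message by reallocation: a larger skb replaces the per-id buffer, the bytes collected so far are copied over, and the new fragment is appended. A sketch of that grow-by-copy step, assuming the caller owns the old skb (the real code stores the result back into session->reassembly[id]):

#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *reassembly_append(struct sk_buff *skb,
                                         const unsigned char *buf,
                                         unsigned int count)
{
        struct sk_buff *nskb;
        unsigned int size = (skb ? skb->len : 0) + count;

        nskb = alloc_skb(size, GFP_ATOMIC);
        if (!nskb)
                return skb;     /* keep what we have on allocation failure */

        if (skb)
                skb_copy_from_linear_data(skb, skb_put(nskb, skb->len),
                                          skb->len);
        memcpy(skb_put(nskb, count), buf, count);

        kfree_skb(skb);         /* NULL-safe */
        return nskb;
}
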
/net/sctp/
output.c
    395: struct sk_buff *nskb;
    416: nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
    417: if (!nskb)
    421: skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);
    426: sctp_packet_set_owner_w(nskb, sk);
    435: skb_dst_set(nskb, dst);
    440: sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
    441: skb_reset_transport_header(nskb);
    502: auth = skb_tail_pointer(nskb);
    505: memcpy(skb_put(nskb, chunk ...

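sctp packet assembly works back to front: reserve enough headroom for everything that will sit in front of the data, memcpy each queued chunk into the tail, then skb_push() the SCTP header in front of them. A single-chunk sketch under that assumption; vtag, ports, and the CRC32c checksum are left zeroed, and LL_MAX_HEADER stands in for packet->overhead from the real code:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/sctp.h>
#include <linux/string.h>

static struct sk_buff *sctp_assemble_sketch(const void *chunk,
                                            unsigned int chunk_len)
{
        struct sk_buff *nskb;
        struct sctphdr *sh;

        nskb = alloc_skb(sizeof(struct sctphdr) + chunk_len + LL_MAX_HEADER,
                         GFP_ATOMIC);
        if (!nskb)
                return NULL;
        skb_reserve(nskb, LL_MAX_HEADER + sizeof(struct sctphdr));

        memcpy(skb_put(nskb, chunk_len), chunk, chunk_len);

        /* now push the transport header in front of the chunks */
        sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
        skb_reset_transport_header(nskb);
        memset(sh, 0, sizeof(*sh));     /* real code fills ports, vtag, CRC */
        return nskb;
}
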
/net/bluetooth/bnep/
core.c
    293: struct sk_buff *nskb;
    332: nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
    333: if (!nskb) {
    338: skb_reserve(nskb, 2);
    343: memcpy(__skb_put(nskb, ETH_HLEN), &s->eh, ETH_HLEN);
    347: memcpy(__skb_put(nskb, ETH_ALEN), s->eh.h_dest, ETH_ALEN);
    348: memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), ETH_ALEN);
    349: put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
    353: memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), ...
    355: memcpy(__skb_put(nskb, ETH_ALEN ...

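BNEP compresses Ethernet headers on the air, so on receive a new skb is built with the reconstructed header in front of the payload: cached fields come from the session (s->eh in the hits), the rest from the compressed frame. A sketch of the fully-compressed case, with eh standing in for the session's cached header:

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

static struct sk_buff *bnep_rebuild_sketch(struct sk_buff *skb,
                                           const struct ethhdr *eh)
{
        struct sk_buff *nskb;

        nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL);
        if (!nskb)
                return NULL;
        skb_reserve(nskb, 2);   /* keeps the following IP header aligned */

        memcpy(__skb_put(nskb, ETH_HLEN), eh, ETH_HLEN);        /* header */
        memcpy(__skb_put(nskb, skb->len), skb->data, skb->len); /* payload */

        kfree_skb(skb);         /* assumes we own the original */
        return nskb;
}
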
/net/x25/
x25_dev.c
    100: struct sk_buff *nskb;
    106: nskb = skb_copy(skb, GFP_ATOMIC);
    107: if (!nskb)
    110: skb = nskb;

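This is the smallest form of the unshare-before-write idiom: skb_copy() duplicates both the skb head and its data (unlike skb_clone(), which shares the data), so the caller may then edit the copy freely. Sketched as a helper; the name is hypothetical:

#include <linux/skbuff.h>

static struct sk_buff *make_writable_copy(struct sk_buff *skb)
{
        struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);

        if (!nskb)
                return NULL;
        kfree_skb(skb);         /* drop our reference to the shared one */
        return nskb;
}
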
/net/bluetooth/
hci_sock.c
    81: struct sk_buff *nskb;
    127: nskb = skb_clone(skb_copy, GFP_ATOMIC);
    128: if (!nskb)
    131: if (sock_queue_rcv_skb(sk, nskb))
    132: kfree_skb(nskb);
    150: struct sk_buff *nskb;
    162: nskb = skb_clone(skb, GFP_ATOMIC);
    163: if (!nskb)
    166: if (sock_queue_rcv_skb(sk, nskb))
    167: kfree_skb(nskb);
    Bare nskb declarations at 211 and 257.

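Both HCI hits are the same delivery idiom, which also appears in llcp_core.c and af_iucv.c further down this list: clone the skb once per receiving socket and let sock_queue_rcv_skb() take ownership, freeing the clone only when queueing fails. A sketch:

#include <linux/skbuff.h>
#include <net/sock.h>

static void deliver_clone(struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

        if (!nskb)
                return;
        if (sock_queue_rcv_skb(sk, nskb))
                kfree_skb(nskb);        /* queue full or socket gone */
}
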
/net/core/
skbuff.c
    2791: struct sk_buff *nskb;
    2810: nskb = skb_clone(fskb, GFP_ATOMIC);
    2813: if (unlikely(!nskb))
    2816: hsize = skb_end_offset(nskb);
    2817: if (skb_cow_head(nskb, doffset + headroom)) {
    2818: kfree_skb(nskb);
    2822: nskb->truesize += skb_end_offset(nskb) - hsize;
    2823: skb_release_head_state(nskb);
    2824: __skb_push(nskb, doffset);
    2945: struct sk_buff *nskb;

dev.c
    2416: struct sk_buff *nskb = skb->next;
    2418: skb->next = nskb->next;
    2419: nskb->next = NULL;
    2420: kfree_skb(nskb);
    2589: struct sk_buff *nskb = skb->next;
    2591: skb->next = nskb->next;
    2592: nskb->next = NULL;
    2595: dev_queue_xmit_nit(nskb, dev);
    2597: skb_len = nskb->len;
    2598: rc = ops->ndo_start_xmit(nskb, dev);
    3776: struct sk_buff *nskb = *pp;

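The dev.c hits walk an ->next-chained segment list (as do xfrm_output.c, nfnetlink_queue_core.c, and datapath.c elsewhere in this list): each nskb is unlinked before it is transmitted or freed, so the remainder of the chain stays consistent if the loop stops early. A sketch of the disposal case:

#include <linux/skbuff.h>

static void free_segment_chain(struct sk_buff *skb)
{
        while (skb->next) {
                struct sk_buff *nskb = skb->next;

                skb->next = nskb->next;         /* unlink first */
                nskb->next = NULL;
                kfree_skb(nskb);                /* then dispose */
        }
}
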
rtnetlink.c
    1850: struct sk_buff *nskb;
    1875: nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
    1876: if (nskb == NULL)
    1879: err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid, ...
    1884: kfree_skb(nskb);
    1886: err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);

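The netlink reply pattern: size the message with if_nlmsg_size(), fill it, and treat -EMSGSIZE from the fill as a bug in the size estimate, since the buffer was sized for exactly this message. A sketch with a hypothetical fill() callback standing in for rtnl_fill_ifinfo():

#include <linux/rtnetlink.h>
#include <net/netlink.h>

static int send_reply_sketch(struct net *net, u32 portid, size_t size,
                             int (*fill)(struct sk_buff *nskb))
{
        struct sk_buff *nskb;
        int err;

        nskb = nlmsg_new(size, GFP_KERNEL);
        if (!nskb)
                return -ENOBUFS;

        err = fill(nskb);
        if (err < 0) {
                /* -EMSGSIZE here would mean the size estimate was wrong */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(nskb);
                return err;
        }
        return rtnl_unicast(nskb, net, portid);
}
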
/net/xfrm/
xfrm_output.c
    163: struct sk_buff *nskb = segs->next;
    170: while ((segs = nskb)) {
    171: nskb = segs->next;
    178: segs = nskb;

/net/ipv4/
tcp_output.c
    1678: struct sk_buff *skb, *nskb, *next;
    1723: if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
    1725: sk->sk_wmem_queued += nskb->truesize;
    1726: sk_mem_charge(sk, nskb->truesize);
    1730: TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
    1731: TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
    1732: TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
    1733: TCP_SKB_CB(nskb)->sacked = 0;
    1734: nskb->csum = 0;
    1735: nskb ...
    2364: struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, ...
    2625: struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);

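The last two hits show the duplication policy: a cloned skb's data area must not be edited, so that case takes a real copy with MAX_TCP_HEADER headroom, while an unshared skb can be cheaply cloned. A sketch of that choice; the helper name is hypothetical:

#include <net/tcp.h>

static struct sk_buff *dup_for_xmit(struct sk_buff *skb)
{
        if (skb_cloned(skb))    /* data shared: need a private copy */
                return __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
        return skb_clone(skb, GFP_ATOMIC);
}
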
ip_output.c
    1482: struct sk_buff *nskb;
    1525: nskb = skb_peek(&sk->sk_write_queue);
    1526: if (nskb) {
    1528: *((__sum16 *)skb_transport_header(nskb) +
    1529:         arg->csumoffset) = csum_fold(csum_add(nskb->csum, ...
    1531: nskb->ip_summed = CHECKSUM_NONE;
    1532: skb_orphan(nskb);
    1533: skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));

tcp_input.c
    4491: struct sk_buff *nskb;
    4500: nskb = alloc_skb(copy + header, GFP_ATOMIC);
    4501: if (!nskb)
    4504: skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head);
    4505: skb_set_network_header(nskb, (skb_network_header(skb) - ...
    4507: skb_set_transport_header(nskb, (skb_transport_header(skb) - ...
    4509: skb_reserve(nskb, header);
    4510: memcpy(nskb->head, skb->head, header);
    4511: memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
    4512: TCP_SKB_CB(nskb) ...

/net/can/
gw.c
    352: struct sk_buff *nskb;
    394: nskb = skb_copy(skb, GFP_ATOMIC);
    396: nskb = skb_clone(skb, GFP_ATOMIC);
    398: if (!nskb) {
    404: cgw_hops(nskb) = cgw_hops(skb) + 1;
    405: nskb->dev = gwj->dst.dev;
    408: cf = (struct can_frame *)nskb->data;
    425: nskb->tstamp.tv64 = 0;
    428: if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))

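can/gw chooses between skb_copy() and skb_clone() at 394/396 depending on whether the routing job will modify the frame: a clone shares its data and is only safe for pure forwarding. A sketch with a will_modify flag standing in for the gwj->mod checks; hop counting and can_send() are left out:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static struct sk_buff *gw_dup_sketch(struct sk_buff *skb,
                                     struct net_device *dst_dev,
                                     bool will_modify)
{
        struct sk_buff *nskb = will_modify ? skb_copy(skb, GFP_ATOMIC)
                                           : skb_clone(skb, GFP_ATOMIC);

        if (!nskb)
                return NULL;
        nskb->dev = dst_dev;    /* retarget at the outgoing device */
        return nskb;
}
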
/net/netfilter/
nfnetlink_queue_core.c
    500: struct sk_buff *nskb;
    505: nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
    506: if (nskb == NULL) {
    530: /* nfnetlink_unicast will either free the nskb or add it to a socket */
    531: err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
    543: kfree_skb(nskb);
    665: struct sk_buff *nskb = segs->next;
    673: segs = nskb;
    690: struct sk_buff *nskb;
    699: nskb ...

/net/openvswitch/
datapath.c
    322: struct sk_buff *segs, *nskb;
    353: nskb = skb->next;
    358: } while ((skb = nskb));
    396: struct sk_buff *nskb = NULL;
    402: nskb = skb_clone(skb, GFP_ATOMIC);
    403: if (!nskb)
    406: nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));

/net/iucv/
af_iucv.c
    321: struct sk_buff *nskb;
    367: nskb = skb_clone(skb, GFP_ATOMIC);
    368: if (!nskb)
    370: skb_queue_tail(&iucv->send_skb_q, nskb);
    373: skb_unlink(nskb, &iucv->send_skb_q);
    374: kfree_skb(nskb);
    1207: struct sk_buff *nskb;
    1216: nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
    1217: if (!nskb)
    1221: IUCV_SKB_CB(nskb) ...
    2222: struct sk_buff *nskb;

/net/sched/
sch_choke.c
    249: choke_match_random(const struct choke_sched_data *q,
    250:                    struct sk_buff *nskb, unsigned int *pidx)
    260: return choke_get_classid(nskb) == choke_get_classid(oskb);
    262: return choke_match_flow(oskb, nskb);

sch_netem.c
    334: static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
    337: psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
    342: return __skb_queue_tail(list, nskb);
    349: __skb_queue_after(list, skb, nskb);

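tfifo_enqueue() keeps netem's delay queue sorted by each packet's time_to_send, trying the tail first because most packets leave in arrival order, and otherwise scanning backwards with skb_queue_reverse_walk() for the insertion point. A self-contained sketch with a stand-in cb accessor in place of netem_skb_cb():

#include <linux/skbuff.h>

struct delay_cb {
        u64 time_to_send;       /* stand-in for the psched_time_t field */
};

static inline struct delay_cb *delay_cb(struct sk_buff *skb)
{
        return (struct delay_cb *)skb->cb;
}

static void sorted_enqueue(struct sk_buff_head *list, struct sk_buff *nskb)
{
        u64 tnext = delay_cb(nskb)->time_to_send;
        struct sk_buff *skb = skb_peek_tail(list);

        /* common case: the newcomer leaves last, append at the tail */
        if (!skb || tnext >= delay_cb(skb)->time_to_send)
                return __skb_queue_tail(list, nskb);

        /* otherwise scan backwards for the first packet leaving earlier */
        skb_queue_reverse_walk(list, skb) {
                if (tnext >= delay_cb(skb)->time_to_send)
                        break;
        }
        /* without a break, skb is the list head: insert at the front */
        __skb_queue_after(list, skb, nskb);
}
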
/net/nfc/
llcp_core.c
    672: struct sk_buff *skb_copy = NULL, *nskb;
    695: nskb = skb_clone(skb_copy, GFP_ATOMIC);
    696: if (!nskb)
    699: if (sock_queue_rcv_skb(sk, nskb))
    700: kfree_skb(nskb);