/net/rose/
rose_out.c
     51  struct sk_buff *skb, *skbn;    (local)
     79  if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
     84  skb_set_owner_w(skbn, sk);
     89  rose_send_iframe(sk, skbn);
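The rose_out.c hits are the standard clone-before-send idiom: the original skb stays on the socket's queues so it can be retransmitted, and only a clone is charged to the socket and pushed down to the link layer. lapb_out.c and rose_dev.c further down follow the same shape. A minimal sketch, with proto_send_iframe() as a hypothetical stand-in for the real output routine:

    #include <linux/skbuff.h>
    #include <net/sock.h>

    void proto_send_iframe(struct sock *sk, struct sk_buff *skb);  /* hypothetical */

    /*
     * Clone-before-send: the original stays queued for retransmission,
     * only the clone goes down to the link layer.
     */
    static void proto_kick_one(struct sock *sk, struct sk_buff *skb,
                               struct sk_buff_head *ack_queue)
    {
        struct sk_buff *skbn;

        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
            skb_queue_head(&sk->sk_write_queue, skb);   /* retry later */
            return;
        }

        skb_set_owner_w(skbn, sk);       /* charge the clone to the socket */
        proto_send_iframe(sk, skbn);     /* clone is consumed by transmission */

        skb_queue_tail(ack_queue, skb);  /* original waits for acknowledgement */
    }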
rose_loopback.c
     37  struct sk_buff *skbn;    (local)
     39  skbn = skb_clone(skb, GFP_ATOMIC);
     43  if (skbn != NULL) {
     44  skb_queue_tail(&loopback_queue, skbn);
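rose_loopback.c (and nr_loopback.c later in this listing) defer loopback delivery: a copy of the frame is parked on the loopback_queue named in the hits and re-injected into the receive path later from a timer or tasklet. Roughly:

    #include <linux/skbuff.h>

    static struct sk_buff_head loopback_queue;  /* initialised elsewhere with skb_queue_head_init() */

    /* Park a copy of the frame for later re-injection into the receive path. */
    static int proto_loopback_queue(struct sk_buff *skb)
    {
        struct sk_buff *skbn;

        skbn = skb_clone(skb, GFP_ATOMIC);
        kfree_skb(skb);                  /* the caller's buffer is consumed */

        if (skbn != NULL) {
            skb_queue_tail(&loopback_queue, skbn);
            /* a timer or tasklet later dequeues skbn and delivers it */
        }
        return 1;
    }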
rose_dev.c
     62  struct sk_buff *skbn;    (local)
     69  if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
     75  skb_set_owner_w(skbn, skb->sk);
     79  len = skbn->len;
     81  if (!rose_route_frame(skbn, NULL)) {
     82  kfree_skb(skbn);
rose_link.c
    147  struct sk_buff *skbn;    (local)
    173  while ((skbn = skb_dequeue(&neigh->queue)) != NULL)
    174  if (!rose_send_frame(skbn, neigh))
    175  kfree_skb(skbn);
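rose_link.c drains a per-neighbour holding queue once the underlying link is up, freeing any frame that cannot be sent; x25_link.c at the end of this listing does the same. A sketch, with send_frame() standing in for rose_send_frame()/x25_send_frame() and assumed to return nonzero on success, as the hits above imply:

    #include <linux/skbuff.h>

    /* Drain a neighbour's holding queue once its link is established. */
    static void neigh_drain_queue(struct sk_buff_head *queue, void *neigh,
                                  int (*send_frame)(struct sk_buff *, void *))
    {
        struct sk_buff *skbn;

        while ((skbn = skb_dequeue(queue)) != NULL)
            if (!send_frame(skbn, neigh))
                kfree_skb(skbn);         /* could not transmit: drop it */
    }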
af_rose.c
   1161  struct sk_buff *skbn;    (local)
   1172  if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
   1177  skbn->sk = sk;
   1178  skbn->free = 1;
   1179  skbn->arp = 1;
   1181  skb_reserve(skbn, frontlen);
   1186  skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
   1190  skb_push(skbn, ROSE_MIN_LEN);
   1191  skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
   1194  skbn [all...]
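The af_rose.c hits come from the sendmsg path: an oversized datagram is cut into packet-sized pieces, the saved protocol header is pushed back onto each piece, and a more-data bit is set on every fragment except the last; nr_out.c and x25_out.c below repeat the pattern. A simplified sketch, where HDR_LEN, FRAG_LEN, MORE_BIT and the flag's byte offset are illustrative, and frontlen is assumed to already cover the protocol and device headers:

    #include <linux/kernel.h>
    #include <linux/skbuff.h>
    #include <net/sock.h>

    #define HDR_LEN   3      /* illustrative protocol header length */
    #define FRAG_LEN  128    /* illustrative per-fragment payload size */
    #define MORE_BIT  0x10   /* illustrative "more data follows" flag */

    /* Split a fully built skb (header at skb->data) into queued fragments. */
    static int proto_fragment(struct sock *sk, struct sk_buff *skb, int frontlen)
    {
        unsigned char header[HDR_LEN];
        struct sk_buff *skbn;
        int err, len;

        /* Save the protocol header, then strip it so only payload remains. */
        skb_copy_from_linear_data(skb, header, HDR_LEN);
        skb_pull(skb, HDR_LEN);

        while (skb->len > 0) {
            skbn = sock_alloc_send_skb(sk, frontlen + FRAG_LEN, 0, &err);
            if (skbn == NULL)
                return err;

            skb_reserve(skbn, frontlen);     /* headroom for all lower headers */

            len = min_t(int, FRAG_LEN, skb->len);

            /* Copy one chunk of payload and drop it from the source skb. */
            skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
            skb_pull(skb, len);

            /* Put the saved protocol header back on the front of the piece. */
            skb_push(skbn, HDR_LEN);
            skb_copy_to_linear_data(skbn, header, HDR_LEN);

            if (skb->len > 0)
                skbn->data[2] |= MORE_BIT;   /* illustrative flag position */

            skb_queue_tail(&sk->sk_write_queue, skbn);
        }

        kfree_skb(skb);
        return 0;
    }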
/net/netrom/
nr_loopback.c
     35  struct sk_buff *skbn;    (local)
     37  if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) {
     38  skb_copy_from_linear_data(skb, skb_put(skbn, skb->len), skb->len);
     39  skb_reset_transport_header(skbn);
     41  skb_queue_tail(&loopback_queue, skbn);
nr_in.c
     34  struct sk_buff *skbo, *skbn = skb;    (local)
     51  if ((skbn = alloc_skb(nr->fraglen, GFP_ATOMIC)) == NULL)
     54  skb_reset_transport_header(skbn);
     58  skb_put(skbn, skbo->len),
     66  return sock_queue_rcv_skb(sk, skbn);
    153  struct sk_buff *skbn;    (local)
    236  while ((skbn = skb_dequeue(&nrom->reseq_queue)) != NULL) {
    237  ns = skbn->data[17];
    239  if (nr_queue_rx_frame(sk, skbn, frametype & NR_MORE_FLAG) == 0) {
    243  skb_queue_tail(&temp_queue, skbn);
    [all...]
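The first group of nr_in.c hits is reassembly: fragments accumulate on a per-connection queue, and when the last one arrives a single skb of the total length is allocated, every fragment is copied into it, and the result is queued to the socket. ax25_in.c and x25_in.c use the same scheme. In outline, with frag_queue and fraglen as assumed per-connection state:

    #include <linux/skbuff.h>
    #include <net/sock.h>

    /* Glue all queued fragments into one skb and hand it to the socket. */
    static int proto_reassemble(struct sock *sk, struct sk_buff_head *frag_queue,
                                unsigned int fraglen)
    {
        struct sk_buff *skbn, *skbo;

        if ((skbn = alloc_skb(fraglen, GFP_ATOMIC)) == NULL)
            return 1;       /* out of memory: caller drops the burst */

        skb_reset_transport_header(skbn);

        /* Copy every saved fragment, in order, into the new buffer. */
        while ((skbo = skb_dequeue(frag_queue)) != NULL) {
            skb_copy_from_linear_data(skbo,
                                      skb_put(skbn, skbo->len),
                                      skbo->len);
            kfree_skb(skbo);
        }

        return sock_queue_rcv_skb(sk, skbn);
    }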
nr_out.c
     37  struct sk_buff *skbn;    (local)
     49  if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL)
     52  skb_reserve(skbn, frontlen);
     57  skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
     61  skb_push(skbn, NR_TRANSPORT_LEN);
     62  skb_copy_to_linear_data(skbn, transport,
     65  skbn->data[4] |= NR_MORE_FLAG;
     67  skb_queue_tail(&sk->sk_write_queue, skbn);    /* Throw it on the queue */
    102  struct sk_buff *skb, *skbn;    (local)
    108  if ((skbn
    128  struct sk_buff *skb, *skbn;    (local)
    [all...]
nr_subr.c
    216  struct sk_buff *skbn;    (local)
    222  if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL)
    225  skb_reserve(skbn, 0);
    227  dptr = skb_put(skbn, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
    258  if (!nr_route_frame(skbn, NULL))
    259  kfree_skb(skbn);
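nr_subr.c builds frames from scratch: allocate an skb with link-layer headroom, skb_put() the network and transport header area, fill it through the returned pointer, and hand the frame to the routing code, freeing it if no route is found. A rough sketch; the sizes and the zero-fill stand in for the real NET/ROM header encoding, and route_frame() stands in for nr_route_frame():

    #include <linux/skbuff.h>
    #include <linux/string.h>

    #define LL_HEADROOM  16   /* illustrative link-layer headroom */
    #define HDR_BYTES    20   /* illustrative network + transport header size */

    /* Build and route a header-only control frame. */
    static void proto_write_control(int (*route_frame)(struct sk_buff *, void *))
    {
        struct sk_buff *skbn;
        unsigned char *dptr;

        if ((skbn = alloc_skb(LL_HEADROOM + HDR_BYTES, GFP_ATOMIC)) == NULL)
            return;

        skb_reserve(skbn, LL_HEADROOM);   /* leave room for the link header */

        dptr = skb_put(skbn, HDR_BYTES);  /* claim the header area */
        memset(dptr, 0, HDR_BYTES);       /* stand-in for the real field layout */

        if (!route_frame(skbn, NULL))
            kfree_skb(skbn);              /* no route: we still own the frame */
    }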
nr_route.c
    784  struct sk_buff *skbn;    (local)
    837  if ((skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
    844  skb = skbn;
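nr_route.c handles the forwarding case: a frame that arrived on one device may lack headroom for the outgoing device's hard header, so skb_copy_expand() makes a roomier copy that replaces the original (ax25_route.c below does the same job with skb_realloc_headroom()). Something like:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /*
     * Make sure a forwarded skb has headroom for the outgoing device's
     * hard header; return the skb to use (possibly a fresh copy).
     */
    static struct sk_buff *forward_prepare(struct sk_buff *skb,
                                           struct net_device *dev)
    {
        struct sk_buff *skbn;

        if (skb_headroom(skb) >= dev->hard_header_len)
            return skb;                 /* already enough room */

        skbn = skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC);
        if (skbn == NULL)
            return NULL;                /* caller drops the original */

        kfree_skb(skb);                 /* the roomier copy replaces it */
        return skbn;
    }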
/net/ax25/
ax25_in.c
     40  struct sk_buff *skbn, *skbo;    (local)
     53  skbn = alloc_skb(AX25_MAX_HEADER_LEN +
     56  if (!skbn) {
     61  skb_reserve(skbn, AX25_MAX_HEADER_LEN);
     63  skbn->dev = ax25->ax25_dev->dev;
     64  skb_reset_network_header(skbn);
     65  skb_reset_transport_header(skbn);
     70  skb_put(skbn, skbo->len),
     77  if (ax25_rx_iframe(ax25, skbn) == 0)
     78  kfree_skb(skbn);
    120  struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);    (local)
    [all...]
ax25_out.c
    122  struct sk_buff *skbn;    (local)
    148  if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
    155  skb_set_owner_w(skbn, skb->sk);
    162  skb_reserve(skbn, frontlen + 2);
    163  skb_set_network_header(skbn,
    165  skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
    166  p = skb_push(skbn, 2);
    176  skb_reserve(skbn, frontlen + 1);
    177  skb_set_network_header(skbn,
    179  skb_copy_from_linear_data(skb, skb_put(skbn, le
    247  struct sk_buff *skb, *skbn;    (local)
    332  struct sk_buff *skbn;    (local)
    [all...]
ax25_route.c
    462  struct sk_buff *skbn;    (local)
    469  if ((skbn = skb_realloc_headroom(skb, len)) == NULL) {
    475  skb_set_owner_w(skbn, skb->sk);
    479  skb = skbn;
/net/lapb/
lapb_out.c
     73  struct sk_buff *skb, *skbn;    (local)
     90  if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
     96  skb_set_owner_w(skbn, skb->sk);
    101  lapb_send_iframe(lapb, skbn, LAPB_POLLOFF);
/net/x25/
x25_forward.c
     29  struct sk_buff *skbn;    (local)
     79  if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
     82  x25_transmit_link(skbn, neigh_new);
    103  struct sk_buff *skbn;    (local)
    124  if ((skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL) {
    128  x25_transmit_link(skbn, nb);
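x25_forward.c shows both copy flavours: skb_clone() when the frame is relayed untouched, and pskb_copy() when its header will be rewritten first, since a clone shares its data buffer with the original and must not be modified. A condensed sketch; x25_transmit_link() is the helper named in the hits, everything else is illustrative:

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    /*
     * Forwarding copy: a clone when the frame is relayed verbatim, a
     * private copy when its header must be rewritten before transmit.
     * transmit() stands in for x25_transmit_link(); nb is the neighbour.
     */
    static int forward_frame(struct sk_buff *skb, void *nb, int rewrite_header,
                             void (*transmit)(struct sk_buff *, void *))
    {
        struct sk_buff *skbn;

        if (rewrite_header)
            skbn = pskb_copy(skb, GFP_ATOMIC);   /* own, writable header */
        else
            skbn = skb_clone(skb, GFP_ATOMIC);   /* shares data, read-only */

        if (skbn == NULL)
            return -ENOMEM;

        /* if rewrite_header: patch e.g. the logical channel id in skbn here */

        transmit(skbn, nb);
        return 0;
    }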
x25_in.c
     39  struct sk_buff *skbo, *skbn = skb;    (local)
     52  if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL) {
     59  skb_reset_transport_header(skbn);
     62  skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len),
     71  skb_put(skbn, skbo->len),
     79  skb_set_owner_r(skbn, sk);
     80  skb_queue_tail(&sk->sk_receive_queue, skbn);
x25_out.c
     54  struct sk_buff *skbn;    (local)
     72  skbn = sock_alloc_send_skb(sk, frontlen + max_len,
     75  if (!skbn) {
     86  skb_reserve(skbn, frontlen);
     91  skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
     95  skb_push(skbn, header_len);
     96  skb_copy_to_linear_data(skbn, header, header_len);
    100  skbn->data[3] |= X25_EXT_M_BIT;
    102  skbn->data[2] |= X25_STD_M_BIT;
    105  skb_queue_tail(&sk->sk_write_queue, skbn);
    143  struct sk_buff *skb, *skbn;    (local)
    [all...]
x25_link.c
     77  struct sk_buff *skbn;    (local)
    110  while ((skbn = skb_dequeue(&nb->queue)) != NULL)
    111  x25_send_frame(skbn, nb);