Lines Matching refs:skb

512 if (tx_buffer->skb) {
516 dev_kfree_skb_any(tx_buffer->skb);
530 tx_buffer->skb = NULL;
704 /* free the skb */
705 dev_consume_skb_any(tx_buf->skb);
707 /* unmap skb header data */
714 tx_buf->skb = NULL;
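
The matches at 512-530 and 704-714 are the two TX-side skb release paths: dev_kfree_skb_any() when a buffer is torn down with the frame unsent, and dev_consume_skb_any() when the frame completed normally, in both cases followed by unmapping the DMA and clearing the pointer so the slot cannot be freed twice. A minimal sketch of that pattern; the ex_tx_buffer layout and field names below are illustrative, not the driver's actual structures.

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Illustrative TX buffer bookkeeping; field names are assumptions. */
struct ex_tx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int len;
};

/* Teardown path: the frame never made it out, so this counts as a drop. */
static void ex_free_tx_buffer(struct device *dev, struct ex_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (tx_buffer->dma)
			dma_unmap_single(dev, tx_buffer->dma,
					 tx_buffer->len, DMA_TO_DEVICE);
	}
	tx_buffer->skb = NULL;	/* guard against a second free */
	tx_buffer->dma = 0;
}

/* Completion path: the frame was sent, so "consume" rather than drop it. */
static void ex_consume_tx_buffer(struct device *dev, struct ex_tx_buffer *tx_buf)
{
	dev_consume_skb_any(tx_buf->skb);
	dma_unmap_single(dev, tx_buf->dma, tx_buf->len, DMA_TO_DEVICE);
	tx_buf->skb = NULL;
}
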
1003 if (rx_bi->skb) {
1004 dev_kfree_skb(rx_bi->skb);
1005 rx_bi->skb = NULL;
1117 struct sk_buff *skb;
1126 skb = bi->skb;
1128 if (!skb) {
1129 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1131 if (!skb) {
1136 skb_record_rx_queue(skb, rx_ring->queue_index);
1137 bi->skb = skb;
1142 skb->data,
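
Lines 1003-1005 and 1117-1142 together show the RX buffer lifecycle: on teardown any skb still parked in a ring slot is freed with dev_kfree_skb(), and on refill a fresh skb is allocated with netdev_alloc_skb_ip_align() (which keeps the IP header 4-byte aligned), tagged with the receive queue, and its skb->data mapped for the device. A sketch of that refill step, assuming a simple per-slot skb + dma layout; ex_rx_buffer and its fields are illustrative.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>

struct ex_rx_buffer {		/* illustrative, not the driver's struct */
	struct sk_buff *skb;
	dma_addr_t dma;
};

static int ex_alloc_rx_buffer(struct net_device *netdev, struct device *dev,
			      struct ex_rx_buffer *bi, unsigned int buf_len,
			      u16 queue_index)
{
	struct sk_buff *skb = bi->skb;

	if (!skb) {
		/* IP-aligned alloc keeps the IP header on a 4-byte boundary */
		skb = netdev_alloc_skb_ip_align(netdev, buf_len);
		if (!skb)
			return -ENOMEM;
		skb_record_rx_queue(skb, queue_index);
		bi->skb = skb;
	}

	if (!bi->dma) {
		bi->dma = dma_map_single(dev, skb->data, buf_len,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, bi->dma)) {
			bi->dma = 0;
			return -ENOMEM;
		}
	}
	return 0;
}

/* Teardown counterpart for a slot that never reached the stack. */
static void ex_free_rx_buffer(struct ex_rx_buffer *rx_bi)
{
	if (rx_bi->skb) {
		dev_kfree_skb(rx_bi->skb);
		rx_bi->skb = NULL;
	}
}
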
1199 * @skb: packet to send up
1203 struct sk_buff *skb, u16 vlan_tag)
1210 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1213 netif_rx(skb);
1215 napi_gro_receive(&q_vector->napi, skb);
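
Lines 1199-1215 are the hand-off to the stack: a hardware-stripped VLAN tag is put back on the skb with __vlan_hwaccel_put_tag() and the frame is passed up through napi_gro_receive(); netif_rx() at 1213 appears to be the non-NAPI fallback (e.g. netpoll). A minimal sketch of the GRO path; the non-zero-VID check mirrors common practice and is an assumption here.

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void ex_receive_skb(struct napi_struct *napi, struct sk_buff *skb,
			   u16 vlan_tag)
{
	/* restore the tag the MAC stripped on receive */
	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(napi, skb);	/* GRO-capable NAPI receive path */
}
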
1219 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1221 * @skb: skb currently being received and modified
1227 struct sk_buff *skb,
1244 skb->ip_summed = CHECKSUM_NONE;
1296 skb->transport_header = skb->mac_header +
1298 (ip_hdr(skb)->ihl * 4);
1301 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1302 skb->protocol == htons(ETH_P_8021AD))
1305 rx_udp_csum = udp_csum(skb);
1306 iph = ip_hdr(skb);
1309 (skb->len - skb_transport_offset(skb)),
1312 if (udp_hdr(skb)->check != csum)
1316 skb->ip_summed = CHECKSUM_UNNECESSARY;
1317 skb->csum_level = ipv4_tunnel || ipv6_tunnel;
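
The i40e_rx_checksum() matches (1219-1317) follow the usual NIC RX checksum-reporting pattern: default to CHECKSUM_NONE and only report CHECKSUM_UNNECESSARY once the hardware status bits say the checksums were verified; for UDP tunnels the outer UDP checksum is re-verified in software (1296-1312) and skb->csum_level records whether the validated checksum belongs to an inner header (1317). A stripped-down sketch of the reporting part only; csum_ok and is_tunnel stand in for the decoded hardware status/error bits.

#include <linux/skbuff.h>

static void ex_rx_checksum(struct sk_buff *skb, bool csum_ok, bool is_tunnel)
{
	skb->ip_summed = CHECKSUM_NONE;		/* default: let the stack verify */

	if (!csum_ok)
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* csum_level says how many encapsulation levels were validated */
	skb->csum_level = is_tunnel ? 1 : 0;
}
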
1398 struct sk_buff *skb;
1406 skb = rx_bi->skb;
1407 prefetch(skb->data);
1423 rx_bi->skb = NULL;
1432 * If this is an skb from previous receive dma will be 0
1446 skb_put(skb, len);
1457 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1462 skb->len += rx_packet_len;
1463 skb->data_len += rx_packet_len;
1464 skb->truesize += rx_packet_len;
1487 rx_bi->skb = next_buffer->skb;
1489 next_buffer->skb = skb;
1498 dev_kfree_skb_any(skb);
1505 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1508 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1515 total_rx_bytes += skb->len;
1518 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1520 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1526 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1527 dev_kfree_skb_any(skb);
1531 i40e_receive_skb(rx_ring, skb, vlan_tag);
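
The 1398-1531 block is the RX completion loop: the skb parked in the ring slot is detached (1406/1423), the header bytes are claimed with skb_put() (1446), page-backed payload is attached with skb_fill_page_desc() while len/data_len/truesize are adjusted (1457-1464), and the frame gets hash, timestamp, protocol and checksum before i40e_receive_skb() (1505-1531). A sketch of just the skb assembly step, with descriptor parsing replaced by plain parameters; ex_build_rx_skb is illustrative.

#include <linux/prefetch.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* Assemble one received frame: linear header plus an optional page fragment. */
static struct sk_buff *ex_build_rx_skb(struct sk_buff *skb,
				       unsigned int hdr_len,
				       struct page *page,
				       unsigned int page_off,
				       unsigned int page_len,
				       struct net_device *netdev)
{
	prefetch(skb->data);
	skb_put(skb, hdr_len);		/* header bytes DMA'd into skb->data */

	if (page_len) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   page, page_off, page_len);
		skb->len += page_len;
		skb->data_len += page_len;
		skb->truesize += page_len;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	return skb;
}
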
1644 * @skb: send buffer
1648 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1672 hdr.network = skb_network_header(skb);
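
The single i40e_atr() match at 1672 grabs the network header so the transmit path can sample TCP flows for the flow-director (ATR) filters. A sketch of that header peek for the IPv4 case; ex_atr_peek_tcp and the elided IPv6 handling are illustrative.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/tcp.h>

/* ATR samples TCP flows: find the L3/L4 headers to build the filter tuple. */
static const struct tcphdr *ex_atr_peek_tcp(struct sk_buff *skb, __be16 protocol)
{
	const unsigned char *network = skb_network_header(skb);
	const struct iphdr *iph;

	if (protocol != htons(ETH_P_IP))
		return NULL;		/* IPv6 handling elided in this sketch */

	iph = (const struct iphdr *)network;
	if (iph->protocol != IPPROTO_TCP)
		return NULL;

	return (const struct tcphdr *)(network + iph->ihl * 4);
}
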
1751 * @skb: send buffer
1755 * Checks the skb and set up correspondingly several generic transmit flags
1762 int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1766 static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1771 __be16 protocol = skb->protocol;
1775 if (vlan_tx_tag_present(skb)) {
1776 tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
1781 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1792 (skb->priority != TC_PRIO_CONTROL)) {
1794 tx_flags |= (skb->priority & 0x7) <<
1800 rc = skb_cow_head(skb, 0);
1803 vhdr = (struct vlan_ethhdr *)skb->data;
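
i40e_tx_prepare_vlan_flags() (1751-1803) gathers VLAN information for the transmit descriptors: a tag already offloaded by the stack is read with vlan_tx_tag_get() (the pre-4.0 names; current kernels spell them skb_vlan_tag_present()/skb_vlan_tag_get()), an in-band 802.1Q header is peeked at safely with skb_header_pointer() (1781), and for DCB priority insertion the header is first made writable with skb_cow_head() (1800). A sketch of the offloaded-tag branch using the current helper names; the flag bit and shift are illustrative.

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

#define EX_TX_FLAGS_HW_VLAN	0x2	/* illustrative flag bit */
#define EX_TX_FLAGS_VLAN_SHIFT	16	/* tag carried in the upper bits */

static u32 ex_tx_prepare_vlan_flags(struct sk_buff *skb)
{
	u32 tx_flags = 0;

	/* tag handed down by the stack for hardware insertion */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= (u32)skb_vlan_tag_get(skb) << EX_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= EX_TX_FLAGS_HW_VLAN;
	}
	return tx_flags;
}
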
1817 * @skb: ptr to the skb we're sending
1825 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1836 if (!skb_is_gso(skb))
1839 err = skb_cow_head(skb, 0);
1844 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
1845 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1850 } else if (skb_is_gso_v6(skb)) {
1852 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
1853 : ipv6_hdr(skb);
1854 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1860 l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
1861 *hdr_len = (skb->encapsulation
1862 ? (skb_inner_transport_header(skb) - skb->data)
1863 : skb_transport_offset(skb)) + l4len;
1867 cd_tso_len = skb->len - *hdr_len;
1868 cd_mss = skb_shinfo(skb)->gso_size;
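
i40e_tso() (1817-1868) is the standard TSO context setup: return early when the skb is not GSO, make the headers writable with skb_cow_head(), pick inner versus outer IP/TCP headers depending on skb->encapsulation, then derive the header length, the TSO payload length (skb->len - hdr_len) and the MSS from gso_size. A sketch of the length/MSS computation only, assuming TCP; ex_tso_lengths is illustrative.

#include <linux/skbuff.h>
#include <net/tcp.h>

/* Returns 0 when no TSO is needed, 1 when the fields were filled in. */
static int ex_tso_lengths(struct sk_buff *skb, u32 *hdr_len,
			  u32 *tso_len, u16 *mss)
{
	u32 l4len;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_cow_head(skb, 0) < 0)
		return -1;		/* headers could not be made writable */

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	*tso_len = skb->len - *hdr_len;		/* payload covered by TSO */
	*mss = skb_shinfo(skb)->gso_size;	/* segment size for the HW */
	return 1;
}
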
1879 * @skb: ptr to the skb we're sending
1884 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
1889 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
1902 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1903 pf->ptp_tx_skb = skb_get(skb);
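
i40e_tsyn() (1879-1903) is the PTP hardware-timestamp hook on transmit: skip unless the stack requested one via SKBTX_HW_TSTAMP, mark the skb SKBTX_IN_PROGRESS, and take an extra reference with skb_get() so it survives until the timestamp is read back from the hardware clock. A sketch with the single outstanding-timestamp slot modeled as a bare pointer; a real driver would guard it against races.

#include <linux/skbuff.h>

static struct sk_buff *ex_ptp_tx_skb;	/* illustrative single-slot latch */

static bool ex_tx_hwtstamp(struct sk_buff *skb)
{
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return false;			/* no timestamp requested */

	if (ex_ptp_tx_skb)
		return false;			/* a timestamp is already pending */

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	ex_ptp_tx_skb = skb_get(skb);		/* extra ref until TS is read */
	return true;
}
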
1916 * @skb: send buffer
1922 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1933 if (skb->encapsulation) {
1934 network_hdr_len = skb_inner_network_header_len(skb);
1935 this_ip_hdr = inner_ip_hdr(skb);
1936 this_ipv6_hdr = inner_ipv6_hdr(skb);
1937 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
1943 ip_hdr(skb)->check = 0;
1951 ip_hdr(skb)->check = 0;
1959 *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
1962 ((skb_inner_network_offset(skb) -
1963 skb_transport_offset(skb)) >> 1) <<
1967 network_hdr_len = skb_network_header_len(skb);
1968 this_ip_hdr = ip_hdr(skb);
1969 this_ipv6_hdr = ipv6_hdr(skb);
1970 this_tcp_hdrlen = tcp_hdrlen(skb);
1996 *td_offset |= (skb_network_offset(skb) >> 1) <<
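
i40e_tx_enable_csum() (1916-1996) picks the header pointers and lengths that feed the checksum-offload descriptor fields: with skb->encapsulation it works on the inner headers and additionally programs the tunnelling context from the outer network header length and the gap between the outer transport header and the inner network header (1959-1963); otherwise it uses the outer headers directly. A sketch of the header-length selection; ex_csum_header_lens is illustrative and the descriptor encoding is elided.

#include <linux/skbuff.h>
#include <linux/tcp.h>

static void ex_csum_header_lens(struct sk_buff *skb,
				u32 *network_hdr_len, u32 *tcp_hdr_len)
{
	if (skb->encapsulation) {
		/* offload applies to the inner (encapsulated) headers;
		 * the outer lengths feed the tunnelling context:
		 *   outer L3 length in words: skb_network_header_len(skb) >> 2
		 *   outer L4 to inner L3 gap:
		 *     (skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1
		 */
		*network_hdr_len = skb_inner_network_header_len(skb);
		*tcp_hdr_len = inner_tcp_hdrlen(skb);
	} else {
		*network_hdr_len = skb_network_header_len(skb);
		*tcp_hdr_len = tcp_hdrlen(skb);
	}
}
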
2099 * @skb: send buffer
2107 void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2111 static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2116 unsigned int data_len = skb->data_len;
2117 unsigned int size = skb_headlen(skb);
2133 gso_segs = skb_shinfo(skb)->gso_segs;
2138 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2140 first->skb = skb;
2143 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2148 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2241 if (!skb->xmit_more ||
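
i40e_tx_map() (2099-2241) does the actual DMA mapping: dma_map_single() for the linear area at skb->data (skb_headlen() bytes, 2117/2143), then one mapping per paged fragment from the frags[] loop (2148), with the skb and byte count recorded on the first buffer for completion accounting (2133-2140); the doorbell write is deferred when skb->xmit_more says more frames are queued (2241; newer kernels expose this as netdev_xmit_more()). A sketch of the mapping loop with the descriptor writes elided; ex_map_skb is illustrative and omits the unwind of earlier mappings on failure.

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Map the linear area and every fragment; return 0 or -ENOMEM. */
static int ex_map_skb(struct device *dev, struct sk_buff *skb)
{
	unsigned int size = skb_headlen(skb);
	const skb_frag_t *frag;
	dma_addr_t dma;
	int f;

	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	/* ... write TX descriptor(s) covering [dma, dma + size) ... */

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		frag = &skb_shinfo(skb)->frags[f];
		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;	/* real code unwinds earlier mappings */
		/* ... descriptor(s) for this fragment ... */
	}
	return 0;
}
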
2267 * @skb: send buffer
2270 * Returns number of data descriptors needed for this skb. Returns 0 to indicate
2275 int i40e_xmit_descriptor_count(struct sk_buff *skb,
2278 static int i40e_xmit_descriptor_count(struct sk_buff *skb,
2291 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2292 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2294 count += TXD_USE_COUNT(skb_headlen(skb));
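
i40e_xmit_descriptor_count() (2267-2294) sizes the request up front: one or more descriptors for the linear area plus one or more per page fragment, where TXD_USE_COUNT() rounds a buffer length up by the per-descriptor data limit. A sketch with an assumed limit; EX_MAX_DATA_PER_TXD is illustrative, the real limit is hardware-specific.

#include <linux/kernel.h>
#include <linux/skbuff.h>

#define EX_MAX_DATA_PER_TXD	16383U		/* assumed per-descriptor limit */
#define EX_TXD_USE_COUNT(len)	DIV_ROUND_UP((len), EX_MAX_DATA_PER_TXD)

static unsigned int ex_xmit_descriptor_count(const struct sk_buff *skb)
{
	unsigned int count = 0;
	int f;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += EX_TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));

	count += EX_TXD_USE_COUNT(skb_headlen(skb));
	return count;
}
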
2304 * @skb: send buffer
2309 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2322 if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
2326 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2329 /* obtain protocol of skb */
2330 protocol = vlan_get_protocol(skb);
2341 tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
2349 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2354 skb_tx_timestamp(skb);
2360 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2363 i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
2374 i40e_atr(tx_ring, skb, tx_flags, protocol);
2376 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2382 dev_kfree_skb_any(skb);
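
i40e_xmit_frame_ring() (2304-2382) strings those pieces together: descriptor-count check, VLAN flag preparation, TSO and PTP-timestamp context, skb_tx_timestamp() for software timestamping (2354), checksum offload only when skb->ip_summed == CHECKSUM_PARTIAL (2360), ATR sampling and finally i40e_tx_map(); error paths free the skb with dev_kfree_skb_any() but still report NETDEV_TX_OK (2382). A condensed sketch of that ordering with the driver helpers reduced to comments.

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static netdev_tx_t ex_xmit_frame_ring(struct sk_buff *skb)
{
	/* 1. count descriptors, prepare VLAN flags, set up TSO/PTP context;
	 *    a hard failure here drops the frame:
	 *        dev_kfree_skb_any(skb); return NETDEV_TX_OK;
	 */

	skb_tx_timestamp(skb);		/* software TX timestamp hook */

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* 2. program the L3/L4 checksum-offload descriptor fields */
	}

	/* 3. ATR flow sampling, DMA mapping, doorbell */
	return NETDEV_TX_OK;
}
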
2388 * @skb: send buffer
2393 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2397 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
2402 if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
2403 if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
2405 skb->len = I40E_MIN_TX_LEN;
2406 skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
2409 return i40e_xmit_frame_ring(skb, tx_ring);
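
i40e_lan_xmit_frame() (2388-2409) is the ndo_start_xmit entry point: it selects the ring from skb->queue_mapping and pads runt frames up to the hardware minimum with skb_pad() before handing off, fixing up skb->len and the tail pointer itself since skb_pad() extends only the buffer, not the reported length. A sketch of the padding step; EX_MIN_TX_LEN mirrors I40E_MIN_TX_LEN from the matches but its value here is an assumption.

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#define EX_MIN_TX_LEN	17	/* assumed minimum frame length for the MAC */

static netdev_tx_t ex_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	/* hardware cannot transmit runts; pad and fix up length/tail */
	if (unlikely(skb->len < EX_MIN_TX_LEN)) {
		if (skb_pad(skb, EX_MIN_TX_LEN - skb->len))
			return NETDEV_TX_OK;	/* skb already freed by skb_pad */
		skb->len = EX_MIN_TX_LEN;
		skb_set_tail_pointer(skb, EX_MIN_TX_LEN);
	}

	/* ... hand off to the per-ring transmit routine ... */
	return NETDEV_TX_OK;
}
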