Lines Matching refs:skb

192 				 struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
194 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
304 struct sk_buff *skb)
306 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
307 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
308 u16 protocol = ntohs(skb->protocol);
321 vh = (struct vlan_ethhdr *)skb->data;
323 } else if (vlan_tx_tag_present(skb)) {
324 vlan_id = vlan_tx_tag_get(skb);
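
The matches above come from the loopback-filter setup (qlcnic_add_lb_filter() and qlcnic_send_filter()): an Ethernet header is overlaid on skb->data, and the VLAN ID comes either from an in-band 802.1Q header or from the out-of-band tag in the skb metadata. A minimal sketch of that two-case tag extraction, assuming the pre-3.19 vlan_tx_tag_* helper names used above; the helper name and the VLAN_VID_MASK masking are illustrative, not the driver's:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Hypothetical helper mirroring the two VLAN cases above: an in-band
 * 802.1Q header at skb->data, or an out-of-band tag carried in the
 * skb metadata (vlan_tx_tag_* on pre-3.19 kernels).
 */
static u16 example_get_tx_vlan_id(struct sk_buff *skb)
{
        struct vlan_ethhdr *vh;
        u16 vlan_id = 0;

        if (ntohs(skb->protocol) == ETH_P_8021Q) {
                vh = (struct vlan_ethhdr *)skb->data;
                vlan_id = ntohs(vh->h_vlan_TCI) & VLAN_VID_MASK;
        } else if (vlan_tx_tag_present(skb)) {
                vlan_id = vlan_tx_tag_get(skb) & VLAN_VID_MASK;
        }

        return vlan_id;
}
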
372 struct sk_buff *skb,
384 if (skb_is_gso(skb)) {
385 inner_hdr_len = skb_inner_transport_header(skb) +
386 inner_tcp_hdrlen(skb) -
387 skb_inner_mac_header(skb);
390 outer_hdr_len = skb_transport_offset(skb) + 8 +
396 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
408 tx_ring->cmd_buf_arr[producer].skb = NULL;
409 skb_copy_from_linear_data_offset(skb, copied,
425 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
426 if (inner_ip_hdr(skb)->version == 6) {
427 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
430 if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
439 if (ip_hdr(skb)->version == 6)
443 encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
446 encap_descr |= skb_network_offset(skb) << 10;
449 first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
450 skb->data;
451 first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
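
The lines above are from the encapsulated (tunnelled) TX path: for GSO over a UDP tunnel the driver measures the inner header span (inner MAC through the end of the inner TCP header) and the outer header span, then records both plus the inner offsets in the first descriptor. A sketch of that header arithmetic, assuming an 8-byte tunnel header (VXLAN-style); the listing truncates the outer-length expression, so the sizeof(struct udphdr) term and all names here are assumptions:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>

/* Illustrative sketch of tunnelled-TSO header sizing. */
static void example_encap_gso_lengths(struct sk_buff *skb,
                                      u32 *inner_hdr_len,
                                      u32 *outer_hdr_len)
{
        /* Inner L2 header through the end of the inner TCP header. */
        *inner_hdr_len = skb_inner_transport_header(skb) +
                         inner_tcp_hdrlen(skb) -
                         skb_inner_mac_header(skb);

        /* Outer headers: everything up to the outer UDP header, an
         * assumed 8-byte tunnel header, plus the UDP header itself.
         */
        *outer_hdr_len = skb_transport_offset(skb) + 8 +
                         sizeof(struct udphdr);
}
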
459 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
467 u16 protocol = ntohs(skb->protocol);
471 vh = (struct vlan_ethhdr *)skb->data;
475 } else if (vlan_tx_tag_present(skb)) {
477 vlan_tci = vlan_tx_tag_get(skb);
492 if (*(skb->data) & BIT_0) {
494 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
497 if (skb_is_gso(skb)) {
498 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
499 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
519 tx_ring->cmd_buf_arr[producer].skb = NULL;
525 skb_copy_from_linear_data(skb, vh, 12);
529 skb_copy_from_linear_data_offset(skb, 12,
541 tx_ring->cmd_buf_arr[producer].skb = NULL;
542 skb_copy_from_linear_data_offset(skb, copied,
554 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
556 l4proto = ip_hdr(skb)->protocol;
563 l4proto = ipv6_hdr(skb)->nexthdr;
571 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
572 first_desc->ip_hdr_offset += skb_network_offset(skb);
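
This group is the non-encapsulated TX offload path (qlcnic_tx_pkt()): VLAN handling, multicast detection via the first address byte, TSO header replication into the ring, and checksum-offload protocol selection. A sketch of the two central offload decisions, assuming protocol is already host-order (as with the ntohs() above); the function name and out-parameters are illustrative:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Sketch: header length for TSO and L4 protocol for checksum offload. */
static void example_tx_offload_info(struct sk_buff *skb, u16 protocol,
                                    u32 *hdr_len, u8 *l4proto)
{
        if (skb_is_gso(skb)) {
                /* Hardware replays L2+L3+TCP headers per segment. */
                *hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (protocol == ETH_P_IP)
                        *l4proto = ip_hdr(skb)->protocol;
                else if (protocol == ETH_P_IPV6)
                        *l4proto = ipv6_hdr(skb)->nexthdr;
        }
}
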
578 static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
586 nr_frags = skb_shinfo(skb)->nr_frags;
589 map = pci_map_single(pdev, skb->data, skb_headlen(skb),
595 nf->length = skb_headlen(skb);
598 frag = &skb_shinfo(skb)->frags[i];
618 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
624 static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
628 int i, nr_frags = skb_shinfo(skb)->nr_frags;
636 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
637 pbuf->skb = NULL;
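
qlcnic_map_tx_skb() and qlcnic_unmap_buffers() pair a pci_map_single() of the linear area with one mapping per page fragment, and undo them in reverse on failure. A self-contained sketch of that map-with-unwind pattern, assuming the legacy pci_* DMA wrappers used above; the helper, its parameters, and the failure policy are illustrative:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map the linear area, then each frag; on a frag failure, unmap the
 * frags mapped so far and the linear area before reporting the error.
 */
static int example_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
                              dma_addr_t *head, dma_addr_t *frags)
{
        int i, nr_frags = skb_shinfo(skb)->nr_frags;
        skb_frag_t *frag;

        *head = pci_map_single(pdev, skb->data, skb_headlen(skb),
                               PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(pdev, *head))
                return -ENOMEM;

        for (i = 0; i < nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                frags[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
                                            skb_frag_size(frag),
                                            DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, frags[i]))
                        goto unwind;
        }
        return 0;

unwind:
        while (--i >= 0)
                dma_unmap_page(&pdev->dev, frags[i],
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               DMA_TO_DEVICE);
        pci_unmap_single(pdev, *head, skb_headlen(skb), PCI_DMA_TODEVICE);
        return -ENOMEM;
}
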
647 netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
667 phdr = (struct ethhdr *)skb->data;
672 tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
675 frag_count = skb_shinfo(skb)->nr_frags + 1;
680 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
682 delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
684 if (!__pskb_pull_tail(skb, delta))
687 frag_count = 1 + skb_shinfo(skb)->nr_frags;
707 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
712 pbuf->skb = skb;
715 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
726 tx_ring->cmd_buf_arr[producer].skb = NULL;
750 protocol = ntohs(skb->protocol);
752 l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
754 l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
757 if (!skb->encapsulation || !l4_is_udp ||
759 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
764 skb, tx_ring)))
769 qlcnic_send_filter(adapter, first_desc, skb);
771 tx_ring->tx_stats.tx_bytes += skb->len;
779 qlcnic_unmap_buffers(pdev, skb, pbuf);
782 dev_kfree_skb_any(skb);
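
In qlcnic_xmit_frame(), a non-GSO skb with more fragments than the hardware can chain is partially linearized rather than dropped: the byte total of the excess leading frags is pulled into the head so the descriptor count fits. A sketch of that clamp; MAX_TX_FRAGS stands in for QLCNIC_MAX_FRAGS_PER_TX, whose value is not shown above, so 14 is an assumption:

#include <linux/errno.h>
#include <linux/skbuff.h>

#define MAX_TX_FRAGS    14      /* assumed hardware descriptor limit */

/* Returns the (possibly reduced) descriptor count, or -ENOMEM if the
 * pull fails and the caller should drop the packet.
 */
static int example_clamp_tx_frags(struct sk_buff *skb)
{
        int i, delta = 0;
        int frag_count = skb_shinfo(skb)->nr_frags + 1;

        if (!skb_is_gso(skb) && frag_count > MAX_TX_FRAGS) {
                for (i = 0; i < frag_count - MAX_TX_FRAGS; i++)
                        delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

                if (!__pskb_pull_tail(skb, delta))
                        return -ENOMEM;

                frag_count = skb_shinfo(skb)->nr_frags + 1;
        }

        return frag_count;
}

__pskb_pull_tail() consumes frags from the front of the frag list, so sizing delta as the sum of the first excess frags collapses exactly those into the linear area.
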
814 struct sk_buff *skb;
818 skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
819 if (!skb) {
824 skb_reserve(skb, NET_IP_ALIGN);
825 dma = pci_map_single(pdev, skb->data,
830 dev_kfree_skb_any(skb);
834 buffer->skb = skb;
858 if (!buffer->skb) {
901 if (buffer->skb) {
913 dev_kfree_skb_any(buffer->skb);
914 buffer->skb = NULL;
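
The receive-ring refill above allocates an skb of the ring's buffer size, reserves NET_IP_ALIGN so the IP header lands on a 4-byte boundary, and DMA-maps the data area for the NIC to fill; a mapping failure frees the skb. A minimal sketch, with rx_buf_size standing in for the ring's skb_size/dma_size fields (an assumption):

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Allocate and map one receive buffer; returns NULL on any failure. */
static struct sk_buff *example_alloc_rx_skb(struct net_device *netdev,
                                            struct pci_dev *pdev,
                                            unsigned int rx_buf_size,
                                            dma_addr_t *dma)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb(netdev, rx_buf_size);
        if (!skb)
                return NULL;

        skb_reserve(skb, NET_IP_ALIGN);
        *dma = pci_map_single(pdev, skb->data, rx_buf_size - NET_IP_ALIGN,
                              PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, *dma)) {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        return skb;
}
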
1131 struct sk_buff *skb;
1134 if (unlikely(buffer->skb == NULL)) {
1142 skb = buffer->skb;
1146 skb->ip_summed = CHECKSUM_UNNECESSARY;
1148 skb_checksum_none_assert(skb);
1152 buffer->skb = NULL;
1154 return skb;
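
qlcnic_process_rxbuf() translates the NIC's checksum verdict into the stack's terms: CHECKSUM_UNNECESSARY when the hardware verified the L4 checksum, otherwise an explicit "not verified" marking. A sketch; STATUS_CKSUM_OK is the driver's own status code, and its value here is an assumption:

#include <linux/skbuff.h>

#define STATUS_CKSUM_OK 2       /* assumed driver status code */

static void example_set_rx_csum(struct sk_buff *skb, u16 cksum)
{
        if (cksum == STATUS_CKSUM_OK)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);
}
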
1158 struct sk_buff *skb, u16 *vlan_tag)
1162 if (!__vlan_get_tag(skb, vlan_tag)) {
1163 eth_hdr = (struct ethhdr *)skb->data;
1164 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
1165 skb_pull(skb, VLAN_HLEN);
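
qlcnic_check_rx_tagging() strips an in-band VLAN header in place: __vlan_get_tag() reads the TCI, the two MAC addresses slide four bytes up over the tag, and skb_pull() discards the now-dead bytes at the front. The same trick in isolation (the helper name is illustrative):

#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Strip an in-band 802.1Q tag, returning true and the TCI if one was
 * present. ETH_ALEN * 2 covers both MAC addresses of the L2 header.
 */
static bool example_strip_vlan(struct sk_buff *skb, u16 *vlan_tag)
{
        if (__vlan_get_tag(skb, vlan_tag))
                return false;   /* no in-band tag */

        memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2);
        skb_pull(skb, VLAN_HLEN);
        return true;
}

Because the encapsulated EtherType sits immediately after the TCI, the shifted addresses line up with it to form a plain untagged Ethernet header.
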
1189 struct sk_buff *skb;
1208 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1209 if (!skb)
1215 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1219 skb_put(skb, rds_ring->skb_size);
1221 skb_put(skb, length);
1224 skb_pull(skb, pkt_offset);
1226 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1228 dev_kfree_skb(skb);
1232 skb->protocol = eth_type_trans(skb, netdev);
1235 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1237 napi_gro_receive(&sds_ring->napi, skb);
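
The receive fast path ends the same way in each variant above: set the frame length, strip any hardware-reported padding at the front, resolve the protocol, attach the accelerated VLAN tag, and hand off to GRO. A sketch of that delivery tail; treating 0xffff as an "untagged" sentinel is an assumption here:

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void example_deliver_rx(struct napi_struct *napi,
                               struct net_device *netdev,
                               struct sk_buff *skb, int length,
                               int pkt_offset, u16 vid)
{
        skb_put(skb, length);
        if (pkt_offset)
                skb_pull(skb, pkt_offset);

        skb->protocol = eth_type_trans(skb, netdev);
        if (vid != 0xffff)      /* assumed "no VLAN" sentinel */
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

        napi_gro_receive(napi, skb);
}
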
1256 struct sk_buff *skb;
1284 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1285 if (!skb)
1291 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
1299 skb_put(skb, lro_length + data_offset);
1300 skb_pull(skb, l2_hdr_offset);
1302 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1304 dev_kfree_skb(skb);
1308 skb->protocol = eth_type_trans(skb, netdev);
1310 if (ntohs(skb->protocol) == ETH_P_IPV6) {
1311 ipv6h = (struct ipv6hdr *)skb->data;
1312 th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
1316 iph = (struct iphdr *)skb->data;
1317 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1325 length = skb->len;
1328 skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
1329 if (skb->protocol == htons(ETH_P_IPV6))
1330 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1332 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1336 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1337 netif_receive_skb(skb);
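
Once an LRO aggregate is rebuilt (IP/TCP headers located and fixed up above), it is flagged as GSO with the hardware-reported MSS so the stack can re-segment it if the packet is forwarded instead of delivered locally. A sketch matching the gso_size/gso_type assignments above:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static void example_mark_lro_gso(struct sk_buff *skb, u16 mss)
{
        skb_shinfo(skb)->gso_size = mss;
        if (skb->protocol == htons(ETH_P_IPV6))
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        else
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
}
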
1441 if (!buffer->skb) {
1466 static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
1469 unsigned char *data = skb->data;
1472 for (i = 0; i < skb->len; i++) {
1483 struct sk_buff *skb;
1500 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1501 if (!skb)
1505 skb_put(skb, rds_ring->skb_size);
1507 skb_put(skb, length);
1510 skb_pull(skb, pkt_offset);
1512 if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
1515 dump_skb(skb, adapter);
1517 dev_kfree_skb_any(skb);
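
The diagnostic path above receives the driver's own loopback frames, and dump_skb() prints skb->data byte by byte when verification fails. The kernel's stock hex dumper produces an equivalent dump with less code; a sketch (the prefix string is arbitrary):

#include <linux/printk.h>
#include <linux/skbuff.h>

static void example_dump_skb(struct sk_buff *skb)
{
        print_hex_dump(KERN_DEBUG, "skb: ", DUMP_PREFIX_OFFSET, 16, 1,
                       skb->data, skb->len, true);
}
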
1712 struct sk_buff *skb;
1730 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1731 if (!skb)
1735 skb_put(skb, rds_ring->skb_size);
1737 skb_put(skb, length);
1739 err = qlcnic_check_rx_tagging(adapter, skb, &vid);
1743 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
1748 dev_kfree_skb(skb);
1752 skb->protocol = eth_type_trans(skb, netdev);
1755 skb->ip_summed == CHECKSUM_UNNECESSARY) {
1756 skb->csum_level = 1;
1761 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1763 napi_gro_receive(&sds_ring->napi, skb);
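
The 83xx receive path additionally sets skb->csum_level = 1 when the NIC validated the inner checksum of an encapsulated packet, telling the stack that one checksum level beyond the outer headers is already verified (csum_level was added in kernel 3.18). A sketch, with is_encap standing in for the driver's status-descriptor test (an assumption):

#include <linux/skbuff.h>

static void example_mark_encap_csum(struct sk_buff *skb, bool is_encap)
{
        if (is_encap && skb->ip_summed == CHECKSUM_UNNECESSARY)
                skb->csum_level = 1;
}
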
1778 struct sk_buff *skb;
1806 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1807 if (!skb)
1815 skb_put(skb, lro_length + data_offset);
1816 skb_pull(skb, l2_hdr_offset);
1818 err = qlcnic_check_rx_tagging(adapter, skb, &vid);
1822 qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
1827 dev_kfree_skb(skb);
1831 skb->protocol = eth_type_trans(skb, netdev);
1832 if (ntohs(skb->protocol) == ETH_P_IPV6) {
1833 ipv6h = (struct ipv6hdr *)skb->data;
1834 th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
1839 iph = (struct iphdr *)skb->data;
1840 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1847 length = skb->len;
1851 skb_shinfo(skb)->gso_size = gso_size;
1852 if (skb->protocol == htons(ETH_P_IPV6))
1853 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1855 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1859 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1861 netif_receive_skb(skb);
2154 struct sk_buff *skb;
2168 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
2169 if (!skb)
2173 skb_put(skb, rds_ring->skb_size);
2175 skb_put(skb, length);
2177 if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
2180 dump_skb(skb, adapter);
2182 dev_kfree_skb_any(skb);