Searched refs:tx_ring (Results 1 - 25 of 197) sorted by relevance


/drivers/net/ethernet/intel/i40evf/
i40e_txrx.c  77 * @tx_ring: ring to be cleaned
79 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) argument
85 if (!tx_ring->tx_bi)
89 for (i = 0; i < tx_ring->count; i++)
90 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
92 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
93 memset(tx_ring->tx_bi, 0, bi_size);
96 memset(tx_ring->desc, 0, tx_ring
115 i40evf_free_tx_resources(struct i40e_ring *tx_ring) argument
147 i40e_check_tx_hang(struct i40e_ring *tx_ring) argument
188 i40e_get_head(struct i40e_ring *tx_ring) argument
202 i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) argument
445 i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) argument
1117 i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags) argument
1155 i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, __be16 protocol, u8 *hdr_len, u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) argument
1214 i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, u32 *td_cmd, u32 *td_offset, struct i40e_ring *tx_ring, u32 *cd_tunneling) argument
1323 i40e_create_tx_ctx(struct i40e_ring *tx_ring, const u64 cd_type_cmd_tso_mss, const u32 cd_tunneling, const u32 cd_l2tag2) argument
1357 i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, const u8 hdr_len, u32 td_cmd, u32 td_offset) argument
1513 __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) argument
1536 i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) argument
1552 i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring) argument
1582 i40e_xmit_frame_ring(struct sk_buff *skb, struct i40e_ring *tx_ring) argument
1659 struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping]; local
[all...]
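
The i40evf hits above (lines 79-96 of i40e_txrx.c) outline the usual Tx-ring teardown path: unmap and free every buffer the ring still owns, then zero both the software tracking array and the descriptor memory. A minimal sketch of that pattern follows; my_ring, my_tx_buffer and my_unmap_and_free are illustrative names, not the driver's identifiers.

    /* Kernel-style sketch of the teardown pattern above; assumes
     * <linux/dma-mapping.h> and <linux/skbuff.h>. Not the i40evf code. */
    struct my_tx_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
        unsigned int len;
    };

    struct my_ring {
        struct device *dev;
        struct my_tx_buffer *tx_bi;   /* software tracking, one per descriptor */
        void *desc;                   /* DMA-coherent descriptor memory */
        unsigned int count;           /* descriptors in the ring */
        unsigned int size;            /* bytes of descriptor memory */
        u16 next_to_use;
        u16 next_to_clean;
    };

    static void my_unmap_and_free(struct my_ring *ring, struct my_tx_buffer *bi)
    {
        if (bi->skb) {
            if (bi->dma)
                dma_unmap_single(ring->dev, bi->dma, bi->len, DMA_TO_DEVICE);
            dev_kfree_skb_any(bi->skb);
        }
        bi->skb = NULL;
        bi->dma = 0;
        bi->len = 0;
    }

    static void my_clean_tx_ring(struct my_ring *ring)
    {
        unsigned int i;

        if (!ring->tx_bi)
            return;

        /* Release every buffer the ring still owns. */
        for (i = 0; i < ring->count; i++)
            my_unmap_and_free(ring, &ring->tx_bi[i]);

        /* Zero the tracking array and the descriptors themselves. */
        memset(ring->tx_bi, 0, sizeof(*ring->tx_bi) * ring->count);
        memset(ring->desc, 0, ring->size);

        ring->next_to_use = 0;
        ring->next_to_clean = 0;
    }
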
/drivers/net/ethernet/intel/i40e/
i40e_txrx.c  56 struct i40e_ring *tx_ring; local
73 tx_ring = vsi->tx_rings[0];
74 dev = tx_ring->dev;
78 if (I40E_DESC_UNUSED(tx_ring) > 1)
84 if (!(I40E_DESC_UNUSED(tx_ring) > 1))
93 i = tx_ring->next_to_use;
94 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
95 first = &tx_ring->tx_bi[i];
98 tx_ring->next_to_use = ((i + 1) < tx_ring
539 i40e_clean_tx_ring(struct i40e_ring *tx_ring) argument
575 i40e_free_tx_resources(struct i40e_ring *tx_ring) argument
607 i40e_check_tx_hang(struct i40e_ring *tx_ring) argument
654 i40e_get_head(struct i40e_ring *tx_ring) argument
668 i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) argument
940 i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) argument
1648 i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 flags, __be16 protocol) argument
1762 i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags) argument
1825 i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, __be16 protocol, u8 *hdr_len, u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) argument
1884 i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u64 *cd_type_cmd_tso_mss) argument
1922 i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, u32 *td_cmd, u32 *td_offset, struct i40e_ring *tx_ring, u32 *cd_tunneling) argument
2031 i40e_create_tx_ctx(struct i40e_ring *tx_ring, const u64 cd_type_cmd_tso_mss, const u32 cd_tunneling, const u32 cd_l2tag2) argument
2062 __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) argument
2086 i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) argument
2107 i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, const u8 hdr_len, u32 td_cmd, u32 td_offset) argument
2275 i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring) argument
2309 i40e_xmit_frame_ring(struct sk_buff *skb, struct i40e_ring *tx_ring) argument
2397 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; local
[all...]
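
The i40e hits above (around lines 93-98 of i40e_txrx.c) show the standard producer-side bookkeeping: take the descriptor at next_to_use, record the first buffer, and advance the index with a wrap back to zero at the end of the ring. Below is a small, standalone sketch of that index arithmetic; demo_* names are mine, not the driver's.

    #include <stdint.h>

    /* Standalone sketch of the producer-index arithmetic. */
    struct demo_ring {
        uint16_t next_to_use;    /* next slot the driver will fill */
        uint16_t next_to_clean;  /* next slot the hardware will complete */
        uint16_t count;          /* total descriptors in the ring */
    };

    /* Hand out the current slot and advance, wrapping at the end of the ring. */
    static inline uint16_t demo_advance(struct demo_ring *ring)
    {
        uint16_t i = ring->next_to_use;

        ring->next_to_use = (uint16_t)((i + 1 < ring->count) ? i + 1 : 0);
        return i;
    }

    /* Free slots, keeping one descriptor as a gap so "full" and "empty" stay
     * distinguishable; the I40E_DESC_UNUSED() check above serves the same
     * purpose. */
    static inline uint16_t demo_unused(const struct demo_ring *ring)
    {
        uint16_t used = (ring->next_to_use >= ring->next_to_clean)
            ? ring->next_to_use - ring->next_to_clean
            : ring->count + ring->next_to_use - ring->next_to_clean;

        return ring->count - used - 1;
    }
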
i40e_fcoe.c  996 * @tx_ring: transmit ring for this packet
1010 static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring, argument
1017 struct i40e_pf *pf = tx_ring->vsi->back;
1018 u16 i = tx_ring->next_to_use;
1040 ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i);
1042 if (i == tx_ring->count)
1062 queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++);
1063 if (i == tx_ring->count)
1072 filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i);
1074 if (i == tx_ring
1108 i40e_fcoe_invalidate_ddp(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_fcoe_ddp *ddp) argument
1148 i40e_fcoe_handle_ddp(struct i40e_ring *tx_ring, struct sk_buff *skb, u8 sof) argument
1209 i40e_fcoe_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, u8 sof) argument
1280 i40e_fcoe_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, u8 hdr_len, u8 eof) argument
1365 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; local
[all...]
i40e_txrx.h  287 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
289 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
291 void i40e_free_tx_resources(struct i40e_ring *tx_ring);
295 void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
298 int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
299 int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
301 struct i40e_ring *tx_ring, u32 *flags);
/drivers/net/ethernet/intel/fm10k/
fm10k_main.c  771 static int fm10k_tso(struct fm10k_ring *tx_ring, argument
804 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
810 tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
812 netdev_err(tx_ring->netdev,
817 static void fm10k_tx_csum(struct fm10k_ring *tx_ring, argument
837 dev_warn(tx_ring->dev,
839 tx_ring->tx_stats.csum_err++;
858 dev_warn(tx_ring->dev,
862 tx_ring
915 fm10k_tx_desc_push(struct fm10k_ring *tx_ring, struct fm10k_tx_desc *tx_desc, u16 i, dma_addr_t dma, unsigned int size, u8 desc_flags) argument
932 __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) argument
949 fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) argument
956 fm10k_tx_map(struct fm10k_ring *tx_ring, struct fm10k_tx_buffer *first) argument
1080 fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_ring *tx_ring) argument
1147 fm10k_check_tx_hang(struct fm10k_ring *tx_ring) argument
1196 fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, struct fm10k_ring *tx_ring) argument
[all...]
fm10k_netdev.c  29 * @tx_ring: tx descriptor ring (for a specific queue) to setup
33 int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring) argument
35 struct device *dev = tx_ring->dev;
38 size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;
40 tx_ring->tx_buffer = vzalloc(size);
41 if (!tx_ring->tx_buffer)
44 u64_stats_init(&tx_ring->syncp);
47 tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc);
48 tx_ring
188 fm10k_clean_tx_ring(struct fm10k_ring *tx_ring) argument
220 fm10k_free_tx_resources(struct fm10k_ring *tx_ring) argument
695 struct fm10k_ring *tx_ring = interface->tx_ring[i]; local
[all...]
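
fm10k_setup_tx_resources above (and the igbvf_setup_tx_resources hits under /drivers/net/ethernet/intel/igbvf/ below) follow the same allocation recipe: a vzalloc'd array of software buffer-tracking entries plus a DMA-coherent block for the hardware descriptors, with the descriptor size rounded up to a page-sized multiple. A hedged sketch of that recipe; the ex_* names and the descriptor layout are illustrative, and the 4096-byte alignment simply mirrors the igbvf hit below.

    /* Kernel-style sketch; assumes <linux/vmalloc.h> and <linux/dma-mapping.h>. */
    struct ex_tx_desc {
        __le64 buffer_addr;     /* layout is device specific */
        __le64 cmd_type_len;
    };

    struct ex_tx_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
        unsigned int len;
    };

    struct ex_ring {
        struct device *dev;
        struct ex_tx_buffer *tx_bi;
        struct ex_tx_desc *desc;
        dma_addr_t dma;
        unsigned int count;
        unsigned int size;
        u16 next_to_use;
        u16 next_to_clean;
    };

    static int ex_setup_tx_resources(struct ex_ring *ring)
    {
        /* Software tracking entries, one per descriptor. */
        ring->tx_bi = vzalloc(sizeof(*ring->tx_bi) * ring->count);
        if (!ring->tx_bi)
            return -ENOMEM;

        /* Hardware descriptors in DMA-coherent memory, rounded up so the
         * ring occupies whole 4 KiB pages (as the igbvf hit below does). */
        ring->size = ring->count * sizeof(struct ex_tx_desc);
        ring->size = ALIGN(ring->size, 4096);

        ring->desc = dma_alloc_coherent(ring->dev, ring->size, &ring->dma,
                                        GFP_KERNEL);
        if (!ring->desc) {
            vfree(ring->tx_bi);
            ring->tx_bi = NULL;
            return -ENOMEM;
        }

        ring->next_to_use = 0;
        ring->next_to_clean = 0;
        return 0;
    }
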
/drivers/net/ethernet/intel/igbvf/
netdev.c  434 struct igbvf_ring *tx_ring)
439 size = sizeof(struct igbvf_buffer) * tx_ring->count;
440 tx_ring->buffer_info = vzalloc(size);
441 if (!tx_ring->buffer_info)
445 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
446 tx_ring->size = ALIGN(tx_ring->size, 4096);
448 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring
433 igbvf_setup_tx_resources(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring) argument
512 igbvf_clean_tx_ring(struct igbvf_ring *tx_ring) argument
547 igbvf_free_tx_resources(struct igbvf_ring *tx_ring) argument
790 igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) argument
894 struct igbvf_ring *tx_ring = adapter->tx_ring; local
995 struct igbvf_ring *tx_ring = adapter->tx_ring; local
1279 struct igbvf_ring *tx_ring = adapter->tx_ring; local
1848 struct igbvf_ring *tx_ring = adapter->tx_ring; local
1908 igbvf_tso(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) argument
1985 igbvf_tx_csum(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags) argument
2071 igbvf_tx_map_adv(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, struct sk_buff *skb) argument
2144 igbvf_tx_queue_adv(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, int tx_flags, int count, unsigned int first, u32 paylen, u8 hdr_len) argument
2205 igbvf_xmit_frame_ring_adv(struct sk_buff *skb, struct net_device *netdev, struct igbvf_ring *tx_ring) argument
2284 struct igbvf_ring *tx_ring; local
[all...]
/drivers/net/ethernet/qlogic/qlcnic/
qlcnic_io.c  277 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; local
281 producer = tx_ring->producer;
282 hwdesc = &tx_ring->desc_head[tx_ring->producer];
298 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
373 struct qlcnic_host_tx_ring *tx_ring)
377 u32 producer = tx_ring->producer;
407 hwdesc = &tx_ring
370 qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter, struct cmd_desc_type0 *first_desc, struct sk_buff *skb, struct qlcnic_host_tx_ring *tx_ring) argument
458 qlcnic_tx_pkt(struct qlcnic_adapter *adapter, struct cmd_desc_type0 *first_desc, struct sk_buff *skb, struct qlcnic_host_tx_ring *tx_ring) argument
650 struct qlcnic_host_tx_ring *tx_ring; local
882 qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring, int budget) argument
961 struct qlcnic_host_tx_ring *tx_ring; local
983 struct qlcnic_host_tx_ring *tx_ring; local
1567 struct qlcnic_host_tx_ring *tx_ring; local
1611 struct qlcnic_host_tx_ring *tx_ring; local
1634 struct qlcnic_host_tx_ring *tx_ring; local
1661 struct qlcnic_host_tx_ring *tx_ring; local
1944 struct qlcnic_host_tx_ring *tx_ring; local
1967 struct qlcnic_host_tx_ring *tx_ring; local
1987 struct qlcnic_host_tx_ring *tx_ring; local
2025 struct qlcnic_host_tx_ring *tx_ring; local
2053 struct qlcnic_host_tx_ring *tx_ring; local
2082 struct qlcnic_host_tx_ring *tx_ring; local
2130 struct qlcnic_host_tx_ring *tx_ring; local
[all...]
qlcnic_ctx.c  416 struct qlcnic_host_tx_ring *tx_ring,
434 tx_ring->producer = 0;
435 tx_ring->sw_consumer = 0;
436 *(tx_ring->hw_consumer) = 0;
477 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
481 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
482 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
496 tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
498 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
499 tx_ring
415 qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring, int ring) argument
526 qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter, struct qlcnic_host_tx_ring *tx_ring) argument
566 struct qlcnic_host_tx_ring *tx_ring; local
719 struct qlcnic_host_tx_ring *tx_ring; local
[all...]
/drivers/net/ethernet/oki-semi/pch_gbe/
pch_gbe_main.c  631 adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,
632 sizeof(*adapter->tx_ring), GFP_KERNEL);
633 if (!adapter->tx_ring)
858 (unsigned long long)adapter->tx_ring->dma,
859 adapter->tx_ring->size);
862 tdba = adapter->tx_ring->dma;
863 tdlen = adapter->tx_ring->size - 0x10;
974 * @tx_ring: Ring to be cleaned
977 struct pch_gbe_tx_ring *tx_ring)
985 for (i = 0; i < tx_ring
976 pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter, struct pch_gbe_tx_ring *tx_ring) argument
1147 pch_gbe_tx_queue(struct pch_gbe_adapter *adapter, struct pch_gbe_tx_ring *tx_ring, struct sk_buff *skb) argument
1503 pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter, struct pch_gbe_tx_ring *tx_ring) argument
1535 pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, struct pch_gbe_tx_ring *tx_ring) argument
1784 pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, struct pch_gbe_tx_ring *tx_ring) argument
1867 pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter, struct pch_gbe_tx_ring *tx_ring) argument
1941 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; local
2137 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; local
[all...]
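
The pch_gbe hits above (and the ixgb tdba/tdlen lines further down) show the last step of bringing a Tx ring up: write the ring's DMA base address and length into the device's descriptor-base registers. A heavily hedged sketch follows; the register offsets and names are hypothetical, not the PCH GbE (or any other device's) layout.

    /* Sketch only: invented register offsets. Assumes <linux/io.h>. */
    #define HW_TX_DESC_BASE_LO   0x0420   /* low 32 bits of ring DMA address */
    #define HW_TX_DESC_BASE_HI   0x0424   /* high 32 bits of ring DMA address */
    #define HW_TX_DESC_LEN       0x0428   /* ring length in bytes */

    static void hw_program_tx_ring(void __iomem *regs, dma_addr_t ring_dma,
                                   u32 ring_bytes)
    {
        writel(lower_32_bits(ring_dma), regs + HW_TX_DESC_BASE_LO);
        writel(upper_32_bits(ring_dma), regs + HW_TX_DESC_BASE_HI);
        writel(ring_bytes, regs + HW_TX_DESC_LEN);
    }
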
/drivers/net/ethernet/intel/ixgbevf/
ixgbevf_main.c  191 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, argument
197 dma_unmap_single(tx_ring->dev,
202 dma_unmap_page(tx_ring->dev,
225 * @tx_ring: tx ring to clean
228 struct ixgbevf_ring *tx_ring)
234 unsigned int budget = tx_ring->count / 2;
235 unsigned int i = tx_ring->next_to_clean;
240 tx_buffer = &tx_ring->tx_buffer_info[i];
241 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
242 i -= tx_ring
227 ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *tx_ring) argument
1744 ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) argument
2491 ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring) argument
2529 ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) argument
2814 ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, u32 vlan_macip_lens, u32 type_tucmd, u32 mss_l4len_idx) argument
2835 ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, u8 *hdr_len) argument
2904 ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first) argument
3012 ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, const u8 hdr_len) argument
3138 __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) argument
3158 ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) argument
3169 struct ixgbevf_ring *tx_ring; local
[all...]
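
ixgbevf_clean_tx_irq above illustrates the run-time counterpart of the teardown path: walk from next_to_clean, release only descriptors the hardware has completed, and stop after a budget (here half the ring) so one queue cannot monopolize the poll. A simplified sketch of that loop; txc_* names are illustrative and the plain done flag stands in for the driver's real completion test.

    /* Kernel-style sketch of a budgeted Tx completion pass. */
    struct txc_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
        unsigned int len;
        bool done;               /* stand-in for checking hardware state */
    };

    struct txc_ring {
        struct device *dev;
        struct txc_buffer *buf;
        u16 count;
        u16 next_to_clean;
    };

    /* Returns true when the ring was drained within the budget. */
    static bool txc_clean(struct txc_ring *ring)
    {
        unsigned int budget = ring->count / 2;   /* bound work per poll */
        unsigned int cleaned = 0;
        u16 i = ring->next_to_clean;

        while (cleaned < budget) {
            struct txc_buffer *bi = &ring->buf[i];

            if (!bi->done)                       /* hardware not finished yet */
                break;

            dma_unmap_single(ring->dev, bi->dma, bi->len, DMA_TO_DEVICE);
            dev_kfree_skb_any(bi->skb);
            bi->skb = NULL;
            bi->done = false;
            cleaned++;

            if (++i == ring->count)              /* wrap around the ring */
                i = 0;
        }

        ring->next_to_clean = i;
        return cleaned < budget;
    }

A real handler also updates byte and packet counters and restarts the queue if it had been stopped for lack of descriptors (see the maybe_stop_tx sketch further down).
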
ethtool.c  264 struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; local
289 adapter->tx_ring[i]->count = new_tx_count;
298 tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
299 if (!tx_ring) {
306 tx_ring[i] = *adapter->tx_ring[i];
307 tx_ring[i].count = new_tx_count;
308 err = ixgbevf_setup_tx_resources(&tx_ring[i]);
312 ixgbevf_free_tx_resources(&tx_ring[
[all...]
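
The ethtool.c hits show the usual set_ringparam sequence: build a temporary array of ring structures with the new count, allocate their resources, and only swap them in once everything succeeded, unwinding the already-allocated entries on failure. A hedged sketch of that structure; resz_* names are mine, and resz_setup()/resz_free() stand in for the driver's setup and free helpers.

    /* Kernel-style sketch of the resize/unwind structure. */
    struct resz_ring {
        u32 count;
        /* descriptor and buffer pointers omitted */
    };

    int resz_setup(struct resz_ring *ring);
    void resz_free(struct resz_ring *ring);

    static int resz_tx_rings(struct resz_ring **live, int num_queues,
                             u32 new_count)
    {
        struct resz_ring *tmp;
        int i, err = 0;

        tmp = vmalloc(num_queues * sizeof(*tmp));
        if (!tmp)
            return -ENOMEM;

        for (i = 0; i < num_queues; i++) {
            /* Start from a copy of the live ring, then resize it. */
            tmp[i] = *live[i];
            tmp[i].count = new_count;
            err = resz_setup(&tmp[i]);
            if (err) {
                while (i--)          /* unwind everything allocated so far */
                    resz_free(&tmp[i]);
                goto out;
            }
        }

        /* Success: a real driver quiesces the device, frees the old rings,
         * and only then copies the resized ones into place. */
        for (i = 0; i < num_queues; i++)
            *live[i] = tmp[i];
    out:
        vfree(tmp);
        return err;
    }
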
/drivers/net/ethernet/qualcomm/
qca_spi.h  56 struct tx_ring { struct
83 struct tx_ring txr;
/drivers/net/ethernet/agere/
et131x.c  357 struct tx_ring { struct
458 spinlock_t tcb_send_qlock; /* protects the tx_ring send tcb list */
459 spinlock_t tcb_ready_qlock; /* protects the tx_ring ready tcb list */
488 struct tx_ring tx_ring; member in struct:et131x_adapter
1642 struct tx_ring *tx_ring = &adapter->tx_ring; local
1645 writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
1646 writel(lower_32_bits(tx_ring
1755 struct tx_ring *tx_ring = &adapter->tx_ring; local
2360 struct tx_ring *tx_ring = &adapter->tx_ring; local
2394 struct tx_ring *tx_ring = &adapter->tx_ring; local
2431 struct tx_ring *tx_ring = &adapter->tx_ring; local
2592 struct tx_ring *tx_ring = &adapter->tx_ring; local
2643 struct tx_ring *tx_ring = &adapter->tx_ring; local
2699 struct tx_ring *tx_ring = &adapter->tx_ring; local
2744 struct tx_ring *tx_ring = &adapter->tx_ring; local
3400 struct tx_ring *tx_ring = &adapter->tx_ring; local
3813 struct tx_ring *tx_ring = &adapter->tx_ring; local
3850 struct tx_ring *tx_ring = &adapter->tx_ring; local
[all...]
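
et131x.c keeps its transmit state in a private struct tx_ring embedded in the adapter, with separate spinlocks for the list of TCBs ready for reuse and the list of TCBs currently in flight (the tcb_ready_qlock and tcb_send_qlock comments above). Keeping the two lists under different locks lets the transmit path and the completion path run without contending on one lock. A rough sketch of that layout with illustrative names:

    /* Kernel-style sketch, loosely modeled on the comments above. */
    struct demo_tcb {
        struct demo_tcb *next;
        struct sk_buff *skb;
    };

    struct demo_tx_ring {
        spinlock_t ready_qlock;        /* protects the free/ready TCB list */
        struct demo_tcb *ready_head;

        spinlock_t send_qlock;         /* protects the in-flight TCB list */
        struct demo_tcb *send_head;
        struct demo_tcb *send_tail;
    };

    /* Transmit path: pull a free TCB under the ready-list lock only, so it
     * does not contend with completion work on the send list. */
    static struct demo_tcb *demo_get_free_tcb(struct demo_tx_ring *tx)
    {
        struct demo_tcb *tcb;
        unsigned long flags;

        spin_lock_irqsave(&tx->ready_qlock, flags);
        tcb = tx->ready_head;
        if (tcb)
            tx->ready_head = tcb->next;
        spin_unlock_irqrestore(&tx->ready_qlock, flags);

        return tcb;
    }
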
/drivers/net/ethernet/intel/e1000/
e1000_main.c  106 struct e1000_tx_ring *tx_ring);
125 struct e1000_tx_ring *tx_ring);
139 struct e1000_tx_ring *tx_ring);
1240 kfree(adapter->tx_ring);
1278 kfree(adapter->tx_ring);
1331 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1333 if (!adapter->tx_ring)
1339 kfree(adapter->tx_ring);
1573 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1578 &adapter->tx_ring[
1923 e1000_free_tx_resources(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring) argument
1980 e1000_clean_tx_ring(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring) argument
2700 e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, struct sk_buff *skb, __be16 protocol) argument
2772 e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, struct sk_buff *skb, __be16 protocol) argument
2828 e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, struct sk_buff *skb, unsigned int first, unsigned int max_per_txd, unsigned int nr_frags, unsigned int mss) argument
2976 e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, int tx_flags, int count) argument
3081 struct e1000_tx_ring *tx_ring = adapter->tx_ring; local
3102 e1000_maybe_stop_tx(struct net_device *netdev, struct e1000_tx_ring *tx_ring, int size) argument
3116 struct e1000_tx_ring *tx_ring; local
3352 struct e1000_tx_ring *tx_ring = adapter->tx_ring; local
3832 e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring) argument
[all...]
/drivers/net/ethernet/intel/ixgb/
ixgb_main.c  701 struct ixgb_desc_ring *txdr = &adapter->tx_ring;
738 u64 tdba = adapter->tx_ring.dma;
739 u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
744 * tx_ring.dma can be either a 32 or 64 bit value
915 vfree(adapter->tx_ring.buffer_info);
916 adapter->tx_ring.buffer_info = NULL;
918 dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
919 adapter->tx_ring.desc, adapter->tx_ring.dma);
921 adapter->tx_ring
956 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; local
1325 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; local
1426 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; local
1476 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; local
1495 ixgb_maybe_stop_tx(struct net_device *netdev, struct ixgb_desc_ring *tx_ring, int size) argument
1843 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; local
[all...]
ixgb_param.c  278 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; local
281 tx_ring->count = TxDescriptors[bd];
282 ixgb_validate_option(&tx_ring->count, &opt);
284 tx_ring->count = opt.def;
286 tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
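
ixgb_param.c shows how a module-parameter descriptor count is taken in: validate it against the allowed range, fall back to the default if it is out of bounds, then round it to the multiple the hardware requires. A small standalone sketch of that sanitizing step; the limits below are illustrative, not the ixgb values.

    #include <stdio.h>

    #define OPT_MIN_TXD    64
    #define OPT_MAX_TXD    4096
    #define OPT_DEF_TXD    256
    #define OPT_TXD_MULT   8      /* hardware-required descriptor multiple */

    /* Round up to the next multiple of 'a' (generic form of the kernel's
     * ALIGN(), which assumes a power-of-two alignment). */
    #define OPT_ALIGN(x, a)    (((x) + (a) - 1) / (a) * (a))

    static unsigned int opt_sanitize_txd(long requested)
    {
        unsigned int count;

        if (requested < OPT_MIN_TXD || requested > OPT_MAX_TXD)
            count = OPT_DEF_TXD;        /* out of range: fall back to default */
        else
            count = (unsigned int)requested;

        return OPT_ALIGN(count, OPT_TXD_MULT);
    }

    int main(void)
    {
        printf("%u\n", opt_sanitize_txd(100));    /* prints 104 */
        printf("%u\n", opt_sanitize_txd(9999));   /* out of range, prints 256 */
        return 0;
    }
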
/drivers/infiniband/hw/amso1100/
c2.c  111 static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr, argument
119 tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
120 if (!tx_ring->start)
123 elem = tx_ring->start;
126 for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
141 if (i == tx_ring->count - 1) {
142 elem->next = tx_ring->start;
151 tx_ring->to_use = tx_ring
326 struct c2_ring *tx_ring = &c2_port->tx_ring; local
383 struct c2_ring *tx_ring = &c2_port->tx_ring; local
760 struct c2_ring *tx_ring = &c2_port->tx_ring; local
[all...]
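
c2_tx_ring_alloc builds its ring as a circular singly linked list: one element per descriptor, each pointing at the next, with the last element pointing back at the start so producer and consumer simply follow next pointers. A minimal standalone sketch of that construction, with element contents reduced to an index for illustration:

    #include <stdlib.h>

    struct ring_elem {
        struct ring_elem *next;
        unsigned int idx;             /* placeholder for per-descriptor state */
    };

    struct ring {
        struct ring_elem *start;      /* first element of the ring */
        struct ring_elem *to_use;     /* producer position */
        struct ring_elem *to_clean;   /* consumer position */
        unsigned int count;
    };

    static int ring_alloc(struct ring *r, unsigned int count)
    {
        struct ring_elem *elem;
        unsigned int i;

        r->start = calloc(count, sizeof(*r->start));
        if (!r->start)
            return -1;

        for (i = 0, elem = r->start; i < count; i++, elem++) {
            elem->idx = i;
            /* The last element points back at the start, closing the ring. */
            elem->next = (i == count - 1) ? r->start : elem + 1;
        }

        r->count = count;
        r->to_use = r->start;
        r->to_clean = r->start;
        return 0;
    }
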
/drivers/net/ethernet/apm/xgene/
xgene_enet_main.c  223 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, argument
226 struct device *dev = ndev_to_dev(tx_ring->ndev);
229 u16 tail = tx_ring->tail;
232 raw_desc = &tx_ring->raw_desc[tail];
237 netdev_err(tx_ring->ndev, "DMA mapping error\n");
247 raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
249 tx_ring->cp_ring->cp_skb[tail] = skb;
258 struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; local
259 struct xgene_enet_desc_ring *cp_ring = tx_ring
640 struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; local
[all...]
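
xgene_enet_setup_tx_desc shows the per-packet transmit step: map the skb's data for device access, bail out if the mapping fails, fill the descriptor at the tail slot, and remember the skb so the completion path can unmap and free it later. A hedged sketch of that step; the xm_* names and the two-word descriptor layout are invented, not the X-Gene format.

    /* Kernel-style sketch; assumes <linux/dma-mapping.h> and <linux/skbuff.h>. */
    struct xm_desc {
        __le64 addr;    /* DMA address of the packet data */
        __le64 info;    /* length and control bits, device specific */
    };

    struct xm_ring {
        struct device *dev;
        struct xm_desc *desc;
        struct sk_buff **skb;    /* completion path frees these */
        u16 tail;
        u16 count;
    };

    static int xm_setup_tx_desc(struct xm_ring *ring, struct sk_buff *skb)
    {
        u16 tail = ring->tail;
        dma_addr_t dma;

        dma = dma_map_single(ring->dev, skb->data, skb_headlen(skb),
                             DMA_TO_DEVICE);
        if (dma_mapping_error(ring->dev, dma)) {
            dev_err(ring->dev, "DMA mapping error\n");
            return -EINVAL;
        }

        ring->desc[tail].addr = cpu_to_le64(dma);
        ring->desc[tail].info = cpu_to_le64(skb_headlen(skb));

        /* Keep the skb so the Tx-completion handler can unmap and free it. */
        ring->skb[tail] = skb;
        ring->tail = (tail + 1 < ring->count) ? tail + 1 : 0;
        return 0;
    }
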
/drivers/net/ethernet/atheros/atl1e/
atl1e_main.c  636 hw->tpd_thresh = adapter->tx_ring.count / 2;
664 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; local
669 if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
672 ring_count = tx_ring->count;
675 tx_buffer = &tx_ring->tx_buffer[index];
688 tx_buffer = &tx_ring->tx_buffer[index];
695 memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
697 memset(tx_ring
762 struct atl1e_tx_ring *tx_ring = NULL; local
818 struct atl1e_tx_ring *tx_ring; local
906 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; local
1248 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; local
1571 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; local
1590 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; local
1604 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; local
1856 struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; local
[all...]
/drivers/net/ethernet/intel/ixgbe/
ixgbe_main.c  538 struct ixgbe_ring *tx_ring; local
580 tx_ring = adapter->tx_ring[n];
581 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
583 n, tx_ring->next_to_use, tx_ring->next_to_clean,
632 tx_ring = adapter->tx_ring[n];
634 pr_info("TX QUEUE INDEX = %d\n", tx_ring
971 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; local
1006 ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) argument
1058 ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring) argument
1209 ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring, int cpu) argument
4808 ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) argument
5176 ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) argument
5348 ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) argument
5777 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; local
6204 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; local
6555 ixgbe_tso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, u8 *hdr_len) argument
6622 ixgbe_tx_csum(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first) argument
6747 __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) argument
6769 ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) argument
6780 ixgbe_tx_map(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, const u8 hdr_len) argument
7055 ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) argument
7194 struct ixgbe_ring *tx_ring; local
[all...]
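
Nearly every driver in these results carries a __maybe_stop_tx / maybe_stop_tx pair (ixgbe_main.c lines 6747 and 6769 above, plus the i40e, fm10k, igb and e1000e equivalents elsewhere in the listing): if the ring lacks enough free descriptors for the next frame, stop the queue, then re-check after a memory barrier and restart it if the completion path freed space in the meantime. A hedged sketch of that check; stop_ring and its fields are illustrative, and desc_unused() stands in for each driver's own free-descriptor macro.

    /* Kernel-style sketch of the queue stop/restart check. */
    struct stop_ring {
        struct net_device *netdev;
        u16 queue_index;
        u16 count;
        u16 next_to_use;
        u16 next_to_clean;
    };

    static u16 desc_unused(const struct stop_ring *ring)
    {
        u16 ntc = ring->next_to_clean, ntu = ring->next_to_use;
        u16 used = (ntu >= ntc) ? ntu - ntc : ring->count + ntu - ntc;

        return ring->count - used - 1;   /* keep one slot as a full/empty gap */
    }

    static int maybe_stop_tx(struct stop_ring *ring, u16 size)
    {
        if (likely(desc_unused(ring) >= size))
            return 0;

        netif_stop_subqueue(ring->netdev, ring->queue_index);

        /* Pair with the barrier in the completion path so a wakeup cannot be
         * missed: re-check after stopping, and restart if space appeared. */
        smp_mb();

        if (likely(desc_unused(ring) < size))
            return -EBUSY;

        netif_start_subqueue(ring->netdev, ring->queue_index);
        return 0;
    }

The completion path is the other half of this handshake: after reclaiming descriptors it issues the matching barrier and restarts the queue if it had been stopped.
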
/drivers/net/ethernet/intel/e1000e/
netdev.c  217 struct e1000_ring *tx_ring = adapter->tx_ring; local
261 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
263 0, tx_ring->next_to_use, tx_ring->next_to_clean,
305 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
307 tx_desc = E1000_TX_DESC(*tx_ring, i);
308 buffer_info = &tx_ring
639 e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) argument
1071 e1000_put_txbuf(struct e1000_ring *tx_ring, struct e1000_buffer *buffer_info) argument
1098 struct e1000_ring *tx_ring = adapter->tx_ring; local
1214 e1000_clean_tx_irq(struct e1000_ring *tx_ring) argument
1943 struct e1000_ring *tx_ring = adapter->tx_ring; local
1988 struct e1000_ring *tx_ring = adapter->tx_ring; local
2341 e1000e_setup_tx_resources(struct e1000_ring *tx_ring) argument
2426 e1000_clean_tx_ring(struct e1000_ring *tx_ring) argument
2460 e1000e_free_tx_resources(struct e1000_ring *tx_ring) argument
2938 struct e1000_ring *tx_ring = adapter->tx_ring; local
4928 struct e1000_ring *tx_ring = adapter->tx_ring; local
5167 e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb, __be16 protocol) argument
5235 e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb, __be16 protocol) argument
5289 e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, unsigned int first, unsigned int max_per_txd, unsigned int nr_frags) argument
5386 e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) argument
5500 __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) argument
5523 e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) argument
5536 struct e1000_ring *tx_ring = adapter->tx_ring; local
[all...]
/drivers/net/ethernet/qlogic/netxen/
netxen_nic_ctx.c  448 struct nx_host_tx_ring *tx_ring = adapter->tx_ring; local
490 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
491 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
503 tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
706 struct nx_host_tx_ring *tx_ring; local
712 tx_ring = adapter->tx_ring;
716 hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
717 hwctx->cmd_ring_size = cpu_to_le32(tx_ring
762 struct nx_host_tx_ring *tx_ring; local
876 struct nx_host_tx_ring *tx_ring; local
[all...]
/drivers/net/ethernet/intel/igb/
igb_main.c  369 struct igb_ring *tx_ring; local
404 tx_ring = adapter->tx_ring[n];
405 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
407 n, tx_ring->next_to_use, tx_ring->next_to_clean,
432 tx_ring = adapter->tx_ring[n];
434 pr_info("TX QUEUE INDEX = %d\n", tx_ring
3168 igb_setup_tx_resources(struct igb_ring *tx_ring) argument
3695 igb_free_tx_resources(struct igb_ring *tx_ring) argument
3753 igb_clean_tx_ring(struct igb_ring *tx_ring) argument
4358 struct igb_ring *tx_ring = adapter->tx_ring[i]; local
4615 igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, u32 type_tucmd, u32 mss_l4len_idx) argument
4639 igb_tso(struct igb_ring *tx_ring, struct igb_tx_buffer *first, u8 *hdr_len) argument
4704 igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) argument
4800 igb_tx_olinfo_status(struct igb_ring *tx_ring, union e1000_adv_tx_desc *tx_desc, u32 tx_flags, unsigned int paylen) argument
4823 __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) argument
4851 igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) argument
4858 igb_tx_map(struct igb_ring *tx_ring, struct igb_tx_buffer *first, const u8 hdr_len) argument
4990 igb_xmit_frame_ring(struct sk_buff *skb, struct igb_ring *tx_ring) argument
5476 igb_update_tx_dca(struct igb_adapter *adapter, struct igb_ring *tx_ring, int cpu) argument
6348 struct igb_ring *tx_ring = q_vector->tx.ring; local
[all...]
/drivers/net/ethernet/qlogic/qlge/
qlge_main.c  2100 struct tx_ring *tx_ring; local
2104 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2105 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2107 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2108 tx_ring->tx_packets++;
2133 atomic_inc(&tx_ring->tx_count);
2202 struct tx_ring *tx_ring; local
2633 struct tx_ring *tx_ring; local
2757 ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) argument
2775 ql_free_tx_resources(struct ql_adapter *qdev, struct tx_ring *tx_ring) argument
2787 ql_alloc_tx_resources(struct ql_adapter *qdev, struct tx_ring *tx_ring) argument
3047 struct tx_ring *tx_ring; local
3250 ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) argument
4095 struct tx_ring *tx_ring; local
4287 struct tx_ring *tx_ring = &qdev->tx_ring[0]; local
[all...]

Completed in 635 milliseconds
