Lines Matching refs:tx_queue

37 efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
39 return tx_queue->insert_count & tx_queue->ptr_mask;
43 __efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
45 return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
49 efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
52 __efx_tx_queue_get_insert_buffer(tx_queue);
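
The helpers around lines 37-52 turn the free-running insert_count into a ring slot by masking with ptr_mask, which is entries - 1 for a power-of-two ring (see line 675). A minimal user-space sketch of that pattern, with toy names rather than the driver's types:

#include <assert.h>
#include <stdio.h>

/* Toy ring: a free-running insert counter is reduced to a slot index by
 * masking with (entries - 1); entries must be a power of two, as the
 * probe path guarantees (line 675: ptr_mask = entries - 1). */
struct toy_txq {
        unsigned int insert_count;      /* free-running, wraps naturally */
        unsigned int ptr_mask;          /* entries - 1 */
        int buffer[8];
};

static unsigned int toy_get_insert_index(const struct toy_txq *q)
{
        return q->insert_count & q->ptr_mask;
}

static int *toy_get_insert_buffer(struct toy_txq *q)
{
        return &q->buffer[toy_get_insert_index(q)];
}

int main(void)
{
        struct toy_txq q = { .ptr_mask = 8 - 1 };

        /* After ten insertions the index has wrapped round to slot 2. */
        q.insert_count = 10;
        assert(toy_get_insert_index(&q) == 2);
        *toy_get_insert_buffer(&q) = 1;
        printf("slot %u claimed\n", toy_get_insert_index(&q));
        return 0;
}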
61 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
67 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
82 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
84 tx_queue->queue, tx_queue->read_count);
93 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
271 efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
274 efx_tx_queue_get_insert_buffer(tx_queue);
275 u8 __iomem *piobuf = tx_queue->piobuf;
290 efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
292 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
300 __iowrite64_copy(tx_queue->piobuf, skb->data,
310 tx_queue->piobuf_offset);
311 ++tx_queue->pio_packets;
312 ++tx_queue->insert_count;
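
Lines 271-312 are the PIO fast path: a fragmented skb is copied piecewise into the push buffer and flushed, while a short linear skb is written straight in with __iowrite64_copy, whose count argument is in 64-bit words rather than bytes. A hedged sketch of just the byte-to-word rounding such a copy needs (bytes_to_qwords is an illustrative helper, not a driver function):

#include <assert.h>
#include <stddef.h>

/* __iowrite64_copy() takes a count of 64-bit words, so a byte length
 * must be rounded up before the copy.  Illustrative helper only. */
static size_t bytes_to_qwords(size_t len)
{
        return (len + 7) / 8;           /* DIV_ROUND_UP(len, 8) */
}

int main(void)
{
        assert(bytes_to_qwords(1) == 1);
        assert(bytes_to_qwords(8) == 1);
        assert(bytes_to_qwords(60) == 8);   /* a minimal frame needs 8 words */
        return 0;
}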
333 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
335 struct efx_nic *efx = tx_queue->efx;
338 unsigned int old_insert_count = tx_queue->insert_count;
347 return efx_enqueue_skb_tso(tx_queue, skb);
363 efx_nic_may_tx_pio(tx_queue)) {
364 buffer = efx_enqueue_skb_pio(tx_queue, skb);
389 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
401 ++tx_queue->insert_count;
429 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
431 efx_tx_maybe_stop_queue(tx_queue);
434 if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
435 efx_nic_push_buffers(tx_queue);
437 tx_queue->tx_packets++;
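
Lines 429-437 show the doorbell batching decision: descriptors are pushed to the NIC only when the stack has no further packets queued behind this one (xmit_more clear) or the queue has just been stopped. A toy model of that decision, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* Doorbell batching as on lines 434-435: ring the doorbell only at the
 * end of a burst, or when the queue stopped and pending work must be
 * flushed so completions can restart it. */
static void maybe_push(bool xmit_more, bool queue_stopped)
{
        if (!xmit_more || queue_stopped)
                printf("push descriptors (ring doorbell)\n");
        else
                printf("defer doorbell, more packets coming\n");
}

int main(void)
{
        maybe_push(true, false);    /* mid-burst: defer */
        maybe_push(false, false);   /* last packet of burst: push */
        maybe_push(true, true);     /* queue stopped: push anyway */
        return 0;
}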
444 "fragments for DMA\n", tx_queue->queue, skb->len,
451 while (tx_queue->insert_count != old_insert_count) {
453 --tx_queue->insert_count;
454 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
455 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
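
Lines 451-455 (and the same pattern at 988-991) are the failure unwind: insert_count is walked back to the value saved on entry and every buffer claimed since is released. A self-contained sketch of the rollback, with ints standing in for DMA buffers:

#include <stdio.h>

#define ENTRIES 8
#define MASK    (ENTRIES - 1)

/* Rollback pattern from lines 451-455: on a mapping error, walk
 * insert_count back to the value saved on entry and release every
 * buffer claimed since. */
static unsigned int insert_count;
static int ring[ENTRIES];

static void unwind(unsigned int old_insert_count)
{
        while (insert_count != old_insert_count) {
                --insert_count;
                ring[insert_count & MASK] = 0;   /* "free" the buffer */
                printf("released slot %u\n", insert_count & MASK);
        }
}

int main(void)
{
        unsigned int old = insert_count;

        /* Claim three slots, then pretend the third DMA mapping failed. */
        for (int i = 0; i < 3; i++)
                ring[insert_count++ & MASK] = 1;
        unwind(old);
        return 0;
}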
476 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
481 struct efx_nic *efx = tx_queue->efx;
484 stop_index = (index + 1) & tx_queue->ptr_mask;
485 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
488 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
494 tx_queue->queue, read_ptr);
499 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
501 ++tx_queue->read_count;
502 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
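
Lines 476-502 free completed buffers: stop_index is one slot past the index reported by the hardware, masked back onto the ring, and the loop advances read_count until it gets there. A user-space sketch of that walk:

#include <stdio.h>

#define ENTRIES 8
#define MASK    (ENTRIES - 1)

/* Completion walk from lines 484-502: release ring slots from
 * read_count up to and including the reported completion index. */
static unsigned int read_count;

static void dequeue_up_to(unsigned int index)
{
        unsigned int stop_index = (index + 1) & MASK;
        unsigned int read_ptr = read_count & MASK;

        while (read_ptr != stop_index) {
                printf("complete slot %u\n", read_ptr);
                ++read_count;
                read_ptr = read_count & MASK;
        }
}

int main(void)
{
        dequeue_up_to(2);   /* completes slots 0, 1, 2 */
        dequeue_up_to(5);   /* completes slots 3, 4, 5 */
        return 0;
}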
519 struct efx_tx_queue *tx_queue;
536 tx_queue = efx_get_tx_queue(efx, index, type);
538 return efx_enqueue_skb(tx_queue, skb);
541 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
543 struct efx_nic *efx = tx_queue->efx;
546 tx_queue->core_txq =
548 tx_queue->queue / EFX_TXQ_TYPES +
549 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
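
Lines 546-549 map a hardware queue number onto its core netdev TX queue: each channel owns EFX_TXQ_TYPES hardware queues, and high-priority queues land in a second bank of core queues. The constants below, including the offset used for the high-priority bank, are assumptions for illustration; the listing truncates the real expression.

#include <stdio.h>

/* Hypothetical constants mirroring the mapping on lines 546-549. */
#define TXQ_TYPES     4
#define TYPE_HIGHPRI  2
#define N_CHANNELS    8   /* assumed offset for the high-priority bank */

static unsigned int core_txq_index(unsigned int hw_queue)
{
        return hw_queue / TXQ_TYPES +
               ((hw_queue & TYPE_HIGHPRI) ? N_CHANNELS : 0);
}

int main(void)
{
        printf("hw 5 -> core %u\n", core_txq_index(5));   /* channel 1, normal bank */
        printf("hw 6 -> core %u\n", core_txq_index(6));   /* channel 1, high-priority bank */
        return 0;
}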
557 struct efx_tx_queue *tx_queue;
575 efx_for_each_possible_channel_tx_queue(tx_queue,
577 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
579 if (!tx_queue->buffer) {
580 rc = efx_probe_tx_queue(tx_queue);
584 if (!tx_queue->initialised)
585 efx_init_tx_queue(tx_queue);
586 efx_init_tx_queue_core_txq(tx_queue);
610 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
613 struct efx_nic *efx = tx_queue->efx;
617 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
619 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
620 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
623 ++tx_queue->merge_events;
630 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
633 txq2 = efx_tx_queue_partner(tx_queue);
634 fill_level = max(tx_queue->insert_count - tx_queue->read_count,
637 netif_tx_wake_queue(tx_queue->core_txq);
641 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
642 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
643 if (tx_queue->read_count == tx_queue->old_write_count) {
645 tx_queue->empty_read_count =
646 tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
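
Lines 641-646 detect an empty ring: once read_count has caught up with a snapshot of write_count, the current read count is recorded together with a validity flag in a single word. The sketch below assumes the flag is the top bit of a 32-bit value:

#include <stdio.h>

/* Empty-queue detection as on lines 641-646: record the read count
 * together with a "valid" marker when reads have caught up with a
 * snapshot of writes.  The flag value here is an assumption. */
#define EMPTY_COUNT_VALID 0x80000000u

static unsigned int empty_read_count;

static void note_completion(unsigned int read_count, unsigned int write_count)
{
        if (read_count == write_count)
                empty_read_count = read_count | EMPTY_COUNT_VALID;
}

int main(void)
{
        note_completion(10, 12);
        printf("empty_read_count=%#x\n", empty_read_count);  /* still 0: not empty */
        note_completion(12, 12);
        printf("empty_read_count=%#x\n", empty_read_count);  /* 12 plus valid bit */
        return 0;
}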
661 static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
663 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
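
Line 663 sizes the TSO header page array: only every second ring entry needs a header slot, TSOH_PER_PAGE slots share one page, and DIV_ROUND_UP keeps a partial page from being truncated away. TSOH_PER_PAGE = 32 below is an assumed value for illustration:

#include <assert.h>

/* Array sizing as on line 663: one header slot per two ring entries,
 * TSOH_PER_PAGE slots per page, rounding any remainder up. */
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define TSOH_PER_PAGE 32    /* assumed for illustration */

static unsigned int tsoh_page_count(unsigned int entries)
{
        return DIV_ROUND_UP(entries, 2 * TSOH_PER_PAGE);
}

int main(void)
{
        assert(tsoh_page_count(512) == 8);   /* exact multiple */
        assert(tsoh_page_count(520) == 9);   /* remainder rounds up */
        return 0;
}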
666 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
668 struct efx_nic *efx = tx_queue->efx;
675 tx_queue->ptr_mask = entries - 1;
679 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
682 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
684 if (!tx_queue->buffer)
687 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
688 tx_queue->tsoh_page =
689 kcalloc(efx_tsoh_page_count(tx_queue),
690 sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
691 if (!tx_queue->tsoh_page) {
698 rc = efx_nic_probe_tx(tx_queue);
705 kfree(tx_queue->tsoh_page);
706 tx_queue->tsoh_page = NULL;
708 kfree(tx_queue->buffer);
709 tx_queue->buffer = NULL;
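
Lines 666-709 allocate the per-queue buffer array and derive ptr_mask; the masking trick only works if the entry count is a power of two, so the requested size is rounded up first. A minimal round-up helper showing why entries - 1 then acts as an index mask:

#include <assert.h>

/* The ring size is rounded up to a power of two so that
 * ptr_mask = entries - 1 (line 675) is a valid index mask. */
static unsigned int roundup_pow_of_two_u32(unsigned int n)
{
        unsigned int e = 1;

        while (e < n)
                e <<= 1;
        return e;
}

int main(void)
{
        unsigned int entries = roundup_pow_of_two_u32(1000);
        unsigned int ptr_mask = entries - 1;

        assert(entries == 1024);
        assert((1025 & ptr_mask) == 1);   /* free-running counter wraps to slot 1 */
        return 0;
}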
713 void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
715 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
716 "initialising TX queue %d\n", tx_queue->queue);
718 tx_queue->insert_count = 0;
719 tx_queue->write_count = 0;
720 tx_queue->old_write_count = 0;
721 tx_queue->read_count = 0;
722 tx_queue->old_read_count = 0;
723 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
726 efx_nic_init_tx(tx_queue);
728 tx_queue->initialised = true;
731 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
735 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
736 "shutting down TX queue %d\n", tx_queue->queue);
738 if (!tx_queue->buffer)
742 while (tx_queue->read_count != tx_queue->write_count) {
744 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
745 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
747 ++tx_queue->read_count;
749 netdev_tx_reset_queue(tx_queue->core_txq);
752 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
756 if (!tx_queue->buffer)
759 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
760 "destroying TX queue %d\n", tx_queue->queue);
761 efx_nic_remove_tx(tx_queue);
763 if (tx_queue->tsoh_page) {
764 for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
765 efx_nic_free_buffer(tx_queue->efx,
766 &tx_queue->tsoh_page[i]);
767 kfree(tx_queue->tsoh_page);
768 tx_queue->tsoh_page = NULL;
771 kfree(tx_queue->buffer);
772 tx_queue->buffer = NULL;
861 static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
872 (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
874 &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
879 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
887 tx_queue->tso_long_headers++;
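
Lines 872-874 locate the header buffer for the current ring slot: the slot number is halved (only every other descriptor carries a TSO header) and split into a page number plus, by assumption here, a fixed-size offset within that page (TSOH_STD_SIZE below is illustrative):

#include <stdio.h>

/* Header-slot lookup as on lines 872-874, with assumed constants. */
#define PTR_MASK       1023
#define TSOH_PER_PAGE  32
#define TSOH_STD_SIZE  128   /* assumed per-header slot size */

static void locate_header(unsigned int insert_count)
{
        unsigned int index = (insert_count & PTR_MASK) / 2;
        unsigned int page = index / TSOH_PER_PAGE;
        unsigned int offset = (index % TSOH_PER_PAGE) * TSOH_STD_SIZE;

        printf("insert %u -> page %u, byte offset %u\n",
               insert_count, page, offset);
}

int main(void)
{
        locate_header(0);
        locate_header(70);     /* index 35 -> page 1, slot 3 */
        locate_header(1030);   /* counter has wrapped the ring */
        return 0;
}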
903 * @tx_queue: Efx TX queue
910 static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
915 struct efx_nic *efx = tx_queue->efx;
921 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
922 ++tx_queue->insert_count;
924 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
925 tx_queue->read_count >=
955 static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
959 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
962 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
974 ++tx_queue->insert_count;
979 /* Remove buffers put into a tx_queue. None of the buffers must have
982 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
988 while (tx_queue->insert_count != insert_count) {
989 --tx_queue->insert_count;
990 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
991 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
1072 * @tx_queue: Efx TX queue
1079 static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
1100 efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
1124 * @tx_queue: Efx TX queue
1131 static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1136 efx_tx_queue_get_insert_buffer(tx_queue);
1155 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
1182 rc = efx_tso_put_header(tx_queue, buffer, header);
1201 ++tx_queue->insert_count;
1206 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
1221 ++tx_queue->insert_count;
1229 ++tx_queue->tso_packets;
1231 ++tx_queue->tx_packets;
1239 * @tx_queue: Efx TX queue
1244 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
1248 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1251 struct efx_nic *efx = tx_queue->efx;
1252 unsigned int old_insert_count = tx_queue->insert_count;
1276 if (tso_start_new_packet(tx_queue, skb, &state) < 0)
1280 tso_fill_packet_with_fragment(tx_queue, skb, &state);
1295 tso_start_new_packet(tx_queue, skb, &state) < 0)
1299 netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
1301 efx_tx_maybe_stop_queue(tx_queue);
1304 if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
1305 efx_nic_push_buffers(tx_queue);
1307 tx_queue->tso_bursts++;
1330 efx_enqueue_unwind(tx_queue, old_insert_count);
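
Lines 1248-1330 give the overall shape of the TSO path: emit a header for each MSS-sized segment, fill it from the payload, start the next segment when the current one is full, and unwind everything on error. A toy of just the segmentation loop, with fragment and DMA handling omitted:

#include <stdio.h>

/* Toy segmentation loop: one header per MSS-sized segment, payload
 * sliced in until it runs out.  Not the driver's TSO state machine. */
static void toy_tso(unsigned int payload_len, unsigned int mss)
{
        unsigned int seg = 0;

        while (payload_len) {
                unsigned int chunk = payload_len < mss ? payload_len : mss;

                printf("segment %u: header + %u payload bytes\n", seg++, chunk);
                payload_len -= chunk;
        }
}

int main(void)
{
        toy_tso(3000, 1448);   /* 1448 + 1448 + 104 */
        return 0;
}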