Lines Matching refs:txq

206 #define IS_TSO_HEADER(txq, addr) \
207 ((addr >= txq->tso_hdrs_dma) && \
208 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
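
A quick aside on the IS_TSO_HEADER() matches at 206-208: the driver keeps one coherent DMA block of tx_ring_size * TSO_HEADER_SIZE bytes for TSO headers, one slot per descriptor index, and the macro simply asks whether a buffer address falls inside that window (the completion path at 1217 checks it so those coherent header slots are not unmapped like ordinary buffers). A minimal standalone sketch of the same range check, with simplified types and an assumed TSO_HEADER_SIZE value:

#include <stdbool.h>
#include <stdint.h>

#define TSO_HEADER_SIZE 128             /* assumed slot size for this sketch */

struct txq_sketch {
        uint64_t tso_hdrs_dma;          /* DMA base of the TSO header block */
        unsigned int tx_ring_size;      /* one header slot per descriptor */
};

static bool is_tso_header(const struct txq_sketch *txq, uint64_t addr)
{
        return addr >= txq->tso_hdrs_dma &&
               addr <  txq->tso_hdrs_dma +
                       (uint64_t)txq->tx_ring_size * TSO_HEADER_SIZE;
}
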
219 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
225 if (bdp >= txq->tx_bd_base) {
226 base = txq->tx_bd_base;
227 ring_size = txq->tx_ring_size;
228 ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
250 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
256 if (bdp >= txq->tx_bd_base) {
257 base = txq->tx_bd_base;
258 ring_size = txq->tx_ring_size;
259 ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
280 struct fec_enet_priv_tx_q *txq)
284 entries = ((const char *)txq->dirty_tx -
285 (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
287 return entries > 0 ? entries : entries + txq->tx_ring_size;
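
The helper matched at 280-287 computes the number of free TX descriptors from the gap between dirty_tx (next descriptor to reclaim) and cur_tx (next descriptor to fill): the byte difference divided by fep->bufdesc_size, minus one reserved slot, wrapped by tx_ring_size whenever the result goes non-positive. A sketch of the same arithmetic using plain indices instead of descriptor pointers (names are illustrative):

#include <stdio.h>

static int free_txdesc_num(int dirty_tx, int cur_tx, int ring_size)
{
        /* index difference stands in for (byte difference / bufdesc_size) */
        int entries = dirty_tx - cur_tx - 1;

        return entries > 0 ? entries : entries + ring_size;
}

int main(void)
{
        printf("%d\n", free_txdesc_num(5, 10, 512)); /* 506: producer ahead of cleaner */
        printf("%d\n", free_txdesc_num(5, 5, 512));  /* 511: freshly reset ring, one slot kept unused */
        return 0;
}
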
315 struct fec_enet_priv_tx_q *txq;
321 txq = fep->tx_queue[0];
322 bdp = txq->tx_bd_base;
327 bdp == txq->cur_tx ? 'S' : ' ',
328 bdp == txq->dirty_tx ? 'H' : ' ',
330 txq->tx_skbuff[index]);
333 } while (bdp != txq->tx_bd_base);
359 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
366 struct bufdesc *bdp = txq->cur_tx;
411 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
414 memcpy(txq->tx_bounce[index], bufaddr, frag_len);
415 bufaddr = txq->tx_bounce[index];
435 txq->cur_tx = bdp;
440 bdp = txq->cur_tx;
449 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
467 entries_free = fec_enet_get_free_txdesc_num(fep, txq);
482 bdp = txq->cur_tx;
491 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
494 memcpy(txq->tx_bounce[index], skb->data, buflen);
495 bufaddr = txq->tx_bounce[index];
511 ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
542 last_bdp = txq->cur_tx;
543 index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
545 txq->tx_skbuff[index] = skb;
561 txq->cur_tx = bdp;
570 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
591 memcpy(txq->tx_bounce[index], data, size);
592 data = txq->tx_bounce[index];
633 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
652 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
653 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
656 memcpy(txq->tx_bounce[index], skb->data, hdr_len);
657 bufaddr = txq->tx_bounce[index];
689 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
696 struct bufdesc *bdp = txq->cur_tx;
704 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
724 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
729 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
731 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
740 index = fec_enet_get_bd_index(txq->tx_bd_base,
742 ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
758 txq->tx_skbuff[index] = skb;
761 txq->cur_tx = bdp;
784 struct fec_enet_priv_tx_q *txq;
789 txq = fep->tx_queue[queue];
793 ret = fec_enet_txq_submit_tso(txq, skb, ndev);
795 ret = fec_enet_txq_submit_skb(txq, skb, ndev);
799 entries_free = fec_enet_get_free_txdesc_num(fep, txq);
800 if (entries_free <= txq->tx_stop_threshold)
811 struct fec_enet_priv_tx_q *txq;
841 txq = fep->tx_queue[q];
842 bdp = txq->tx_bd_base;
843 txq->cur_tx = bdp;
845 for (i = 0; i < txq->tx_ring_size; i++) {
848 if (txq->tx_skbuff[i]) {
849 dev_kfree_skb_any(txq->tx_skbuff[i]);
850 txq->tx_skbuff[i] = NULL;
859 txq->dirty_tx = bdp;
875 struct fec_enet_priv_tx_q *txq;
890 txq = fep->tx_queue[i];
891 writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
903 struct fec_enet_priv_tx_q *txq;
907 txq = fep->tx_queue[i];
909 for (j = 0; j < txq->tx_ring_size; j++) {
910 if (txq->tx_skbuff[j]) {
911 dev_kfree_skb_any(txq->tx_skbuff[j]);
912 txq->tx_skbuff[j] = NULL;
1190 struct fec_enet_priv_tx_q *txq;
1199 txq = fep->tx_queue[queue_id];
1202 bdp = txq->dirty_tx;
1210 if (bdp == txq->cur_tx)
1213 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
1215 skb = txq->tx_skbuff[index];
1216 txq->tx_skbuff[index] = NULL;
1217 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1264 txq->dirty_tx = bdp;
1272 entries_free = fec_enet_get_free_txdesc_num(fep, txq);
1273 if (entries_free >= txq->tx_wake_threshold)
1279 if (bdp != txq->cur_tx &&
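
The matches at 1190-1279 outline the reclaim loop in the TX completion path: start at dirty_tx, stop when cur_tx is reached, free each completed skb (skipping the DMA unmap when the buffer sits in the coherent TSO header block), advance dirty_tx, and wake the subqueue once the free-descriptor count reaches tx_wake_threshold. A simplified index-based skeleton under those assumptions; the callbacks stand in for the real descriptor, DMA and netdev handling:

#include <stdbool.h>

struct reclaim_sketch {
        unsigned int dirty_tx, cur_tx, tx_ring_size, tx_wake_threshold;
        void **tx_skbuff;                       /* per-index packet pointers */
        bool (*hw_owns)(unsigned int index);    /* descriptor still in flight? */
        void (*release)(unsigned int index);    /* unmap (unless TSO header slot) + free skb */
        void (*wake_queue)(void);               /* netif_tx_wake_queue() equivalent */
};

static void tx_reclaim(struct reclaim_sketch *q)
{
        unsigned int i = q->dirty_tx;
        unsigned int entries_free;

        while (i != q->cur_tx && !q->hw_owns(i)) {
                if (q->tx_skbuff[i]) {
                        q->release(i);
                        q->tx_skbuff[i] = NULL;
                }
                i = (i + 1) % q->tx_ring_size;
                q->dirty_tx = i;
        }

        /* same arithmetic as free_txdesc_num(), written in modulo form */
        entries_free = (q->dirty_tx + q->tx_ring_size - q->cur_tx - 1) % q->tx_ring_size;
        if (entries_free >= q->tx_wake_threshold)
                q->wake_queue();
}
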
2508 struct fec_enet_priv_tx_q *txq;
2530 txq = fep->tx_queue[q];
2531 bdp = txq->tx_bd_base;
2532 for (i = 0; i < txq->tx_ring_size; i++) {
2533 kfree(txq->tx_bounce[i]);
2534 txq->tx_bounce[i] = NULL;
2535 skb = txq->tx_skbuff[i];
2536 txq->tx_skbuff[i] = NULL;
2546 struct fec_enet_priv_tx_q *txq;
2550 txq = fep->tx_queue[i];
2552 txq->tx_ring_size * TSO_HEADER_SIZE,
2553 txq->tso_hdrs,
2554 txq->tso_hdrs_dma);
2571 struct fec_enet_priv_tx_q *txq;
2574 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
2575 if (!txq) {
2580 fep->tx_queue[i] = txq;
2581 txq->tx_ring_size = TX_RING_SIZE;
2584 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
2585 txq->tx_wake_threshold =
2586 (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
2588 txq->tso_hdrs = dma_alloc_coherent(NULL,
2589 txq->tx_ring_size * TSO_HEADER_SIZE,
2590 &txq->tso_hdrs_dma,
2592 if (!txq->tso_hdrs) {
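
The allocation matches at 2574-2592 size each queue and derive its flow-control points: tx_stop_threshold is FEC_MAX_SKB_DESCS (presumably the worst-case descriptor count for one skb), tx_wake_threshold is half of the ring capacity above that, and the block also allocates the coherent TSO header area checked by IS_TSO_HEADER(). The transmit path (799-800) and the completion path (1272-1273) compare these thresholds against the free-descriptor count. A hedged sketch of that stop/wake hysteresis with placeholder names:

#include <stdbool.h>

struct txq_thresholds {
        int tx_ring_size;
        int tx_stop_threshold;  /* stop when another full-sized skb might not fit */
        int tx_wake_threshold;  /* wake only once a comfortable margin is free */
};

static void init_thresholds(struct txq_thresholds *t,
                            int ring_size, int max_descs_per_skb)
{
        t->tx_ring_size = ring_size;
        t->tx_stop_threshold = max_descs_per_skb;
        t->tx_wake_threshold = (t->tx_ring_size - t->tx_stop_threshold) / 2;
}

/* after queueing a packet (cf. 799-800) */
static bool should_stop(const struct txq_thresholds *t, int entries_free)
{
        return entries_free <= t->tx_stop_threshold;
}

/* after reclaiming completed descriptors (cf. 1272-1273) */
static bool should_wake(const struct txq_thresholds *t, int entries_free)
{
        return entries_free >= t->tx_wake_threshold;
}
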
2664 struct fec_enet_priv_tx_q *txq;
2666 txq = fep->tx_queue[queue];
2667 bdp = txq->tx_bd_base;
2668 for (i = 0; i < txq->tx_ring_size; i++) {
2669 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
2670 if (!txq->tx_bounce[i])
2956 struct fec_enet_priv_tx_q *txq;
3011 txq = fep->tx_queue[i];
3012 txq->index = i;
3013 txq->tx_bd_base = (struct bufdesc *)cbd_base;
3014 txq->bd_dma = bd_dma;
3016 bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
3018 (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
3020 bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
3021 cbd_base += txq->tx_ring_size;
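
The final cluster (3011-3021) shows probe-time ring setup: every queue's TX ring lives inside one shared coherent allocation, and the CPU pointer (cbd_base) and DMA address (bd_dma) are advanced in lockstep by tx_ring_size descriptors per queue, using the larger bufdesc_ex stride when extended descriptors are enabled. A hedged sketch of that carving; the helper and its types are illustrative, not driver definitions:

#include <stddef.h>
#include <stdint.h>

struct txq_ring {
        void *tx_bd_base;          /* CPU view of this queue's first descriptor */
        uint64_t bd_dma;           /* DMA address later written to FEC_X_DES_START */
        unsigned int tx_ring_size;
};

static void carve_tx_rings(struct txq_ring *rings, unsigned int nqueues,
                           void *cbd_base, uint64_t bd_dma, size_t desc_size)
{
        /* desc_size would be sizeof(struct bufdesc_ex) or sizeof(struct bufdesc) */
        for (unsigned int q = 0; q < nqueues; q++) {
                rings[q].tx_bd_base = cbd_base;
                rings[q].bd_dma = bd_dma;

                /* advance both views past this queue's ring */
                cbd_base = (char *)cbd_base + desc_size * rings[q].tx_ring_size;
                bd_dma += desc_size * rings[q].tx_ring_size;
        }
}
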