Lines Matching defs:priv
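The matches below come from the TX path of the mlx4 Ethernet driver (en_tx.c); the leading number on each line is its line number in that source file. `priv` is the per-interface private state, a struct mlx4_en_priv obtained with netdev_priv() (lines 279, 309, 499), which nearly every function in the file takes or derives.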

56 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
60 struct mlx4_en_dev *mdev = priv->mdev;
77 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
90 en_err(priv, "Failed allocating hwq resources\n");
96 en_err(priv, "Failed to map TX buffer\n");
102 en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
109 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
116 en_dbg(DRV, priv, "working without blueflame (%d)", err);
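
The creation path (lines 56-116) sets up the tx_info array, the hardware queue resources, the buffer mapping, and a QP, logging each failure through en_err. A minimal userspace sketch of the goto-unwind idiom such kernel allocation paths use; create_ring and struct ring here are hypothetical stand-ins, not the driver's API:

    #include <stdlib.h>

    struct ring {
        void *tx_info;    /* stands in for the tx_info array (line 77) */
        void *buf;        /* stands in for the hwq buffer (line 90) */
    };

    static int create_ring(struct ring *r, size_t size)
    {
        r->tx_info = malloc(size);
        if (!r->tx_info)
            goto err_out;              /* nothing to undo yet */

        r->buf = malloc(size);
        if (!r->buf)
            goto err_info;             /* undo only tx_info */

        /* map the buffer, reserve the QP (lines 96-109); any later
         * failure unwinds buf, then tx_info, in reverse order */
        return 0;

    err_info:
        free(r->tx_info);
    err_out:
        return -1;
    }

Each label releases exactly what was live at the point of the jump; mlx4_en_destroy_tx_ring (line 138) then releases the same resources unconditionally.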
138 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
141 struct mlx4_en_dev *mdev = priv->mdev;
142 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
157 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
161 struct mlx4_en_dev *mdev = priv->mdev;
176 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
187 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
190 struct mlx4_en_dev *mdev = priv->mdev;
197 static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
216 dma_unmap_single(priv->ddev,
225 dma_unmap_page(priv->ddev,
243 dma_unmap_single(priv->ddev,
255 dma_unmap_page(priv->ddev,
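
Lines 216-255 show how a completed descriptor's DMA mappings are released: the linear (header) part goes through dma_unmap_single and each page fragment through dma_unmap_page, with the two branches covering descriptors that do and do not wrap the ring. A sketch of that order only; unmap_single/unmap_page are stand-ins for the DMA API and struct frag is a hypothetical type:

    #include <stddef.h>

    struct frag { void *addr; size_t len; };

    static void unmap_single(void *addr, size_t len) { (void)addr; (void)len; }
    static void unmap_page(void *addr, size_t len)   { (void)addr; (void)len; }

    static void free_tx_desc(void *hdr, size_t hdr_len,
                             const struct frag *frags, int nr_frags)
    {
        int i;

        if (hdr)                           /* linear part, if present */
            unmap_single(hdr, hdr_len);
        for (i = 0; i < nr_frags; i++)     /* then every page fragment */
            unmap_page(frags[i].addr, frags[i].len);
    }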
279 struct mlx4_en_priv *priv = netdev_priv(dev);
284 en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
288 if (netif_msg_tx_err(priv))
289 en_warn(priv, "Tx consumer passed producer!\n");
294 ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
302 en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
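
Lines 284-302 (mlx4_en_free_tx_buf) walk the ring from the consumer index to the producer index, releasing descriptors that were posted but never completed. The indices grow monotonically and are masked by (size - 1) only when used, which is what lets line 289 detect a consumer that overran the producer. A minimal sketch under that assumption (ring size a power of two):

    #include <stdio.h>
    #include <stdint.h>

    #define RING_SIZE 256u                 /* must be a power of two */

    static int free_tx_buf(uint32_t cons, uint32_t prod)
    {
        int cnt = 0;

        if (prod - cons > RING_SIZE)       /* wrap-safe unsigned check */
            fprintf(stderr, "Tx consumer passed producer!\n");

        while (cons != prod) {
            /* free the descriptor at slot (cons & (RING_SIZE - 1));
             * the driver advances by that descriptor's txbb count */
            cons++;
            cnt++;
        }
        printf("Freed %d uncompleted tx descriptors\n", cnt);
        return cnt;
    }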
309 struct mlx4_en_priv *priv = netdev_priv(dev);
311 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
321 if (!priv->port_up)
345 priv, ring, ring_index,
371 priv->port_stats.wake_queue++;
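
Lines 309-371 are the completion side: mlx4_en_process_tx_cq frees finished descriptors and, once enough slots are free again, restarts a stopped queue and bumps the wake_queue counter (line 371). A sketch of that wake condition; WAKE_THRESHOLD and queue_stopped are illustrative stand-ins, not the driver's names:

    #include <stdbool.h>
    #include <stdint.h>

    #define RING_SIZE      256u
    #define WAKE_THRESHOLD 64u     /* hypothetical headroom before waking */

    static bool queue_stopped;          /* set by the xmit path on a full ring */
    static unsigned long wake_queue;    /* the counter from line 371 */

    static void process_tx_cq(uint32_t *cons, uint32_t prod, uint32_t done)
    {
        *cons += done;                  /* completed slots become free */

        /* restart a stopped queue only when real room has opened up */
        if (queue_stopped && RING_SIZE - (prod - *cons) >= WAKE_THRESHOLD) {
            queue_stopped = false;
            wake_queue++;
        }
    }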
379 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
380 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
393 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
394 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
397 INC_PERF_COUNTER(priv->pstats.tx_poll);
409 if (inflight && priv->port_up)
415 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
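
mlx4_en_bounce_to_desc (line 415) deals with a descriptor that would run past the end of the ring buffer: the xmit path builds it in a side ("bounce") buffer and this helper copies it back in two chunks around the wrap point. A standalone sketch of that copy:

    #include <stdint.h>
    #include <string.h>

    static void bounce_to_ring(uint8_t *ring_buf, size_t ring_bytes,
                               size_t offset, const uint8_t *bounce,
                               size_t desc_bytes)
    {
        size_t first = ring_bytes - offset;   /* room left before the wrap */

        if (first > desc_bytes)
            first = desc_bytes;
        memcpy(ring_buf + offset, bounce, first);             /* tail chunk */
        memcpy(ring_buf, bounce + first, desc_bytes - first); /* wrapped rest */
    }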
443 static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
445 struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
446 struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
457 mlx4_en_process_tx_cq(priv->dev, cq);
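
mlx4_en_xmit_poll (lines 443-457) reclaims TX completions from the transmit path itself rather than waiting for an interrupt. The reaping has to be serialized so two contexts never process the same CQ at once; whether the driver uses exactly a try-lock for this is an assumption here, but a pthread analogue of that non-blocking pattern looks like:

    #include <pthread.h>

    static pthread_mutex_t comp_lock = PTHREAD_MUTEX_INITIALIZER;

    static void xmit_poll(void)
    {
        /* never block the hot xmit path: if the lock is busy, someone
         * else is already reaping completions, so just move on */
        if (pthread_mutex_trylock(&comp_lock) == 0) {
            /* process_tx_cq(dev, cq) would run here (line 457) */
            pthread_mutex_unlock(&comp_lock);
        }
    }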
499 struct mlx4_en_priv *priv = netdev_priv(dev);
512 if (netif_msg_tx_err(priv))
513 en_warn(priv, "Non-linear headers\n");
573 struct mlx4_en_priv *priv = netdev_priv(dev);
579 if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
594 struct mlx4_en_priv *priv = netdev_priv(dev);
595 struct mlx4_en_dev *mdev = priv->mdev;
616 if (!priv->port_up)
627 if (netif_msg_tx_err(priv))
628 en_warn(priv, "Oversized header or SG list\n");
633 ring = &priv->tx_ring[tx_ind];
643 priv->port_stats.queue_stopped++;
646 cq = &priv->tx_cq[tx_ind];
647 mlx4_en_arm_cq(priv, cq);
652 AVG_PERF_COUNTER(priv->pstats.inflight_avg,
679 tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
707 priv->port_stats.tso_packets++;
722 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
735 dma = skb_frag_dma_map(priv->ddev, frag,
747 dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
764 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
796 mlx4_en_xmit_poll(priv, tx_ind);
802 priv->stats.tx_dropped++;
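
Lines 594-802 are mlx4_en_xmit itself. Around the counters matched above, the control flow is: drop the packet (tx_dropped, line 802) when the port is down (line 616) or the header layout is unusable (line 628); stop the queue and bump queue_stopped (line 643) when the ring has no room, arming the CQ (line 647) so a completion will wake it. A condensed sketch with stand-in names:

    #include <stdbool.h>

    enum xmit_status { XMIT_OK, XMIT_DROP, XMIT_BUSY };

    static enum xmit_status en_xmit(bool port_up, bool hdr_ok,
                                    int free_slots, int needed_slots)
    {
        if (!port_up || !hdr_ok)
            return XMIT_DROP;       /* tx_dropped++ (line 802) */

        if (free_slots < needed_slots) {
            /* stop the queue, queue_stopped++ (line 643), arm the CQ
             * (line 647) so a TX completion restarts the queue */
            return XMIT_BUSY;
        }

        /* build the descriptor, map fragments (lines 735-747), post it,
         * and occasionally call xmit_poll (line 796) to reclaim slots */
        return XMIT_OK;
    }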