Lines matching refs: txq

(Each entry below is prefixed with its line number in the matched source file; the il_priv/il_tx_queue identifiers are from the Linux iwlegacy wireless driver.)

382 		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
2724 il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
2727 int txq_id = txq->q.id;
2729 if (txq->need_update == 0)
2747 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2755 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2756 txq->need_update = 0;
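
The matches at 2724-2756 are il_txq_update_write_ptr(): nothing is written unless need_update is set, and the value sent to HBUS_TARG_WRPTR packs the queue id into bits 8 and up, above the write pointer. A minimal userspace sketch of that pattern; hw_write() and the register offset are illustrative stand-ins for the driver's il_wr()/_il_wr():

    #include <stdint.h>
    #include <stdio.h>

    #define HBUS_TARG_WRPTR 0x460   /* offset assumed for illustration */

    /* Hypothetical stand-in for il_wr()/_il_wr() register writes. */
    static void hw_write(uint32_t reg, uint32_t val)
    {
        printf("reg 0x%03x <- 0x%08x\n", (unsigned)reg, (unsigned)val);
    }

    struct txq_model {
        int id;                 /* txq->q.id in the driver */
        uint32_t write_ptr;     /* txq->q.write_ptr */
        int need_update;        /* txq->need_update */
    };

    /* Update-if-dirty: bail out when nothing changed, encode the
     * queue id above the write pointer, then clear the flag. */
    static void txq_update_write_ptr(struct txq_model *q)
    {
        if (q->need_update == 0)
            return;
        hw_write(HBUS_TARG_WRPTR, q->write_ptr | (q->id << 8));
        q->need_update = 0;
    }
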
2766 struct il_tx_queue *txq = &il->txq[txq_id];
2767 struct il_queue *q = &txq->q;
2773 il->ops->txq_free_tfd(il, txq);
2781 * @txq: Transmit queue to deallocate.
2785 * 0-fill, but do not free "txq" descriptor structure.
2790 struct il_tx_queue *txq = &il->txq[txq_id];
2798 kfree(txq->cmd[i]);
2801 if (txq->q.n_bd)
2802 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2803 txq->tfds, txq->q.dma_addr);
2806 kfree(txq->skbs);
2807 txq->skbs = NULL;
2810 kfree(txq->cmd);
2811 kfree(txq->meta);
2812 txq->cmd = NULL;
2813 txq->meta = NULL;
2816 memset(txq, 0, sizeof(*txq));
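
Matches 2766-2816 (il_tx_queue_free) and the command-queue variant at 2869-2891 tear a queue down in a fixed order and then zero the whole structure so stale pointers cannot be reused. A plain-C sketch of that order, with malloc/free standing in for the kernel allocators:

    #include <stdlib.h>
    #include <string.h>

    struct sketch_txq {
        void  *tfds;    /* DMA-coherent descriptor block in the driver */
        void **skbs;    /* per-slot skb pointers */
        void **cmd;     /* per-slot command buffers */
        void  *meta;    /* per-slot command metadata */
        int    n_cmd;   /* number of command slots */
    };

    /* Teardown in the same order as the matches: per-slot command
     * buffers first, then the descriptor block (dma_free_coherent()
     * in the driver), the skb table, the cmd/meta arrays, and finally
     * a memset so every field reads as "not allocated". */
    static void sketch_txq_free(struct sketch_txq *txq)
    {
        int i;

        for (i = 0; i < txq->n_cmd; i++)
            free(txq->cmd[i]);
        free(txq->tfds);
        free(txq->skbs);
        free(txq->cmd);
        free(txq->meta);
        memset(txq, 0, sizeof(*txq));
    }
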
2826 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2827 struct il_queue *q = &txq->q;
2836 if (txq->meta[i].flags & CMD_MAPPED) {
2838 dma_unmap_addr(&txq->meta[i], mapping),
2839 dma_unmap_len(&txq->meta[i], len),
2841 txq->meta[i].flags = 0;
2848 if (txq->meta[i].flags & CMD_MAPPED) {
2850 dma_unmap_addr(&txq->meta[i], mapping),
2851 dma_unmap_len(&txq->meta[i], len),
2853 txq->meta[i].flags = 0;
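
The matches at 2826-2853 show two near-identical unmap loops over the command queue's meta array, apparently one for the in-flight slots and one for the oversized-command slot; in both, only entries flagged CMD_MAPPED hold a live DMA mapping and get unmapped. A sketch of the flag-guarded loop (the flag's value here is assumed, not the driver's):

    #define CMD_MAPPED 0x01     /* illustrative value */

    struct cmd_meta_model {
        unsigned flags;         /* plus DMA address/length in the driver */
    };

    /* Only slots carrying the MAPPED bit are unmapped; clearing the
     * flag afterwards makes a second pass (or the free path) skip
     * the slot. */
    static void unmap_cmd_slots(struct cmd_meta_model *meta, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (meta[i].flags & CMD_MAPPED) {
                /* pci_unmap_single(...) in the driver */
                meta[i].flags = 0;
            }
        }
    }
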
2860 * @txq: Transmit queue to deallocate.
2864 * 0-fill, but do not free "txq" descriptor structure.
2869 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2877 kfree(txq->cmd[i]);
2880 if (txq->q.n_bd)
2881 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2882 txq->tfds, txq->q.dma_addr);
2885 kfree(txq->cmd);
2886 kfree(txq->meta);
2887 txq->cmd = NULL;
2888 txq->meta = NULL;
2891 memset(txq, 0, sizeof(*txq));
2975 il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2983 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX,
2986 if (!txq->skbs) {
2991 txq->skbs = NULL;
2995 txq->tfds =
2996 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2997 if (!txq->tfds)
3000 txq->q.id = id;
3005 kfree(txq->skbs);
3006 txq->skbs = NULL;
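
Matches 2975-3006 (il_tx_queue_alloc) allocate the per-slot skb table and then the DMA-coherent TFD block, unwinding the skb table on failure. A sketch of the same shape, assuming the driver's TFD_QUEUE_SIZE_MAX ring depth and using plain malloc in place of dma_alloc_coherent():

    #include <stdlib.h>

    #define TFD_QUEUE_SIZE_MAX 256  /* ring depth, as in the driver */

    struct txq_alloc_model {
        void **skbs;
        void  *tfds;
        int    id;
    };

    /* skb table first, then the descriptor block; the unwind path
     * frees the table and re-NULLs it so a later free sees
     * "not allocated". */
    static int sketch_txq_alloc(struct txq_alloc_model *txq,
                                size_t tfd_size, int id)
    {
        txq->skbs = calloc(TFD_QUEUE_SIZE_MAX, sizeof(*txq->skbs));
        if (!txq->skbs)
            return -1;

        txq->tfds = malloc(tfd_size * TFD_QUEUE_SIZE_MAX);
        if (!txq->tfds) {
            free(txq->skbs);
            txq->skbs = NULL;
            return -1;
        }
        txq->id = id;
        return 0;
    }
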
3019 struct il_tx_queue *txq = &il->txq[txq_id];
3037 txq->meta =
3039 txq->cmd =
3042 if (!txq->meta || !txq->cmd)
3051 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
3052 if (!txq->cmd[i])
3057 ret = il_tx_queue_alloc(il, txq, txq_id);
3061 txq->need_update = 0;
3069 il_set_swq_id(txq, txq_id, txq_id);
3072 il_queue_init(il, &txq->q, slots, txq_id);
3075 il->ops->txq_init(il, txq);
3080 kfree(txq->cmd[i]);
3082 kfree(txq->meta);
3083 kfree(txq->cmd);
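
Matches 3019-3083 (il_tx_queue_init) build on the allocator: the meta and cmd arrays come first, then one command buffer per slot, then the ring setup and the hardware-specific ops->txq_init() hook, with the kfree loop at 3080-3083 as the unwind path. A sketch of that ordering and its unwind:

    #include <stdlib.h>

    struct txq_init_model {
        void  *meta;    /* array of per-slot command metadata */
        void **cmd;     /* per-slot command buffers */
        int    need_update;
    };

    /* On any failure, free whatever was already allocated; calloc
     * zeroes the cmd array, so free(NULL) covers unfilled slots. */
    static int sketch_txq_init(struct txq_init_model *txq, int slots,
                               size_t meta_sz, size_t cmd_len)
    {
        int i;

        txq->meta = calloc(slots, meta_sz);
        txq->cmd = calloc(slots, sizeof(*txq->cmd));
        if (!txq->meta || !txq->cmd)
            goto err;

        for (i = 0; i < slots; i++) {
            txq->cmd[i] = malloc(cmd_len);
            if (!txq->cmd[i])
                goto err;
        }
        txq->need_update = 0;
        /* the driver then sizes the ring, sets the swq id, and
         * calls ops->txq_init() */
        return 0;

    err:
        if (txq->cmd)
            for (i = 0; i < slots; i++)
                free(txq->cmd[i]);
        free(txq->meta);
        free(txq->cmd);
        return -1;
    }
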
3093 struct il_tx_queue *txq = &il->txq[txq_id];
3103 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
3104 txq->need_update = 0;
3107 il_queue_init(il, &txq->q, slots, txq_id);
3110 il->ops->txq_init(il, txq);
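
Matches 3093-3110 show the reset path reusing the same il_queue_init() and ops->txq_init() calls but only memset()-ing the existing meta array instead of reallocating anything. A small sketch of that distinction:

    #include <string.h>

    struct meta_slot { unsigned flags; };
    struct ring_ptrs { unsigned read_ptr, write_ptr; int need_update; };

    /* Reset keeps every buffer from the init path and only clears
     * the bookkeeping, so the queue is reusable without realloc. */
    static void sketch_txq_reset(struct meta_slot *meta, int slots,
                                 struct ring_ptrs *q)
    {
        memset(meta, 0, sizeof(*meta) * slots);
        q->read_ptr = 0;
        q->write_ptr = 0;
        q->need_update = 0;
    }
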
3128 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3129 struct il_queue *q = &txq->q;
3167 out_cmd = txq->cmd[idx];
3168 out_meta = &txq->meta[idx];
3226 txq->need_update = 1;
3230 il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
3232 il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
3237 il_txq_update_write_ptr(il, txq);
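
Matches 3128-3237 (host-command enqueue) claim the slot at write_ptr, fill out_cmd/out_meta, attach the buffer to the TFD, set need_update, and ring the doorbell through il_txq_update_write_ptr(). A generic power-of-two ring sketch of that enqueue shape; the free-space formula is illustrative, not the driver's exact one:

    #include <stdint.h>

    #define RING_SIZE 256           /* power of two, as in the driver */

    struct ring {
        uint32_t read_ptr;
        uint32_t write_ptr;
        int need_update;
    };

    /* Free slots, keeping one gap so a full ring is distinguishable
     * from an empty one. */
    static uint32_t ring_space(const struct ring *q)
    {
        return (q->read_ptr - q->write_ptr - 1) & (RING_SIZE - 1);
    }

    /* Claim the slot at write_ptr, advance it modulo the ring size,
     * mark the queue dirty, and let the write-pointer helper ring
     * the doorbell. */
    static int enqueue(struct ring *q)
    {
        uint32_t idx;

        if (ring_space(q) == 0)
            return -1;              /* queue full */

        idx = q->write_ptr;
        /* ... fill cmd[idx]/meta[idx], ops->txq_attach_buf_to_tfd ... */
        q->write_ptr = (q->write_ptr + 1) & (RING_SIZE - 1);
        q->need_update = 1;
        /* il_txq_update_write_ptr() runs here in the driver */
        return (int)idx;
    }
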
3254 struct il_tx_queue *txq = &il->txq[txq_id];
3255 struct il_queue *q = &txq->q;
3259 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
3296 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3305 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
3306 il->txq[il->cmd_queue].q.write_ptr)) {
3311 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
3312 cmd = txq->cmd[cmd_idx];
3313 meta = &txq->meta[cmd_idx];
3315 txq->time_stamp = jiffies;
4647 if (!il->txq)
4648 il->txq =
4651 if (!il->txq) {
4652 IL_ERR("Not enough memory for txq\n");
4662 kfree(il->txq);
4663 il->txq = NULL;
4770 if (il->txq == NULL)
4779 q = &il->txq[i].q;
4803 struct il_tx_queue *txq = &il->txq[cnt];
4804 struct il_queue *q = &txq->q;
4810 txq->time_stamp = now;
4815 txq->time_stamp +
4820 jiffies_to_msecs(now - txq->time_stamp));
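
Matches 4770-4820 are the stuck-queue watchdog: completions refresh txq->time_stamp (match 3315), an idle queue gets its stamp reset to now, and a queue whose stamp is older than the timeout is flagged as stuck, with the stall duration logged. A sketch using wall-clock seconds in place of the driver's jiffies arithmetic:

    #include <time.h>

    struct wd_txq {
        time_t time_stamp;  /* refreshed on completions and when idle */
    };

    /* Returns nonzero when the queue has made no progress for longer
     * than the timeout; the driver would log the stall duration and
     * force a firmware restart. */
    static int txq_stalled(const struct wd_txq *q, time_t now,
                           time_t timeout)
    {
        return (now - q->time_stamp) > timeout;
    }
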