Lines Matching refs:txq

149 struct iwl_txq *txq = (void *)data;
150 struct iwl_queue *q = &txq->q;
151 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
154 SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
158 spin_lock(&txq->lock);
160 if (txq->q.read_ptr == txq->q.write_ptr) {
161 spin_unlock(&txq->lock);
164 spin_unlock(&txq->lock);
166 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
169 txq->q.read_ptr, txq->q.write_ptr);
204 le32_to_cpu(txq->scratchbufs[i].scratch));
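The matches above (lines 149-204) are the queue watchdog: the timer callback recovers the txq from its data argument, an idle ring (read_ptr == write_ptr) is simply unlocked and ignored, and only a non-empty queue is reported as stuck before the scratch buffers are dumped. A minimal user-space sketch of that check, using illustrative demo_* types rather than the driver's, with a pthread mutex standing in for the txq spinlock:

#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

struct demo_queue {
	int read_ptr;
	int write_ptr;
};

struct demo_txq {
	int id;
	struct demo_queue q;
	pthread_mutex_t lock;
};

/* Timer callback: returns true when the queue looks stuck at expiry. */
static bool demo_stuck_timer(void *data)
{
	struct demo_txq *txq = data;	/* timer data carries the queue */
	bool stuck;

	pthread_mutex_lock(&txq->lock);
	/* read_ptr == write_ptr means the hardware drained everything */
	stuck = (txq->q.read_ptr != txq->q.write_ptr);
	pthread_mutex_unlock(&txq->lock);

	if (stuck)
		fprintf(stderr, "Queue %d stuck: rd=%d wr=%d\n",
			txq->id, txq->q.read_ptr, txq->q.write_ptr);
	return stuck;
}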
213 struct iwl_txq *txq, u16 byte_cnt)
217 int write_ptr = txq->q.write_ptr;
218 int txq_id = txq->q.id;
224 (void *) txq->entries[txq->q.write_ptr].cmd->payload;
258 struct iwl_txq *txq)
263 int txq_id = txq->q.id;
264 int read_ptr = txq->q.read_ptr;
268 (void *)txq->entries[txq->q.read_ptr].cmd->payload;
287 struct iwl_txq *txq)
291 int txq_id = txq->q.id;
293 lockdep_assert_held(&txq->lock);
316 txq->need_update = true;
325 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
326 iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
335 struct iwl_txq *txq = &trans_pcie->txq[i];
337 spin_lock_bh(&txq->lock);
338 if (trans_pcie->txq[i].need_update) {
339 iwl_pcie_txq_inc_wr_ptr(trans, txq);
340 trans_pcie->txq[i].need_update = false;
342 spin_unlock_bh(&txq->lock);
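Lines 287-342 match the write-pointer ("doorbell") path: iwl_pcie_txq_inc_wr_ptr either writes HBUS_TARG_WRPTR with write_ptr | (txq_id << 8) or only marks the queue with need_update, and a later pass walks every queue under its lock and flushes the pending updates. A rough sketch of that deferred-doorbell pattern; the device_awake condition and the demo_* names are assumptions, only the register encoding is taken from line 326 (locking omitted for brevity):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_NUM_QUEUES 8		/* illustrative */

struct demo_txq {
	int id;
	int write_ptr;
	bool need_update;
};

static struct demo_txq demo_queues[DEMO_NUM_QUEUES];

/* Stand-in for iwl_write32(trans, HBUS_TARG_WRPTR, ...); encoding from line 326. */
static void demo_ring_doorbell(struct demo_txq *txq)
{
	uint32_t val = (uint32_t)txq->write_ptr | ((uint32_t)txq->id << 8);

	printf("Q:%d WR: 0x%x\n", txq->id, (unsigned)val);
}

static void demo_inc_wr_ptr(struct demo_txq *txq, bool device_awake)
{
	if (!device_awake) {
		/* cannot poke the device now: flush it in the later pass */
		txq->need_update = true;
		return;
	}
	demo_ring_doorbell(txq);
}

/* Later pass over all queues, flushing any pending updates. */
static void demo_check_wrptrs(void)
{
	for (int i = 0; i < DEMO_NUM_QUEUES; i++) {
		if (demo_queues[i].need_update) {
			demo_ring_doorbell(&demo_queues[i]);
			demo_queues[i].need_update = false;
		}
	}
}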
405 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
407 * @txq - tx queue
413 static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
415 struct iwl_tfd *tfd_tmp = txq->tfds;
420 int rd_ptr = txq->q.read_ptr;
421 int idx = get_cmd_index(&txq->q, rd_ptr);
423 lockdep_assert_held(&txq->lock);
425 /* We have only q->n_window txq->entries, but we use
428 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
431 if (txq->entries) {
434 skb = txq->entries[idx].skb;
442 txq->entries[idx].skb = NULL;
447 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
454 q = &txq->q;
455 tfd_tmp = txq->tfds;
480 struct iwl_txq *txq, int slots_num,
488 if (WARN_ON(txq->entries || txq->tfds))
491 setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
492 (unsigned long)txq);
493 txq->trans_pcie = trans_pcie;
495 txq->q.n_window = slots_num;
497 txq->entries = kcalloc(slots_num,
501 if (!txq->entries)
506 txq->entries[i].cmd =
509 if (!txq->entries[i].cmd)
515 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
516 &txq->q.dma_addr, GFP_KERNEL);
517 if (!txq->tfds)
520 BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
525 scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
527 txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
528 &txq->scratchbufs_dma,
530 if (!txq->scratchbufs)
533 txq->q.id = txq_id;
537 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
539 if (txq->entries && txq_id == trans_pcie->cmd_queue)
541 kfree(txq->entries[i].cmd);
542 kfree(txq->entries);
543 txq->entries = NULL;
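Lines 480-543 are the queue allocator: it allocates the entries array, the per-slot command buffers, and the DMA-coherent TFD and scratch-buffer rings, and on failure unwinds in reverse order, NULLing entries so later teardown can tell what exists. A user-space sketch of that allocate-then-unwind pattern, with malloc/free standing in for kcalloc/dma_alloc_coherent and an illustrative command-buffer size:

#include <stdlib.h>

struct demo_entry {
	void *cmd;
};

struct demo_txq {
	struct demo_entry *entries;
	void *tfds;
	int n_window;
};

static int demo_txq_alloc(struct demo_txq *txq, int slots_num, size_t tfd_sz)
{
	int i;

	txq->n_window = slots_num;

	txq->entries = calloc(slots_num, sizeof(*txq->entries));
	if (!txq->entries)
		goto error;

	for (i = 0; i < slots_num; i++) {
		txq->entries[i].cmd = malloc(128);	/* illustrative size */
		if (!txq->entries[i].cmd)
			goto error;
	}

	txq->tfds = malloc(tfd_sz);	/* dma_alloc_coherent in the driver */
	if (!txq->tfds)
		goto error;

	return 0;

error:
	if (txq->entries)
		for (i = 0; i < slots_num; i++)
			free(txq->entries[i].cmd);	/* free(NULL) is a no-op */
	free(txq->entries);
	txq->entries = NULL;
	return -1;
}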
549 static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
554 txq->need_update = false;
561 ret = iwl_queue_init(&txq->q, slots_num, txq_id);
565 spin_lock_init(&txq->lock);
572 txq->q.dma_addr >> 8);
583 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
584 struct iwl_queue *q = &txq->q;
586 spin_lock_bh(&txq->lock);
590 iwl_pcie_txq_free_tfd(trans, txq);
593 txq->active = false;
594 spin_unlock_bh(&txq->lock);
597 iwl_wake_queue(trans, txq);
602 * @txq: Transmit queue to deallocate.
606 * 0-fill, but do not free "txq" descriptor structure.
611 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
615 if (WARN_ON(!txq))
622 for (i = 0; i < txq->q.n_window; i++) {
623 kzfree(txq->entries[i].cmd);
624 kzfree(txq->entries[i].free_buf);
628 if (txq->tfds) {
631 txq->tfds, txq->q.dma_addr);
632 txq->q.dma_addr = 0;
633 txq->tfds = NULL;
636 sizeof(*txq->scratchbufs) * txq->q.n_window,
637 txq->scratchbufs, txq->scratchbufs_dma);
640 kfree(txq->entries);
641 txq->entries = NULL;
643 del_timer_sync(&txq->stuck_timer);
646 memset(txq, 0, sizeof(*txq));
712 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
715 txq->q.dma_addr >> 8);
717 txq->q.read_ptr = 0;
718 txq->q.write_ptr = 0;
765 if (!trans_pcie->txq)
787 if (trans_pcie->txq) {
793 kfree(trans_pcie->txq);
794 trans_pcie->txq = NULL;
816 if (WARN_ON(trans_pcie->txq)) {
835 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
837 if (!trans_pcie->txq) {
838 IWL_ERR(trans, "Not enough memory for txq\n");
848 ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
870 if (!trans_pcie->txq) {
893 ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
910 struct iwl_txq *txq)
919 if (txq->q.read_ptr == txq->q.write_ptr)
920 del_timer(&txq->stuck_timer);
922 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
930 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
932 struct iwl_queue *q = &txq->q;
939 spin_lock_bh(&txq->lock);
941 if (!txq->active) {
947 if (txq->q.read_ptr == tfd_num)
951 txq_id, txq->q.read_ptr, tfd_num, ssn);
959 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
972 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
975 __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
977 txq->entries[txq->q.read_ptr].skb = NULL;
979 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
981 iwl_pcie_txq_free_tfd(trans, txq);
984 iwl_pcie_txq_progress(trans_pcie, txq);
986 if (iwl_queue_space(&txq->q) > txq->q.low_mark)
987 iwl_wake_queue(trans, txq);
989 spin_unlock_bh(&txq->lock);
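Lines 930-989 match the reclaim path: frames the firmware has completed are returned by walking read_ptr up to tfd_num, collecting each entry's skb and freeing its TFD, and the queue is woken again once iwl_queue_space rises above low_mark. A simplified sketch of that loop; the ring size, the space formula, and the demo_* names are assumptions, not the driver's:

#include <stdbool.h>
#include <stddef.h>

#define DEMO_QUEUE_SIZE 256		/* power of two, illustrative */

struct demo_entry {
	void *skb;
};

struct demo_txq {
	struct demo_entry entries[DEMO_QUEUE_SIZE];
	int read_ptr;
	int write_ptr;
	int low_mark;
	bool active;
};

static int demo_queue_space(const struct demo_txq *q)
{
	/* free slots in the ring, always keeping one slot empty */
	return (int)(((unsigned)(q->read_ptr - q->write_ptr - 1)) &
		     (DEMO_QUEUE_SIZE - 1));
}

/* collect() receives each reclaimed frame; returns whether to wake the queue. */
static bool demo_reclaim(struct demo_txq *txq, int tfd_num,
			 void (*collect)(void *skb))
{
	if (!txq->active || txq->read_ptr == tfd_num)
		return false;

	while (txq->read_ptr != tfd_num) {
		collect(txq->entries[txq->read_ptr].skb);
		txq->entries[txq->read_ptr].skb = NULL;
		txq->read_ptr = (txq->read_ptr + 1) & (DEMO_QUEUE_SIZE - 1);
	}

	return demo_queue_space(txq) > txq->low_mark;
}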
1002 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
1003 struct iwl_queue *q = &txq->q;
1007 lockdep_assert_held(&txq->lock);
1011 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
1038 iwl_pcie_txq_progress(trans_pcie, txq);
1102 trans_pcie->txq[txq_id].ampdu = true;
1111 ssn = trans_pcie->txq[txq_id].q.read_ptr;
1117 trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
1118 trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
1151 trans_pcie->txq[txq_id].active = true;
1184 trans_pcie->txq[txq_id].ampdu = false;
1204 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
1205 struct iwl_queue *q = &txq->q;
1290 spin_lock_bh(&txq->lock);
1293 spin_unlock_bh(&txq->lock);
1302 out_cmd = txq->entries[idx].cmd;
1303 out_meta = &txq->entries[idx].meta;
1365 memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
1366 iwl_pcie_txq_build_tfd(trans, txq,
1367 iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
1378 &txq->tfds[q->write_ptr]);
1383 iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1402 &txq->tfds[q->write_ptr]);
1407 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1411 if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1412 kzfree(txq->entries[idx].free_buf);
1413 txq->entries[idx].free_buf = dup_buf;
1419 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1452 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1457 spin_unlock_bh(&txq->lock);
1485 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
1493 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
1494 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
1499 spin_lock_bh(&txq->lock);
1501 cmd_index = get_cmd_index(&txq->q, index);
1502 cmd = txq->entries[cmd_index].cmd;
1503 meta = &txq->entries[cmd_index].meta;
1505 iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
1533 spin_unlock_bh(&txq->lock);
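The host-command matches (lines 1204-1457 and 1485-1533) index txq->entries through get_cmd_index rather than the raw ring position, because the command queue keeps only n_window software entries while the hardware ring index keeps counting. A one-line sketch of that wrap, assuming a power-of-two n_window:

/* Sketch only: stands in for the driver's get_cmd_index(). */
static inline int demo_cmd_index(int ring_index, int n_window)
{
	return ring_index & (n_window - 1);
}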
1592 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
1593 struct iwl_queue *q = &txq->q;
1645 trans_pcie->txq[trans_pcie->cmd_queue].
1680 struct iwl_txq *txq;
1690 txq = &trans_pcie->txq[txq_id];
1691 q = &txq->q;
1697 spin_lock(&txq->lock);
1705 WARN_ONCE(txq->ampdu &&
1711 txq->entries[q->write_ptr].skb = skb;
1712 txq->entries[q->write_ptr].cmd = dev_cmd;
1718 tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
1726 out_meta = &txq->entries[q->write_ptr].meta;
1743 memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
1745 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
1756 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
1769 &txq->tfds[q->write_ptr]);
1772 iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
1776 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1779 &txq->tfds[txq->q.write_ptr],
1789 if (txq->need_update && q->read_ptr == q->write_ptr &&
1791 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1796 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1804 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1806 iwl_stop_queue(trans, txq);
1808 spin_unlock(&txq->lock);
1811 spin_unlock(&txq->lock);
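The final group (lines 1680-1811) is the data-tx path: the skb and its dev_cmd are stored at entries[write_ptr], and iwl_pcie_txq_build_tfd appends each DMA chunk (the scratch/header buffer first, then the remaining fragments) to the frame's TFD before the write pointer is advanced and the doorbell rung. A sketch of that append-a-buffer descriptor pattern; the field names and the per-TFD entry limit are illustrative, not the hardware layout:

#include <stdint.h>

#define DEMO_MAX_TBS 20			/* illustrative per-TFD limit */

struct demo_tb {
	uint64_t addr;
	uint16_t len;
};

struct demo_tfd {
	uint8_t num_tbs;
	struct demo_tb tbs[DEMO_MAX_TBS];
};

/* Append one DMA buffer to the descriptor; returns -1 when it is full. */
static int demo_tfd_add_tb(struct demo_tfd *tfd, uint64_t addr, uint16_t len)
{
	if (tfd->num_tbs >= DEMO_MAX_TBS)
		return -1;

	tfd->tbs[tfd->num_tbs].addr = addr;
	tfd->tbs[tfd->num_tbs].len = len;
	tfd->num_tbs++;
	return 0;
}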