Lines Matching defs:txq_id
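All hits below fall in the iwlwifi PCIe transport's transmit path (the iwl_pcie_* and iwl_trans_pcie_* helpers); the leading number on each hit is its line in the matched source file.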

218 int txq_id = txq->q.id;
250 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
253 scd_bc_tbl[txq_id].
263 int txq_id = txq->q.id;
272 if (txq_id != trans_pcie->cmd_queue)
276 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
279 scd_bc_tbl[txq_id].
291 int txq_id = txq->q.id;
302 txq_id != trans_pcie->cmd_queue &&
313 txq_id, reg);
325 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
326 iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
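
The first group (218-326) sits in the scheduler byte-count bookkeeping and the write-pointer update helper: after the debug print at 325, line 326 rings the hardware doorbell with the ring index in the low byte and the queue id shifted above it. A minimal user-space sketch of that encoding; the helper name is illustrative and the 256-entry ring size is an assumption:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror of the expression at line 326: write_ptr | (txq_id << 8),
     * written to HBUS_TARG_WRPTR.  A 256-entry ring (assumed) keeps
     * write_ptr below 0x100, so the two fields cannot collide. */
    static uint32_t toy_hbus_wrptr(unsigned int txq_id, unsigned int write_ptr)
    {
            return write_ptr | (txq_id << 8);
    }

    int main(void)
    {
            uint32_t reg = toy_hbus_wrptr(4, 0x2a);
            printf("doorbell 0x%04x: queue %u, write_ptr 0x%02x\n",
                   (unsigned int)reg, (unsigned int)(reg >> 8),
                   (unsigned int)(reg & 0xff));
            return 0;
    }
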
481 u32 txq_id)
504 if (txq_id == trans_pcie->cmd_queue)
533 txq->q.id = txq_id;
539 if (txq->entries && txq_id == trans_pcie->cmd_queue)
550 int slots_num, u32 txq_id)
561 ret = iwl_queue_init(&txq->q, slots_num, txq_id);
571 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
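
The hits at 481-571 come from per-queue allocation and init: the command queue is special-cased (504, 539), the queue records its own id (533), iwl_queue_init() seeds the ring state (561), and the queue's ring base is programmed through FH_MEM_CBBC_QUEUE(txq_id) (571). A toy model of the state that init plausibly establishes; the struct is a simplification, not the driver's layout:

    #include <stdio.h>

    /* Simplified stand-in for the per-queue ring state; id, read_ptr and
     * write_ptr follow the q.id / q.read_ptr / q.write_ptr usage in the
     * listing, n_window is assumed to hold slots_num. */
    struct toy_queue {
            int id;         /* txq_id, stored as at line 533 */
            int n_window;   /* usable slots */
            int read_ptr;   /* oldest unreclaimed descriptor */
            int write_ptr;  /* next descriptor to fill */
    };

    static void toy_queue_init(struct toy_queue *q, int slots_num, int txq_id)
    {
            q->id = txq_id;                  /* cf. line 533 */
            q->n_window = slots_num;
            q->read_ptr = q->write_ptr = 0;  /* ring starts empty */
    }

    int main(void)
    {
            struct toy_queue q;
            toy_queue_init(&q, 32, 4);
            printf("queue %d: %d slots\n", q.id, q.n_window);
            return 0;
    }
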
580 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
583 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
589 txq_id, q->read_ptr);
608 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
611 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
618 iwl_pcie_txq_unmap(trans, txq_id);
621 if (txq_id == trans_pcie->cmd_queue)
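
Lines 580-621 cover teardown of a single queue: iwl_pcie_txq_unmap() drains the ring from read_ptr (the debug print at 589 reports where it starts), and iwl_pcie_txq_free() unmaps first (618), then gives the command queue extra treatment (621). The drain reduces to walking read_ptr forward until it meets write_ptr, wrapping at the power-of-two ring size; a sketch with stand-in names:

    #include <stdio.h>

    #define TOY_RING_SIZE 256  /* assumed power-of-two size, cf. TFD_QUEUE_SIZE_MAX */

    struct toy_ring {
            int read_ptr;
            int write_ptr;
    };

    /* Drain pattern behind iwl_pcie_txq_unmap(): step read_ptr up to
     * write_ptr, releasing one descriptor per step (release elided). */
    static void toy_ring_unmap(struct toy_ring *q)
    {
            while (q->read_ptr != q->write_ptr)
                    q->read_ptr = (q->read_ptr + 1) & (TOY_RING_SIZE - 1);
    }

    int main(void)
    {
            struct toy_ring q = { .read_ptr = 250, .write_ptr = 3 };  /* wrapped */
            toy_ring_unmap(&q);
            printf("drained: read_ptr=%d write_ptr=%d\n", q.read_ptr, q.write_ptr);
            return 0;
    }
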
708 int txq_id;
710 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
711 txq_id++) {
712 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
714 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
716 iwl_pcie_txq_unmap(trans, txq_id);
734 int ch, txq_id, ret;
769 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
770 txq_id++)
771 iwl_pcie_txq_unmap(trans, txq_id);
783 int txq_id;
788 for (txq_id = 0;
789 txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
790 iwl_pcie_txq_free(trans, txq_id);
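
The whole-device paths (708-790) apply the same two operations across every queue: the stop path unmaps each ring but keeps its memory (769-771), while the free path destroys the queues outright (788-790). The two loops, reduced to a toy shape with an illustrative queue count:

    #include <stdio.h>

    #define TOY_NUM_QUEUES 20  /* illustrative; the driver reads num_of_queues */

    static void toy_txq_unmap(int txq_id) { printf("unmap %d\n", txq_id); }
    static void toy_txq_free(int txq_id)  { printf("free %d\n", txq_id); }

    static void toy_tx_stop(void)  /* cf. lines 769-771 */
    {
            for (int txq_id = 0; txq_id < TOY_NUM_QUEUES; txq_id++)
                    toy_txq_unmap(txq_id);
    }

    static void toy_tx_free(void)  /* cf. lines 788-790 */
    {
            for (int txq_id = 0; txq_id < TOY_NUM_QUEUES; txq_id++)
                    toy_txq_free(txq_id);
    }

    int main(void)
    {
            toy_tx_stop();
            toy_tx_free();
            return 0;
    }
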
808 int txq_id, slots_num;
844 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
845 txq_id++) {
846 slots_num = (txq_id == trans_pcie->cmd_queue) ?
848 ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
849 slots_num, txq_id);
851 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
867 int txq_id, slots_num;
889 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
890 txq_id++) {
891 slots_num = (txq_id == trans_pcie->cmd_queue) ?
893 ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
894 slots_num, txq_id);
896 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
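
In iwl_pcie_tx_alloc() and iwl_pcie_tx_init() (808-896), the truncated conditionals at 846 and 891 size each queue's window; from context they give the command queue a different slot count than data queues. A sketch of that selection; the macro values below are placeholders, not the driver's constants:

    #include <stdio.h>

    /* Assumed completion of the truncated conditionals at 846/891: the
     * command queue uses a different window than data queues.  The two
     * values are placeholders, not the driver's constants. */
    #define TOY_CMD_SLOTS    32
    #define TOY_TX_CMD_SLOTS 256

    static int toy_slots_num(int txq_id, int cmd_queue)
    {
            return (txq_id == cmd_queue) ? TOY_CMD_SLOTS : TOY_TX_CMD_SLOTS;
    }

    int main(void)
    {
            printf("cmd queue: %d slots, data queue: %d slots\n",
                   toy_slots_num(9, 9), toy_slots_num(3, 9));
            return 0;
    }
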
926 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
930 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
936 if (WARN_ON(txq_id == trans_pcie->cmd_queue))
943 txq_id, ssn);
951 txq_id, txq->q.read_ptr, tfd_num, ssn);
960 __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
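
iwl_trans_pcie_reclaim() (926-960) refuses to run on the command queue (936) and frees everything between the current read_ptr and the slot the caller's ssn maps to. Folding the 12-bit 802.11 sequence number onto the ring by a power-of-two mask is an assumption consistent with the TFD_QUEUE_SIZE_MAX check at 960:

    #include <stdio.h>

    #define TOY_RING_SIZE 256  /* stand-in for TFD_QUEUE_SIZE_MAX */

    /* Fold a sequence number onto the ring; slots from read_ptr up to
     * (but not including) this index are eligible for reclaim. */
    static int toy_ssn_to_tfd(int ssn)
    {
            return ssn & (TOY_RING_SIZE - 1);
    }

    int main(void)
    {
            printf("ssn 0x512 -> tfd %d\n", toy_ssn_to_tfd(0x512));
            return 0;
    }
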
999 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1002 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
1012 __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
1042 u16 txq_id)
1052 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
1056 if (txq_id & 0x1)
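
iwl_pcie_txq_set_ratid_map() (1042-1056) stores the RA/TID for a queue in the scheduler's translation table; the slot offset is derived from the queue id (1052), and the parity test at 1056 implies two queues share each 32-bit word. The packing below follows that implication and should be read as an inference, not the driver's exact code:

    #include <stdio.h>
    #include <stdint.h>

    /* Inferred layout: odd txq_ids occupy bits 16-31 of the shared
     * translation-table word, even ids the low half; the other half is
     * preserved across the update. */
    static uint32_t toy_pack_ratid(uint32_t tbl_dw, uint16_t ra_tid, int txq_id)
    {
            if (txq_id & 0x1)  /* cf. line 1056 */
                    return ((uint32_t)ra_tid << 16) | (tbl_dw & 0x0000ffff);
            return ra_tid | (tbl_dw & 0xffff0000);
    }

    int main(void)
    {
            uint32_t dw = toy_pack_ratid(0, 0x1234, 5);  /* odd queue */
            dw = toy_pack_ratid(dw, 0xabcd, 4);          /* even queue */
            printf("table word 0x%08x\n", (unsigned int)dw);
            return 0;
    }
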
1070 void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
1076 if (test_and_set_bit(txq_id, trans_pcie->queue_used))
1077 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
1083 if (txq_id == trans_pcie->cmd_queue &&
1088 iwl_scd_txq_set_inactive(trans, txq_id);
1091 if (txq_id != trans_pcie->cmd_queue)
1092 iwl_scd_txq_set_chain(trans, txq_id);
1098 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
1101 iwl_scd_txq_enable_agg(trans, txq_id);
1102 trans_pcie->txq[txq_id].ampdu = true;
1109 iwl_scd_txq_disable_agg(trans, txq_id);
1111 ssn = trans_pcie->txq[txq_id].q.read_ptr;
1117 trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
1118 trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
1124 (ssn & 0xff) | (txq_id << 8));
1125 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
1129 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
1132 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1139 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
1146 if (txq_id == trans_pcie->cmd_queue &&
1148 iwl_scd_enable_set_active(trans, BIT(txq_id));
1151 trans_pcie->txq[txq_id].active = true;
1153 txq_id, fifo, ssn & 0xff);
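
The enable path (1070-1153) warns on double activation (1076-1077), deactivates and reconfigures the queue, and when aggregation is being turned off rewinds ssn to the hardware read pointer (1109-1111). Lines 1117-1125 then point software and hardware at the same slot: both driver pointers take ssn & 0xff, the doorbell gets the same value with the queue id above it, and SCD_QUEUE_RDPTR gets the full ssn. That synchronization step, modeled in user space:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for the queue state and registers touched at 1117-1125;
     * field names are illustrative. */
    struct toy_enable {
            int read_ptr, write_ptr;
            uint32_t hbus_wrptr, scd_rdptr;
    };

    static void toy_enable_sync(struct toy_enable *s, int txq_id, uint16_t ssn)
    {
            s->read_ptr = s->write_ptr = ssn & 0xff;       /* lines 1117-1118 */
            s->hbus_wrptr = (ssn & 0xff) | (txq_id << 8);  /* line 1124 */
            s->scd_rdptr = ssn;                            /* line 1125 */
    }

    int main(void)
    {
            struct toy_enable s;
            toy_enable_sync(&s, 10, 0x5ab);
            printf("ptrs %d/%d, doorbell 0x%x, scd rdptr 0x%x\n",
                   s.read_ptr, s.write_ptr,
                   (unsigned int)s.hbus_wrptr, (unsigned int)s.scd_rdptr);
            return 0;
    }
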
1156 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
1161 SCD_TX_STTS_QUEUE_OFFSET(txq_id);
1170 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
1172 "queue %d not used", txq_id);
1177 iwl_scd_txq_set_inactive(trans, txq_id);
1183 iwl_pcie_txq_unmap(trans, txq_id);
1184 trans_pcie->txq[txq_id].ampdu = false;
1186 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1479 int txq_id = SEQ_TO_QUEUE(sequence);
1490 if (WARN(txq_id != trans_pcie->cmd_queue,
1492 txq_id, trans_pcie->cmd_queue, sequence,
1517 iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1674 struct iwl_device_cmd *dev_cmd, int txq_id)
1690 txq = &trans_pcie->txq[txq_id];
1693 if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
1694 "TX on unused queue %d\n", txq_id))
1708 txq_id, wifi_seq, q->write_ptr);
1715 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
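
The last two groups close the loop on the sequence field: iwl_trans_pcie_tx() (1674-1715) stamps each descriptor with QUEUE_TO_SEQ(txq_id) plus the write index (1715), and the completion path (1479-1517) recovers the queue with SEQ_TO_QUEUE(sequence) (1479), sanity-checks it against the command queue (1490-1492), and reclaims the slot (1517). A round-trip sketch; the queue-id shift matches the << 8 packing visible at 326 and 1715, while the exact field widths are assumptions:

    #include <stdio.h>
    #include <stdint.h>

    /* Queue id above bit 8, descriptor index in the low byte; the 5-bit
     * queue mask and 8-bit index mask are assumptions. */
    #define TOY_QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
    #define TOY_SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
    #define TOY_INDEX_TO_SEQ(i) ((i) & 0xff)
    #define TOY_SEQ_TO_INDEX(s) ((s) & 0xff)

    int main(void)
    {
            uint16_t seq = TOY_QUEUE_TO_SEQ(9) | TOY_INDEX_TO_SEQ(42);
            printf("seq 0x%04x -> queue %d, index %d\n",
                   (unsigned int)seq, TOY_SEQ_TO_QUEUE(seq), TOY_SEQ_TO_INDEX(seq));
            return 0;
    }
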