Lines Matching defs:rxq

893 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
894 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
895 *   to replenish the iwl->rxq->rx_free.
897 *   iwl->rxq is replenished and the READ IDX is updated (updating the
900 *   detached from the iwl->rxq.  The driver 'processed' idx is updated.
901 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
902 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
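The comment block above (lines 893-902) describes the Rx buffer lifecycle: buffers wait on rx_free, and refill work is scheduled once the pool runs low. As a minimal, compilable sketch of just the watermark rule, where rxq_model and rx_needs_replenish are hypothetical stand-ins invented for this note (free_count and RX_LOW_WATERMARK are the driver's own names; the watermark value here is assumed):

#include <stdbool.h>

#define RX_LOW_WATERMARK 8              /* assumed value for illustration */

/* Stand-in for the il_rx_queue fields used by this rule. */
struct rxq_model {
        unsigned int free_count;        /* buffers waiting on rx_free */
};

/* Hypothetical helper: schedule replenish work once the pre-allocated
 * pool drops to or below the watermark, as the comment describes. */
static bool rx_needs_replenish(const struct rxq_model *rxq)
{
        return rxq->free_count <= RX_LOW_WATERMARK;
}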
949         struct il_rx_queue *rxq = &il->rxq;
955         spin_lock_irqsave(&rxq->lock, flags);
956         write = rxq->write & ~0x7;
957         while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
959                 element = rxq->rx_free.next;
964                 rxq->bd[rxq->write] =
966                 rxq->queue[rxq->write] = rxb;
967                 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
968                 rxq->free_count--;
970         spin_unlock_irqrestore(&rxq->lock, flags);
973         if (rxq->free_count <= RX_LOW_WATERMARK)
978         if (rxq->write_actual != (rxq->write & ~0x7) ||
979             abs(rxq->write - rxq->read) > 7) {
980                 spin_lock_irqsave(&rxq->lock, flags);
981                 rxq->need_update = 1;
982                 spin_unlock_irqrestore(&rxq->lock, flags);
983                 il_rx_queue_update_write_ptr(il, rxq);
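Lines 949-983 above come from il3945_rx_queue_restock(): under rxq->lock, buffers move from rx_free into the hardware ring, and the device's write pointer is only nudged in multiples of 8 (hence the & ~0x7 masking at lines 956 and 978). The following is a simplified, self-contained model of that loop, not the driver's code; all *_model names are invented, and the DMA address write into rxq->bd[] and the locking are elided:

#define RX_QUEUE_SIZE 256               /* ring size used by the driver */
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

struct rx_buf_model { struct rx_buf_model *next; };

struct rxq_model {
        struct rx_buf_model *queue[RX_QUEUE_SIZE];  /* slots handed to hardware */
        struct rx_buf_model *rx_free;               /* stand-in for the list_head */
        unsigned int read, write, free_count;
};

static void restock_model(struct rxq_model *rxq)
{
        /* Keep one slot of slack so a full ring stays distinguishable
         * from an empty one (il_rx_queue_space() reserves similar
         * headroom in the driver). */
        while (((rxq->read - rxq->write - 1) & RX_QUEUE_MASK) > 0 &&
               rxq->free_count && rxq->rx_free) {
                struct rx_buf_model *rxb = rxq->rx_free;

                rxq->rx_free = rxb->next;               /* pop from rx_free */
                rxq->queue[rxq->write] = rxb;           /* park in next write slot */
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
        }
}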
998         struct il_rx_queue *rxq = &il->rxq;
1006                 spin_lock_irqsave(&rxq->lock, flags);
1008                 if (list_empty(&rxq->rx_used)) {
1009                         spin_unlock_irqrestore(&rxq->lock, flags);
1012                 spin_unlock_irqrestore(&rxq->lock, flags);
1014                 if (rxq->free_count > RX_LOW_WATERMARK)
1025                         if (rxq->free_count <= RX_LOW_WATERMARK &&
1029                                                priority, rxq->free_count);
1036                 spin_lock_irqsave(&rxq->lock, flags);
1037                 if (list_empty(&rxq->rx_used)) {
1038                         spin_unlock_irqrestore(&rxq->lock, flags);
1042                 element = rxq->rx_used.next;
1045                 spin_unlock_irqrestore(&rxq->lock, flags);
1054                 spin_lock_irqsave(&rxq->lock, flags);
1056                 list_add_tail(&rxb->list, &rxq->rx_free);
1057                 rxq->free_count++;
1060                 spin_unlock_irqrestore(&rxq->lock, flags);
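Lines 998-1060 above come from il3945_rx_allocate(): descriptors are popped from rx_used, given a freshly allocated (and DMA-mapped) page, and pushed onto rx_free, with rxq->lock taken around every list operation. A simplified model under the same invented *_model names, with malloc() standing in for alloc_pages() and the locking and DMA mapping elided:

#include <stdlib.h>

struct rx_buf_model {
        struct rx_buf_model *next;
        void *page;                     /* stands in for the mapped page */
};

struct rxq_model {
        struct rx_buf_model *rx_used;   /* descriptors without a page */
        struct rx_buf_model *rx_free;   /* descriptors ready for the ring */
        unsigned int free_count;
};

static void allocate_model(struct rxq_model *rxq, size_t page_sz)
{
        while (rxq->rx_used) {
                void *page = malloc(page_sz);   /* alloc_pages() in the driver */

                if (!page)
                        break;  /* restock reschedules replenish if still short */

                struct rx_buf_model *rxb = rxq->rx_used;

                rxq->rx_used = rxb->next;       /* pop an empty descriptor */
                rxb->page = page;
                rxb->next = rxq->rx_free;       /* push onto rx_free */
                rxq->rx_free = rxb;
                rxq->free_count++;
        }
}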
1065 il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
1069         spin_lock_irqsave(&rxq->lock, flags);
1070         INIT_LIST_HEAD(&rxq->rx_free);
1071         INIT_LIST_HEAD(&rxq->rx_used);
1076                 if (rxq->pool[i].page != NULL) {
1077                         pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
1080                         __il_free_pages(il, rxq->pool[i].page);
1081                         rxq->pool[i].page = NULL;
1083                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1088         rxq->read = rxq->write = 0;
1089         rxq->write_actual = 0;
1090         rxq->free_count = 0;
1091         spin_unlock_irqrestore(&rxq->lock, flags);
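Lines 1065-1091 above come from il3945_rx_queue_reset(): every pool entry is stripped of its page and parked on rx_used, and all indices go to zero, so the next replenish/restock pass rebuilds the ring from scratch. The resulting end state, as a tiny model (rxq_model and reset_model are invented names):

struct rxq_model {
        unsigned int read, write, write_actual, free_count;
};

static void reset_model(struct rxq_model *rxq)
{
        /* Mirrors lines 1088-1090: nothing processed, nothing written,
         * and no buffers yet available for restocking. */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        rxq->free_count = 0;
}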
1121 il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
1125                 if (rxq->pool[i].page != NULL) {
1126                         pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
1129                         __il_free_pages(il, rxq->pool[i].page);
1130                         rxq->pool[i].page = NULL;
1134         dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1135                           rxq->bd_dma);
1137                           rxq->rb_stts, rxq->rb_stts_dma);
1138         rxq->bd = NULL;
1139         rxq->rb_stts = NULL;
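Lines 1121-1139 above come from il3945_rx_queue_free(), and the ordering matters: per-buffer pages are unmapped and released first, then the two coherent DMA regions (the RBD array at 4 bytes per entry, and the rb_stts status area), and finally the pointers are set to NULL so the guard at line 3841 below treats the queue as already freed. A rough model with free() standing in for both __il_free_pages() and dma_free_coherent(); RX_FREE_BUFFERS is an assumed spare-buffer count:

#include <stdlib.h>

#define RX_QUEUE_SIZE 256
#define RX_FREE_BUFFERS 64              /* assumed for illustration */

struct pool_entry_model { void *page; };

struct rxq_model {
        struct pool_entry_model pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
        void *bd;                       /* coherent RBD array */
        void *rb_stts;                  /* coherent status area */
};

static void queue_free_model(struct rxq_model *rxq)
{
        /* Release every backing page first. */
        for (size_t i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
                free(rxq->pool[i].page);
                rxq->pool[i].page = NULL;
        }

        /* Then the coherent regions; poison the pointers so a later
         * 'if (rxq->bd)' style guard sees the queue as torn down. */
        free(rxq->bd);
        free(rxq->rb_stts);
        rxq->bd = NULL;
        rxq->rb_stts = NULL;
}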
1192         struct il_rx_queue *rxq = &il->rxq;
1202         r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1203         i = rxq->read;
1206         total_empty = r - rxq->write_actual;
1219                 rxb = rxq->queue[i];
1226                 rxq->queue[i] = NULL;
1279                 spin_lock_irqsave(&rxq->lock, flags);
1285                         list_add_tail(&rxb->list, &rxq->rx_free);
1286                         rxq->free_count++;
1288                         list_add_tail(&rxb->list, &rxq->rx_used);
1290                 spin_unlock_irqrestore(&rxq->lock, flags);
1298                                 rxq->read = i;
1306         rxq->read = i;
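Lines 1192-1306 above come from il3945_rx_handle(): r is the hardware's closed_rb_num (masked to 12 bits at line 1202), and the driver consumes ring entries from rxq->read up to r, recycling each buffer back to rx_free or rx_used and periodically writing rxq->read back (lines 1298 and 1306) so the ring can be restocked before it runs dry. A condensed model of the dispatch walk, with invented *_model names and the recycling and locking reduced to comments:

#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

struct rx_buf_model { int filled; };

struct rxq_model {
        struct rx_buf_model *queue[RX_QUEUE_SIZE];
        unsigned int read;
};

static void rx_handle_model(struct rxq_model *rxq, unsigned int r,
                            void (*deliver)(struct rx_buf_model *))
{
        unsigned int i = rxq->read;

        r &= RX_QUEUE_MASK;     /* hardware index, clamped to the ring */

        while (i != r) {
                struct rx_buf_model *rxb = rxq->queue[i];

                rxq->queue[i] = NULL;   /* slot no longer owned by hardware */
                deliver(rxb);           /* hand the frame up the stack */
                /* The driver then re-lists rxb on rx_free (page intact)
                 * or rx_used (page consumed), under rxq->lock. */
                i = (i + 1) & RX_QUEUE_MASK;
        }

        rxq->read = i;          /* mirror of lines 1298/1306 */
}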
1475                 il_rx_queue_update_write_ptr(il, &il->rxq);
3841         if (il->rxq.bd)
3842                 il3945_rx_queue_free(il, &il->rxq);