Lines Matching refs:queue

44  * Routines for managing the command/response queue
65 * @queue: crq_queue to deallocate and unregister
71 static void rpavscsi_release_crq_queue(struct crq_queue *queue,
85 queue->msg_token,
86 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
87 free_page((unsigned long)queue->msgs);
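
The matches at lines 71-87 are the teardown path: the CRQ page is DMA-unmapped and then freed once the queue is no longer registered with the hypervisor. A minimal sketch of how those fragments fit together, assuming the driver's usual includes (asm/hvcall.h, asm/vio.h, linux/dma-mapping.h) and structures; the free_irq/tasklet_kill calls and the H_FREE_CRQ retry loop are assumptions, not part of the matched lines:

static void rpavscsi_release_crq_queue(struct crq_queue *queue,
				       struct ibmvscsi_host_data *hostdata,
				       int max_requests)
{
	long rc = 0;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	free_irq(vdev->irq, (void *)hostdata);	/* stop the interrupt source */
	tasklet_kill(&hostdata->srp_task);	/* and the bottom half */

	/* unregister the CRQ with the hypervisor; retry while it is busy */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	/* the two matched fragments: unmap and free the message page */
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs);
}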
91 * crq_queue_next_crq: - Returns the next entry in message queue
92 * @queue: crq_queue to use
94 * Returns pointer to next entry in queue, or NULL if there are no new
97 static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
102 spin_lock_irqsave(&queue->lock, flags);
103 crq = &queue->msgs[queue->cur];
105 if (++queue->cur == queue->size)
106 queue->cur = 0;
109 spin_unlock_irqrestore(&queue->lock, flags);
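
Lines 91-109 are the consumer side of the ring buffer: under queue->lock the current slot is taken and the cursor wraps back to zero when it reaches queue->size. Filling the gaps between the matched lines (the 0x80 valid-bit test is the conventional CRQ idiom and an assumption here), the routine is roughly:

static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
{
	struct viosrp_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	if (crq->valid & 0x80) {		/* entry posted by the hypervisor? */
		if (++queue->cur == queue->size)	/* advance, wrapping at the end */
			queue->cur = 0;
	} else {
		crq = NULL;			/* nothing new in the ring */
	}
	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}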
141 while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
147 if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
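
Lines 141 and 147 are the two drain loops of the deferred handler that services the queue: the ring is emptied, interrupts are re-enabled, and one more check catches an entry that raced in meanwhile. A sketch of that pattern; the function name srp_task and the ibmvscsi_handle_crq call are assumptions, not shown in the matched lines:

static void srp_task(unsigned long data)
{
	struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
	struct viosrp_crq *crq;
	int done = 0;

	while (!done) {
		/* pull everything currently in the ring */
		while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
			ibmvscsi_handle_crq(crq, hostdata);
			crq->valid = 0x00;	/* hand the slot back */
		}

		/* re-enable interrupts, then re-check for an entry that raced in */
		vio_enable_interrupts(vdev);
		if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvscsi_handle_crq(crq, hostdata);
			crq->valid = 0x00;
		} else {
			done = 1;
		}
	}
}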
199 * @queue: crq_queue to reset and re-register
203 static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
216 /* Clean out the queue */
217 memset(queue->msgs, 0x00, PAGE_SIZE);
218 queue->cur = 0;
225 queue->msg_token, PAGE_SIZE);
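
Lines 203-225 outline the recovery path: the message page is wiped, the cursor reset, and the queue re-registered (the msg_token/PAGE_SIZE pair at line 225 is the tail of that registration call). A sketch, assuming the conventional H_FREE_CRQ/H_REG_CRQ hcalls; the busy-retry loop and the warnings are assumptions beyond the matched lines:

static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
				    struct ibmvscsi_host_data *hostdata)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	/* Close the CRQ; retry while the hypervisor reports busy */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	/* Clean out the queue */
	memset(queue->msgs, 0x00, PAGE_SIZE);
	queue->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == 2)
		/* Adapter is good, but other end is not ready */
		dev_warn(hostdata->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);

	return rc;
}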
237 * @queue: crq_queue to initialize and register
244 static int rpavscsi_init_crq_queue(struct crq_queue *queue,
252 queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
254 if (!queue->msgs)
256 queue->size = PAGE_SIZE / sizeof(*queue->msgs);
258 queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
259 queue->size * sizeof(*queue->msgs),
262 if (dma_mapping_error(hostdata->dev, queue->msg_token))
270 queue->msg_token, PAGE_SIZE);
273 rc = rpavscsi_reset_crq_queue(queue,
285 queue->cur = 0;
286 spin_lock_init(&queue->lock);
317 queue->msg_token,
318 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
320 free_page((unsigned long)queue->msgs);
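
Lines 244-320 cover queue setup: a zeroed page becomes the ring, it is DMA-mapped, registered with the hypervisor (falling back to a reset at line 273 if a previous registration is still live), and the cursor and lock are initialised; lines 317-320 are the error unwinding. Assembled into a sketch, with the H_REG_CRQ call, H_RESOURCE check, error labels and warnings assumed where they are not among the matched lines (irq and tasklet setup omitted):

static int rpavscsi_init_crq_queue(struct crq_queue *queue,
				   struct ibmvscsi_host_data *hostdata,
				   int max_requests)
{
	int rc, retrc;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	/* One zeroed page backs the whole ring */
	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	/* Map it for DMA so the hypervisor can post entries into it */
	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hostdata->dev, queue->msg_token))
		goto map_failed;

	/* Register the ring with the hypervisor */
	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
					queue->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* an old CRQ is still registered (e.g. after kexec): reset it */
		rc = rpavscsi_reset_crq_queue(queue, hostdata);

	if (rc == 2) {
		dev_warn(hostdata->dev, "Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	/* irq request and tasklet setup would follow here (not in the matched lines) */

	return retrc;

reg_crq_failed:
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)queue->msgs);
malloc_failed:
	return -1;
}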
327 * @queue: crq_queue to re-enable
331 static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
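
The final match, line 331, is only the prototype of the re-enable path; none of its body appears in the listing, so the following is purely an assumed sketch of the conventional H_ENABLE_CRQ retry loop:

static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
				       struct ibmvscsi_host_data *hostdata)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	/* Re-enable the CRQ; retry while the hypervisor reports busy */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	if (rc)
		dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);

	return rc;
}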