Lines Matching refs:queue
4 * internal queue handling
51 void *ipz_qpageit_get_inc(struct ipz_queue *queue)
53 void *ret = ipz_qeit_get(queue);
54 queue->current_q_offset += queue->pagesize;
55 if (queue->current_q_offset > queue->queue_length) {
56 queue->current_q_offset -= queue->pagesize;
59 if (((u64)ret) % queue->pagesize) {
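The fragments above belong to the page-granular iterator. A sketch of how the full function plausibly reads, with the lines the listing omits (locals, error handling, returns) filled in by assumption:

    void *ipz_qpageit_get_inc(struct ipz_queue *queue)
    {
            void *ret = ipz_qeit_get(queue);        /* current position */

            /* advance by one queue page */
            queue->current_q_offset += queue->pagesize;
            if (queue->current_q_offset > queue->queue_length) {
                    /* ran past the end: undo the step and report exhaustion (assumed) */
                    queue->current_q_offset -= queue->pagesize;
                    ret = NULL;
            }
            /* a page iterator should always sit on a page boundary (assumed check) */
            if (((u64)ret) % queue->pagesize)
                    return NULL;
            return ret;
    }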
66 void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
68 void *ret = ipz_qeit_get(queue);
69 u64 last_entry_in_q = queue->queue_length - queue->qe_size;
71 queue->current_q_offset += queue->qe_size;
72 if (queue->current_q_offset > last_entry_in_q) {
73 queue->current_q_offset = 0;
74 queue->toggle_state = (~queue->toggle_state) & 1;
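ipz_qeit_eq_get_inc advances entry by entry and, on wraparound, inverts a toggle bit, the usual way such hardware event queues distinguish freshly written entries from stale ones after a wrap. A hedged reconstruction from the fragments:

    void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
    {
            void *ret = ipz_qeit_get(queue);        /* current entry */
            u64 last_entry_in_q = queue->queue_length - queue->qe_size;

            queue->current_q_offset += queue->qe_size;
            if (queue->current_q_offset > last_entry_in_q) {
                    /* wrap to the start and flip the toggle bit */
                    queue->current_q_offset = 0;
                    queue->toggle_state = (~queue->toggle_state) & 1;
            }
            return ret;                             /* assumed return */
    }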
80 int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
83 for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
84 u64 page = (u64)virt_to_abs(queue->queue_pages[i]);
85 if (addr >= page && addr < page + queue->pagesize) {
86 *q_offset = addr - page + i * queue->pagesize;
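ipz_queue_abs_to_offset translates an absolute (physical) address back into a byte offset within the queue by scanning the page list. Sketch with the success return and failure path assumed:

    int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
    {
            int i;

            for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
                    u64 page = (u64)virt_to_abs(queue->queue_pages[i]);

                    if (addr >= page && addr < page + queue->pagesize) {
                            /* offset within this page plus all pages before it */
                            *q_offset = addr - page + i * queue->pagesize;
                            return 0;               /* assumed success code */
                    }
            }
            return -EINVAL;                         /* assumed: address not in this queue */
    }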
98 * allocate pages for queue:
100 * inner loop divides a kernel page into smaller hca queue pages
102 static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
113 queue->queue_pages[f] = (struct ipz_page *)kpage;
121 for (f = 0; f < nr_of_pages && queue->queue_pages[f];
123 free_page((unsigned long)(queue->queue_pages)[f]);
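alloc_queue_pages fills queue->queue_pages with zeroed kernel pages and, per the comment at lines 98-100, may carve one kernel page into several smaller HCA queue pages. A simplified sketch that assumes one kernel page per queue page (the sub-page split is left out), including the unwind path hinted at by the cleanup loop:

    /* simplified sketch: one kernel page per queue page */
    static int alloc_queue_pages_sketch(struct ipz_queue *queue, const u32 nr_of_pages)
    {
            u32 f;

            for (f = 0; f < nr_of_pages; f++) {
                    queue->queue_pages[f] =
                            (struct ipz_page *)get_zeroed_page(GFP_KERNEL);
                    if (!queue->queue_pages[f])
                            goto undo;
            }
            return 1;                               /* nonzero = success, as the ctor tests */

    undo:
            while (f--) {
                    free_page((unsigned long)queue->queue_pages[f]);
                    queue->queue_pages[f] = NULL;
            }
            return 0;
    }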
127 static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
129 int order = ilog2(queue->pagesize) - 9;
161 queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
162 queue->small_page = page;
163 queue->offset = bit << (order + 9);
167 ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
172 static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
174 int order = ilog2(queue->pagesize) - 9;
175 struct ipz_small_queue_page *page = queue->small_page;
179 bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
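The small-page helpers sub-allocate queues smaller than a kernel page: order = ilog2(pagesize) - 9 expresses the queue's page size in units of 512 bytes, and bit << (order + 9) turns a bitmap position into a byte offset inside the backing kernel page (line 179 performs the inverse). An illustration of that arithmetic, using hypothetical helper names and example values:

    #include <linux/log2.h>

    /* illustration only: mapping between bitmap bit and byte offset */
    static unsigned long small_page_byte_offset(unsigned long bit, u32 pagesize)
    {
            int order = ilog2(pagesize) - 9;        /* pagesize = 512 << order */

            return bit << (order + 9);              /* e.g. pagesize 512, bit 3 -> offset 1536 */
    }

    static unsigned long small_page_bit(unsigned long byte_offset, u32 pagesize)
    {
            int order = ilog2(pagesize) - 9;

            return byte_offset >> (order + 9);      /* inverse of the mapping above */
    }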
204 int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
215 /* init queue fields */
216 queue->queue_length = nr_of_pages * pagesize;
217 queue->pagesize = pagesize;
218 queue->qe_size = qe_size;
219 queue->act_nr_of_sg = nr_of_sg;
220 queue->current_q_offset = 0;
221 queue->toggle_state = 1;
222 queue->small_page = NULL;
224 /* allocate queue page pointers */
225 queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
226 if (!queue->queue_pages) {
227 queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
228 if (!queue->queue_pages) {
229 ehca_gen_err("Couldn't allocate queue page list");
234 /* allocate actual queue pages */
236 if (!alloc_small_queue_page(queue, pd))
239 if (!alloc_queue_pages(queue, nr_of_pages))
245 ehca_gen_err("Couldn't alloc pages queue=%p "
246 "nr_of_pages=%x", queue, nr_of_pages);
247 if (is_vmalloc_addr(queue->queue_pages))
248 vfree(queue->queue_pages);
250 kfree(queue->queue_pages);
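For the page-pointer array the constructor tries kzalloc first and falls back to vzalloc when the array is too large for kmalloc; the error and teardown paths then pick vfree or kfree via is_vmalloc_addr. That pattern as a stand-alone sketch (helper names are made up for illustration; newer kernels could use kvzalloc/kvfree instead):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /* hypothetical helpers showing the kzalloc-with-vzalloc-fallback pattern */
    static void **alloc_page_ptr_array(u32 nr_of_pages)
    {
            void **p = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);

            if (!p)
                    p = vzalloc(nr_of_pages * sizeof(void *));
            return p;
    }

    static void free_page_ptr_array(void **p)
    {
            if (is_vmalloc_addr(p))
                    vfree(p);
            else
                    kfree(p);
    }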
255 int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
259 if (!queue || !queue->queue_pages) {
260 ehca_gen_dbg("queue or queue_pages is NULL");
264 if (queue->small_page)
265 free_small_queue_page(queue, pd);
267 nr_pages = queue->queue_length / queue->pagesize;
269 free_page((unsigned long)queue->queue_pages[i]);
272 if (is_vmalloc_addr(queue->queue_pages))
273 vfree(queue->queue_pages);
275 kfree(queue->queue_pages);
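ipz_queue_dtor undoes the constructor: small-page queues return their chunk to the PD's pool, regular queues free each kernel page, and the page-pointer array is released with vfree or kfree depending on how it was allocated. A reconstruction under the same one-kernel-page-per-queue-page simplification as above; locals and return codes are assumed:

    int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
    {
            int i, nr_pages;

            if (!queue || !queue->queue_pages) {
                    ehca_gen_dbg("queue or queue_pages is NULL");
                    return 0;
            }

            if (queue->small_page)
                    free_small_queue_page(queue, pd);
            else {
                    nr_pages = queue->queue_length / queue->pagesize;
                    for (i = 0; i < nr_pages; i++)  /* simplification: one kernel page each */
                            free_page((unsigned long)queue->queue_pages[i]);
            }

            if (is_vmalloc_addr(queue->queue_pages))
                    vfree(queue->queue_pages);
            else
                    kfree(queue->queue_pages);

            return 1;
    }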