Lines Matching refs:page

84 		u64 page = (u64)virt_to_abs(queue->queue_pages[i]);
85 		if (addr >= page && addr < page + queue->pagesize) {
86 			*q_offset = addr - page + i * queue->pagesize;
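
Matches 84-86 are the queue's absolute-address-to-offset lookup: it scans the per-page pointer array and, once addr falls inside page i, converts it into a byte offset from the start of the queue. A minimal userspace sketch of the same linear scan, with simplified stand-in types (toy_queue and toy_abs_to_offset are illustrative names, and a plain pointer value stands in for virt_to_abs()):

	#include <stdint.h>

	struct toy_queue {
		void **queue_pages;	/* one pointer per queue page */
		uint64_t queue_length;	/* total queue size in bytes */
		uint64_t pagesize;	/* queue page size in bytes */
	};

	/* Walk the page list; when addr lies inside page i, its queue
	 * offset is (i * pagesize) plus its offset within that page. */
	static int toy_abs_to_offset(struct toy_queue *queue, uint64_t addr,
				     uint64_t *q_offset)
	{
		uint64_t i;

		for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
			uint64_t page = (uint64_t)(uintptr_t)queue->queue_pages[i];

			if (addr >= page && addr < page + queue->pagesize) {
				*q_offset = addr - page + i * queue->pagesize;
				return 0;
			}
		}
		return -1;	/* stands in for the driver's -EINVAL */
	}

For example, an address 0x100 bytes into the third page (i == 2) of a queue with a 4 KiB pagesize yields *q_offset == 2 * 4096 + 0x100.
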
99 * outer loop allocates whole kernel pages (page aligned) and
100 * inner loop divides a kernel page into smaller hca queue pages
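
The comment at lines 99-100 describes a two-level scheme: the outer loop allocates whole, page-aligned kernel pages, and the inner loop hands out equal slices of each as HCA queue pages. The loop body itself is not captured by this match list; a hedged kernel-style sketch of what such a scheme looks like (f, nr_of_pages and the cleanup label are illustrative names; get_zeroed_page(), GFP_KERNEL and PAGE_SIZE are standard kernel APIs):

	while (f < nr_of_pages) {
		/* outer loop: one zeroed, page-aligned kernel page */
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		int k;

		if (!kpage)
			goto alloc_failed;	/* hypothetical cleanup label */
		/* inner loop: carve the kernel page into queue->pagesize
		 * sized HCA queue pages, one pointer per slice */
		for (k = 0; k < PAGE_SIZE / queue->pagesize &&
			    f < nr_of_pages; k++) {
			queue->queue_pages[f] = kpage;
			kpage += queue->pagesize;
			f++;
		}
	}
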
130 	struct ipz_small_queue_page *page;
136 		page = list_entry(pd->free[order].next,
139 		page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
140 		if (!page)
143 		page->page = get_zeroed_page(GFP_KERNEL);
144 		if (!page->page) {
145 			kmem_cache_free(small_qp_cache, page);
149 		list_add(&page->list, &pd->free[order]);
152 	bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
153 	__set_bit(bit, page->bitmap);
154 	page->fill++;
156 	if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
157 		list_move(&page->list, &pd->full[order]);
161 	queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
162 	queue->small_page = page;
167 	ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
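
Matches 130-167 are the small-queue-page allocator. Each kernel page is carved into IPZ_SPAGE_PER_KPAGE >> order chunks of 512 << order bytes, tracked by a per-page bitmap: find_first_zero_bit() picks a free chunk, fill counts chunks in use, and a page whose last chunk goes out moves from pd->free[order] to pd->full[order] so later allocations skip it. The chunk address built at line 161 is the page base OR'd with bit << (order + 9), because each chunk is 1 << (order + 9) bytes and the base is page aligned. A self-contained userspace sketch of the same bitmap sub-allocation (the toy_* names, a single unsigned long bitmap, and a fixed 4 KiB kernel page are simplifying assumptions):

	#include <stdint.h>

	#define KPAGE_SHIFT	12			 /* assume 4 KiB kernel pages */
	#define SPAGE_PER_KPAGE	(1 << (KPAGE_SHIFT - 9)) /* 512-byte units per page */

	struct toy_small_page {
		unsigned long bitmap;	/* one bit per chunk, 0 == free */
		int fill;		/* chunks currently handed out */
		uintptr_t base;		/* page-aligned start of backing page */
	};

	/* Hand out one (512 << order)-byte chunk, or 0 if the page is full. */
	static uintptr_t toy_alloc_chunk(struct toy_small_page *p, int order)
	{
		int nbits = SPAGE_PER_KPAGE >> order;
		int bit;

		for (bit = 0; bit < nbits; bit++)	/* find_first_zero_bit() */
			if (!(p->bitmap & (1UL << bit)))
				break;
		if (bit == nbits)
			return 0;			/* page already full */

		p->bitmap |= 1UL << bit;		/* __set_bit() */
		p->fill++;
		/* chunk size is 1 << (order + 9), so the offset of chunk
		 * 'bit' within the page is bit << (order + 9) */
		return p->base | ((uintptr_t)bit << (order + 9));
	}

With order == 0 a kernel page holds eight 512-byte chunks; the third allocation from a fresh page returns base | (2 << 9), i.e. base + 1024.
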
175 	struct ipz_small_queue_page *page = queue->small_page;
184 	__clear_bit(bit, page->bitmap);
185 	page->fill--;
187 	if (page->fill == 0) {
188 		list_del(&page->list);
192 	if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
193 		/* the page was full until we freed the chunk */
194 		list_move_tail(&page->list, &pd->free[order]);
199 		free_page(page->page);
200 		kmem_cache_free(small_qp_cache, page);
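
The free path (matches 175-200) mirrors the allocator: clear the chunk's bit, decrement fill, and let the new count drive list placement. fill == 0 means the backing kernel page is now empty, so it is dropped from its list and released (the free_page()/kmem_cache_free() pair at lines 199-200); fill == capacity - 1 means the page was full until this free, so it moves from pd->full[order] back to pd->free[order] and becomes allocatable again. A companion sketch to toy_alloc_chunk() above (the list moves appear only as comments, since the sketch keeps no lists):

	/* Return a chunk obtained from toy_alloc_chunk() to its page. */
	static void toy_free_chunk(struct toy_small_page *p, uintptr_t addr,
				   int order)
	{
		int nbits = SPAGE_PER_KPAGE >> order;
		/* recover the chunk index from the in-page offset */
		int bit = (addr & ((1UL << KPAGE_SHIFT) - 1)) >> (order + 9);

		p->bitmap &= ~(1UL << bit);	/* __clear_bit() */
		p->fill--;

		if (p->fill == 0) {
			/* page now empty: the driver list_del()s it and frees
			 * the kernel page and its bookkeeping struct */
		} else if (p->fill == nbits - 1) {
			/* page was full until this free: the driver moves it
			 * from pd->full[order] back to pd->free[order] */
		}
	}
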
211 "is greater than kernel page size", pagesize);
224 /* allocate queue page pointers */
229 ehca_gen_err("Couldn't allocate queue page list");
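
Match 211 is the tail of a sanity check rejecting any queue pagesize larger than the kernel's PAGE_SIZE, and matches 224/229 come from allocating the array of per-page pointers that every routine above indexes. The allocation itself is not captured by this match list; a hedged kernel-style sketch of a common pattern for such an array (nr_of_pages and the kmalloc-then-vmalloc fallback are assumptions; kmalloc, vmalloc and memset are standard kernel APIs, and ehca_gen_err appears in the matches):

	/* allocate queue page pointers */
	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
	if (!queue->queue_pages) {
		/* a big queue's pointer array may be too large for a
		 * contiguous kmalloc, so fall back to vmalloc */
		queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
		if (!queue->queue_pages) {
			ehca_gen_err("Couldn't allocate queue page list");
			return 0;
		}
	}
	memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
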