/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  internal queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>

#include "ehca_tools.h"
#include "ipz_pt_fn.h"
#include "ehca_classes.h"

#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)

struct kmem_cache *small_qp_cache;

void *ipz_qpageit_get_inc(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		ret = NULL;
	}
	if (((u64)ret) % queue->pagesize) {
		ehca_gen_err("ERROR!! not at PAGE-Boundary");
		return NULL;
	}
	return ret;
}

void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;

	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}

	return ret;
}

int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
{
	int i;
	for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
		u64 page = (u64)virt_to_abs(queue->queue_pages[i]);
		if (addr >= page && addr < page + queue->pagesize) {
			*q_offset = addr - page + i * queue->pagesize;
			return 0;
		}
	}
	return -EINVAL;
}
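/*
 * Worked example for ipz_queue_abs_to_offset() above (illustrative
 * numbers only): with pagesize = 4096 and an absolute address that
 * falls 0x100 bytes into the eHCA page backed by queue_pages[2],
 * *q_offset becomes 2 * 4096 + 0x100.  A caller can use this to
 * translate an absolute queue-entry address reported by firmware
 * back into a logical offset within the queue.
 */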
#if PAGE_SHIFT < EHCA_PAGESHIFT
#error Kernel pages must be at least as large as eHCA pages (4K)!
#endif

/*
 * allocate pages for queue:
 * outer loop allocates whole kernel pages (page aligned) and
 * inner loop divides a kernel page into smaller hca queue pages
 */
static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
{
	int k, f = 0;
	u8 *kpage;

	while (f < nr_of_pages) {
		kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out;

		for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
			queue->queue_pages[f] = (struct ipz_page *)kpage;
			kpage += EHCA_PAGESIZE;
			f++;
		}
	}
	return 1;

out:
	for (f = 0; f < nr_of_pages && queue->queue_pages[f];
	     f += PAGES_PER_KPAGE)
		free_page((unsigned long)(queue->queue_pages)[f]);
	return 0;
}

static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
{
	/* the smallest queue page is 512 bytes, hence the "- 9" */
	int order = ilog2(queue->pagesize) - 9;
	struct ipz_small_queue_page *page;
	unsigned long bit;

	mutex_lock(&pd->lock);

	if (!list_empty(&pd->free[order]))
		page = list_entry(pd->free[order].next,
				  struct ipz_small_queue_page, list);
	else {
		page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
		if (!page)
			goto out;

		page->page = get_zeroed_page(GFP_KERNEL);
		if (!page->page) {
			kmem_cache_free(small_qp_cache, page);
			goto out;
		}

		list_add(&page->list, &pd->free[order]);
	}

	bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
	__set_bit(bit, page->bitmap);
	page->fill++;

	if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
		list_move(&page->list, &pd->full[order]);

	mutex_unlock(&pd->lock);

	queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
	queue->small_page = page;
	queue->offset = bit << (order + 9);
	return 1;

out:
	ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
	mutex_unlock(&pd->lock);
	return 0;
}

static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
{
	int order = ilog2(queue->pagesize) - 9;
	struct ipz_small_queue_page *page = queue->small_page;
	unsigned long bit;
	int free_page = 0;

	bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
		>> (order + 9);

	mutex_lock(&pd->lock);

	__clear_bit(bit, page->bitmap);
	page->fill--;

	if (page->fill == 0) {
		list_del(&page->list);
		free_page = 1;
	}

	if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
		/* the page was full until we freed the chunk */
		list_move_tail(&page->list, &pd->free[order]);

	mutex_unlock(&pd->lock);

	if (free_page) {
		free_page(page->page);
		kmem_cache_free(small_qp_cache, page);
	}
}
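/*
 * Worked example of the small-queue arithmetic above (illustrative;
 * assumes 4 KiB kernel pages, so IPZ_SPAGE_PER_KPAGE = PAGE_SIZE / 512
 * = 8 minimum-sized chunks per kernel page): for pagesize = 1024,
 * order = ilog2(1024) - 9 = 1, a kernel page holds 8 >> 1 = 4 chunks,
 * and bitmap bit n maps to byte offset n << (1 + 9) = n * 1024 within
 * the backing kernel page.
 */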
int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
		   const u32 nr_of_pages, const u32 pagesize,
		   const u32 qe_size, const u32 nr_of_sg,
		   int is_small)
{
	if (pagesize > PAGE_SIZE) {
		ehca_gen_err("FATAL ERROR: pagesize=%x "
			     "is greater than kernel page size", pagesize);
		return 0;
	}

	/* init queue fields */
	queue->queue_length = nr_of_pages * pagesize;
	queue->pagesize = pagesize;
	queue->qe_size = qe_size;
	queue->act_nr_of_sg = nr_of_sg;
	queue->current_q_offset = 0;
	queue->toggle_state = 1;
	queue->small_page = NULL;

	/*
	 * allocate queue page pointers; __GFP_NOWARN suppresses the
	 * allocation-failure warning since vzalloc serves as fallback
	 */
	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
				     GFP_KERNEL | __GFP_NOWARN);
	if (!queue->queue_pages) {
		queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
		if (!queue->queue_pages) {
			ehca_gen_err("Couldn't allocate queue page list");
			return 0;
		}
	}

	/* allocate actual queue pages */
	if (is_small) {
		if (!alloc_small_queue_page(queue, pd))
			goto ipz_queue_ctor_exit0;
	} else
		if (!alloc_queue_pages(queue, nr_of_pages))
			goto ipz_queue_ctor_exit0;

	return 1;

ipz_queue_ctor_exit0:
	ehca_gen_err("Couldn't alloc pages queue=%p "
		     "nr_of_pages=%x", queue, nr_of_pages);
	if (is_vmalloc_addr(queue->queue_pages))
		vfree(queue->queue_pages);
	else
		kfree(queue->queue_pages);

	return 0;
}

int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
{
	int i, nr_pages;

	if (!queue || !queue->queue_pages) {
		ehca_gen_dbg("queue or queue_pages is NULL");
		return 0;
	}

	if (queue->small_page)
		free_small_queue_page(queue, pd);
	else {
		nr_pages = queue->queue_length / queue->pagesize;
		for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
			free_page((unsigned long)queue->queue_pages[i]);
	}

	if (is_vmalloc_addr(queue->queue_pages))
		vfree(queue->queue_pages);
	else
		kfree(queue->queue_pages);

	return 1;
}

int ehca_init_small_qp_cache(void)
{
	small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
					   sizeof(struct ipz_small_queue_page),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!small_qp_cache)
		return -ENOMEM;

	return 0;
}

void ehca_cleanup_small_qp_cache(void)
{
	kmem_cache_destroy(small_qp_cache);
}
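/*
 * Usage sketch for ipz_queue_ctor()/ipz_queue_dtor() (illustrative
 * only, not taken from a real caller): constructing and destroying a
 * normal-sized queue of 16 eHCA pages with 64-byte entries might look
 * like this:
 *
 *	struct ipz_queue queue;
 *
 *	if (!ipz_queue_ctor(pd, &queue, 16, EHCA_PAGESIZE, 64, 0, 0))
 *		return -ENOMEM;
 *	...
 *	ipz_queue_dtor(pd, &queue);
 */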