ion_system_heap.c revision c13bd1c4eb714c08214e897fcbe51b13e0e0f279
1/* 2 * drivers/staging/android/ion/ion_system_heap.c 3 * 4 * Copyright (C) 2011 Google, Inc. 5 * 6 * This software is licensed under the terms of the GNU General Public 7 * License version 2, as published by the Free Software Foundation, and 8 * may be copied, distributed, and modified under those terms. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 */ 16 17#include <asm/page.h> 18#include <linux/dma-mapping.h> 19#include <linux/err.h> 20#include <linux/highmem.h> 21#include <linux/mm.h> 22#include <linux/scatterlist.h> 23#include <linux/seq_file.h> 24#include <linux/slab.h> 25#include <linux/vmalloc.h> 26#include "ion.h" 27#include "ion_priv.h" 28 29static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | 30 __GFP_NOWARN | __GFP_NORETRY) & 31 ~__GFP_WAIT; 32static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | 33 __GFP_NOWARN); 34static const unsigned int orders[] = {8, 4, 0}; 35static const int num_orders = ARRAY_SIZE(orders); 36static int order_to_index(unsigned int order) 37{ 38 int i; 39 for (i = 0; i < num_orders; i++) 40 if (order == orders[i]) 41 return i; 42 BUG(); 43 return -1; 44} 45 46static unsigned int order_to_size(int order) 47{ 48 return PAGE_SIZE << order; 49} 50 51struct ion_system_heap { 52 struct ion_heap heap; 53 struct ion_page_pool **pools; 54}; 55 56struct page_info { 57 struct page *page; 58 unsigned int order; 59 struct list_head list; 60}; 61 62static struct page *alloc_buffer_page(struct ion_system_heap *heap, 63 struct ion_buffer *buffer, 64 unsigned long order) 65{ 66 bool cached = ion_buffer_cached(buffer); 67 struct ion_page_pool *pool = heap->pools[order_to_index(order)]; 68 struct page *page; 69 70 if (!cached) { 71 page = ion_page_pool_alloc(pool); 72 } else { 73 gfp_t gfp_flags = 
low_order_gfp_flags; 74 75 if (order > 4) 76 gfp_flags = high_order_gfp_flags; 77 page = ion_heap_alloc_pages(buffer, gfp_flags, order); 78 if (!page) 79 return 0; 80 arm_dma_ops.sync_single_for_device(NULL, 81 pfn_to_dma(NULL, page_to_pfn(page)), 82 PAGE_SIZE << order, DMA_BIDIRECTIONAL); 83 } 84 if (!page) 85 return 0; 86 87 return page; 88} 89 90static void free_buffer_page(struct ion_system_heap *heap, 91 struct ion_buffer *buffer, struct page *page, 92 unsigned int order) 93{ 94 bool cached = ion_buffer_cached(buffer); 95 bool split_pages = ion_buffer_fault_user_mappings(buffer); 96 int i; 97 98 if (!cached) { 99 struct ion_page_pool *pool = heap->pools[order_to_index(order)]; 100 ion_page_pool_free(pool, page); 101 } else if (split_pages) { 102 for (i = 0; i < (1 << order); i++) 103 __free_page(page + i); 104 } else { 105 __free_pages(page, order); 106 } 107} 108 109 110static struct page_info *alloc_largest_available(struct ion_system_heap *heap, 111 struct ion_buffer *buffer, 112 unsigned long size, 113 unsigned int max_order) 114{ 115 struct page *page; 116 struct page_info *info; 117 int i; 118 119 for (i = 0; i < num_orders; i++) { 120 if (size < order_to_size(orders[i])) 121 continue; 122 if (max_order < orders[i]) 123 continue; 124 125 page = alloc_buffer_page(heap, buffer, orders[i]); 126 if (!page) 127 continue; 128 129 info = kmalloc(sizeof(struct page_info), GFP_KERNEL); 130 info->page = page; 131 info->order = orders[i]; 132 return info; 133 } 134 return NULL; 135} 136 137static int ion_system_heap_allocate(struct ion_heap *heap, 138 struct ion_buffer *buffer, 139 unsigned long size, unsigned long align, 140 unsigned long flags) 141{ 142 struct ion_system_heap *sys_heap = container_of(heap, 143 struct ion_system_heap, 144 heap); 145 struct sg_table *table; 146 struct scatterlist *sg; 147 int ret; 148 struct list_head pages; 149 struct page_info *info, *tmp_info; 150 int i = 0; 151 long size_remaining = PAGE_ALIGN(size); 152 unsigned int max_order 
= orders[0]; 153 154 INIT_LIST_HEAD(&pages); 155 while (size_remaining > 0) { 156 info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order); 157 if (!info) 158 goto err; 159 list_add_tail(&info->list, &pages); 160 size_remaining -= (1 << info->order) * PAGE_SIZE; 161 max_order = info->order; 162 i++; 163 } 164 165 table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 166 if (!table) 167 goto err; 168 169 ret = sg_alloc_table(table, i, GFP_KERNEL); 170 if (ret) 171 goto err1; 172 173 sg = table->sgl; 174 list_for_each_entry_safe(info, tmp_info, &pages, list) { 175 struct page *page = info->page; 176 sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0); 177 sg = sg_next(sg); 178 list_del(&info->list); 179 kfree(info); 180 } 181 182 buffer->priv_virt = table; 183 return 0; 184err1: 185 kfree(table); 186err: 187 list_for_each_entry(info, &pages, list) { 188 free_buffer_page(sys_heap, buffer, info->page, info->order); 189 kfree(info); 190 } 191 return -ENOMEM; 192} 193 194void ion_system_heap_free(struct ion_buffer *buffer) 195{ 196 struct ion_heap *heap = buffer->heap; 197 struct ion_system_heap *sys_heap = container_of(heap, 198 struct ion_system_heap, 199 heap); 200 struct sg_table *table = buffer->sg_table; 201 bool cached = ion_buffer_cached(buffer); 202 struct scatterlist *sg; 203 LIST_HEAD(pages); 204 int i; 205 206 /* uncached pages come from the page pools, zero them before returning 207 for security purposes (other allocations are zerod at alloc time */ 208 if (!cached) 209 ion_heap_buffer_zero(buffer); 210 211 for_each_sg(table->sgl, sg, table->nents, i) 212 free_buffer_page(sys_heap, buffer, sg_page(sg), 213 get_order(sg_dma_len(sg))); 214 sg_free_table(table); 215 kfree(table); 216} 217 218struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap, 219 struct ion_buffer *buffer) 220{ 221 return buffer->priv_virt; 222} 223 224void ion_system_heap_unmap_dma(struct ion_heap *heap, 225 struct ion_buffer *buffer) 226{ 227 return; 228} 
static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

/*
 * Shrinker callback (legacy single-callback shrinker API: nr_to_scan == 0
 * means "just report the count").  Reclaims first from the heap's deferred
 * free list (no point zeroing memory that is about to be reclaimed), then
 * from the per-order page pools, and finally returns the number of pages
 * still held.
 */
static int ion_system_heap_shrink(struct shrinker *shrinker,
				  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int nr_total = 0;
	int nr_freed = 0;
	int i;

	/* count-only query: skip straight to the accounting below */
	if (sc->nr_to_scan == 0)
		goto end;

	/* shrink the free list first, no point in zeroing the memory if
	   we're just going to reclaim it */
	nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
		PAGE_SIZE;

	if (nr_freed >= sc->nr_to_scan)
		goto end;

	/* still short: drain the page pools until the target is met */
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
						 sc->nr_to_scan);
		if (nr_freed >= sc->nr_to_scan)
			break;
	}

end:
	/* total number of items is whatever the page pools are holding
	   plus whatever's in the freelist */
	/* NOTE(review): ion_page_pool_shrink with nr_to_scan == 0 appears to
	   be the pool's "report count" convention -- confirm in ion_priv.h */
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];
		nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
	}
	nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
	return nr_total;

}

/*
 * debugfs hook: print per-order pool occupancy (highmem and lowmem
 * counts) for this system heap.
 */
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];
		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}

/*
 * Create a system heap: one page pool per entry of orders[], deferred
 * freeing enabled, and a registered shrinker so the pools give memory
 * back under pressure.  Returns ERR_PTR(-ENOMEM) on any failure, with
 * all partially-created pools destroyed.
 */
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		/* mirror alloc_buffer_page: big orders must not retry/warn */
		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}

	heap->heap.shrinker.shrink = ion_system_heap_shrink;
	heap->heap.shrinker.seeks = DEFAULT_SEEKS;
	heap->heap.shrinker.batch = 0;
	register_shrinker(&heap->heap.shrinker);
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;
err_create_pool:
	/* pools array was kzalloc'd, so unset slots are NULL and skipped */
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

/* Tear down a system heap: destroy every pool, then the heap itself. */
void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

/*
 * Contiguous ("kmalloc") heap: the whole buffer is one physically
 * contiguous kzalloc'd region, kept in buffer->priv_virt.
 */
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

/* Report the physical address/length of the contiguous allocation. */
static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

/*
 * Build a one-entry sg_table covering the contiguous buffer.  Unlike the
 * system heap, this table is created per map_dma call and torn down in
 * unmap_dma.  Returns ERR_PTR on allocation failure.
 */
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

/* Release the sg_table created by ion_system_contig_heap_map_dma. */
void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

/* Map the contiguous buffer into userspace with a single remap_pfn_range. */
int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);

}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

/* Create a contiguous heap instance; ERR_PTR(-ENOMEM) on failure. */
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}