ion_system_heap.c revision def5c4926b3f691dfbcef39a94f206c0f0f3a92f
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
		if (!page)
			return NULL;
		arm_dma_ops.sync_single_for_device(NULL,
			pfn_to_dma(NULL, page_to_pfn(page)),
			PAGE_SIZE << order, DMA_BIDIRECTIONAL);
	}
	if (!page)
		return NULL;

	if (split_pages)
		split_page(page, order);
	return page;
}

static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];

		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}
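/*
 * Grab the largest chunk that both fits in the remaining size and does
 * not exceed the order of the previous chunk, walking orders[] = {8, 4, 0}
 * from largest to smallest.  With 4K pages that means trying 1MB, then
 * 64K, then single pages.  Uncached chunks come from the per-order page
 * pools; cached chunks bypass the pools entirely.
 */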
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			/* undo the page allocation if we can't track it */
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	bool cached = ion_buffer_cached(buffer);
	struct scatterlist *sg;
	int i;

	/* uncached pages come from the page pools, zero them before
	   returning for security purposes (other allocations are
	   zeroed at alloc time) */
	if (!cached)
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};
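/*
 * Old-style shrinker callback (the pre-3.12 interface, where a single
 * shrink() op serves both queries and scans): when sc->nr_to_scan is
 * zero the core only wants a count of reclaimable pages.  Otherwise we
 * drain the deferred-free list first, since there is no point zeroing
 * memory that is about to be reclaimed, and only then dip into the
 * page pools.
 */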
static int ion_system_heap_shrink(struct shrinker *shrinker,
				  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int nr_total = 0;
	int nr_freed = 0;
	int i;

	if (sc->nr_to_scan == 0)
		goto end;

	/* shrink the free list first, no point in zeroing the memory if
	   we're just going to reclaim it */
	nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
		PAGE_SIZE;

	if (nr_freed >= sc->nr_to_scan)
		goto end;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
						 sc->nr_to_scan);
		if (nr_freed >= sc->nr_to_scan)
			break;
	}

end:
	/* total number of items is whatever the page pools are holding
	   plus whatever's in the freelist */
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
	}
	nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
	return nr_total;
}

static int ion_system_heap_debug_show(struct ion_heap *heap,
				      struct seq_file *s, void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}

	heap->heap.shrinker.shrink = ion_system_heap_shrink;
	heap->heap.shrinker.seeks = DEFAULT_SEEKS;
	heap->heap.shrinker.batch = 0;
	register_shrinker(&heap->heap.shrinker);
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	/* stop the shrinker before tearing down the pools it walks */
	unregister_shrinker(&heap->shrinker);
	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}
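/*
 * The "system contig" heap below is a thin wrapper around kzalloc(),
 * so every buffer it hands out is physically contiguous.  It is only
 * usable for allocations small enough for kmalloc to satisfy.
 */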
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}