ion_system_heap.c revision ea313b5f88ed7119f79ad3f6b85e9620971b9875
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
		if (!page)
			return NULL;
		__dma_page_cpu_to_dev(page, 0, PAGE_SIZE << order,
				      DMA_BIDIRECTIONAL);
	}
	if (!page)
		return NULL;

	if (split_pages)
		split_page(page, order);
	return page;
}

static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];

		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}
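/*
 * Allocate the largest chunk of pages that still fits in 'size': walk
 * the supported orders from largest to smallest and return the first
 * allocation that succeeds.  The caller passes the order of the
 * previous successful allocation as max_order, so once a high order
 * has failed it is never retried for the remainder of the buffer.
 */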
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}

	return NULL;
}

static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	bool cached = ion_buffer_cached(buffer);
	struct scatterlist *sg;
	int i;

	/*
	 * Uncached pages come from the page pools; zero them before
	 * returning them for security purposes (other allocations are
	 * zeroed at alloc time).
	 */
	if (!cached)
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};
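/*
 * Shrinker callback using the old single shrink() entry point: when
 * called with sc->nr_to_scan == 0 it only reports how many pages are
 * reclaimable, otherwise it frees up to nr_to_scan pages.  The
 * deferred-free list is drained before the page pools, since zeroing
 * pages that are about to be reclaimed anyway would be wasted work.
 */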
static int ion_system_heap_shrink(struct shrinker *shrinker,
				  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int nr_total = 0;
	int nr_freed = 0;
	int i;

	if (sc->nr_to_scan == 0)
		goto end;

	/*
	 * Shrink the free list first; there is no point in zeroing the
	 * memory if we're just going to reclaim it.
	 */
	nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
		PAGE_SIZE;

	if (nr_freed >= sc->nr_to_scan)
		goto end;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
						 sc->nr_to_scan);
		if (nr_freed >= sc->nr_to_scan)
			break;
	}

end:
	/*
	 * The total number of items is whatever the page pools are
	 * holding plus whatever's in the freelist.
	 */
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
	}
	nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
	return nr_total;
}

static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}

	heap->heap.shrinker.shrink = ion_system_heap_shrink;
	heap->heap.shrinker.seeks = DEFAULT_SEEKS;
	heap->heap.shrinker.batch = 0;
	register_shrinker(&heap->heap.shrinker);
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}
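/*
 * What follows is the ION_HEAP_TYPE_SYSTEM_CONTIG heap: each buffer is
 * a single physically contiguous kzalloc() allocation, which is why
 * this heap can also implement the phys op.
 */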
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}
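/*
 * Illustrative only (not part of the original file): a minimal sketch
 * of how a caller might instantiate this heap, assuming an ion_device
 * 'idev' has already been created with ion_device_create().  Kept
 * under #if 0 so it never compiles into the driver.
 */
#if 0
static struct ion_heap *example_sys_heap;

static int example_add_system_heap(struct ion_device *idev)
{
	example_sys_heap = ion_system_heap_create(NULL);
	if (IS_ERR(example_sys_heap))
		return PTR_ERR(example_sys_heap);
	/* Make the heap visible to ion clients. */
	ion_device_add_heap(idev, example_sys_heap);
	return 0;
}
#endif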