ion_system_heap.c revision f4ea823be2ca9e61522de002804c9a7a54c9be16
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_WAIT;
static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
		if (!page)
			return NULL;
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);
	}
	if (!page)
		return NULL;

	return page;
}

static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];

		ion_page_pool_free(pool, page);
	} else {
		__free_pages(page, order);
	}
}

static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
	if (!info)
		return NULL;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info->page = page;
		info->order = orders[i];
		return info;
	}
	kfree(info);

	return NULL;
}
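
/*
 * Worked example of the alloc_largest_available() fallback, assuming
 * 4 KiB pages: a 1.25 MiB request is first satisfied with one order-8
 * (1 MiB) chunk; the remaining 256 KiB is below the order-8 threshold,
 * so subsequent passes fall through to order-4 (64 KiB) chunks; anything
 * smaller than 64 KiB is covered by order-0 pages.  A failed high-order
 * allocation simply drops to the next smaller order instead of failing
 * the whole request.
 */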

static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];

	if (align > PAGE_SIZE)
		return -EINVAL;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer, size_remaining,
					       max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}
	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	ret = sg_alloc_table(table, i, GFP_KERNEL);
	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
		sg = sg_next(sg);
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	return 0;

err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	bool cached = ion_buffer_cached(buffer);
	struct scatterlist *sg;
	LIST_HEAD(pages);
	int i;

	/*
	 * Uncached pages come from the page pools; zero them before
	 * returning them for security purposes (other allocations are
	 * zeroed at alloc time).
	 */
	if (!cached)
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg->length));
	sg_free_table(table);
	kfree(table);
}

static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_system_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

static unsigned long ion_system_heap_shrink_count(struct shrinker *shrinker,
						  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int nr_total = 0;
	int i;

	/*
	 * The total number of items is whatever the page pools are
	 * holding plus whatever is on the deferred-free list.
	 */
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
	}
	nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
	return nr_total;
}
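
/*
 * Shrinker contract: the count callback above reports how much is
 * reclaimable, and the scan callback below is asked to actually free up
 * to sc->nr_to_scan objects, returning how many it freed.  Accounting
 * here is done in single pages, hence the PAGE_SIZE conversions when
 * sizing and draining the freelist.
 */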

static unsigned long ion_system_heap_shrink_scan(struct shrinker *shrinker,
						 struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int nr_freed = 0;
	int i;

	if (sc->nr_to_scan == 0)
		goto end;

	/*
	 * Shrink the free list first; there is no point in zeroing the
	 * memory if we're just going to reclaim it.
	 */
	nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
		PAGE_SIZE;

	if (nr_freed >= sc->nr_to_scan)
		goto end;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
						 sc->nr_to_scan);
		if (nr_freed >= sc->nr_to_scan)
			break;
	}

end:
	return nr_freed;
}

static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}

	heap->heap.shrinker.scan_objects = ion_system_heap_shrink_scan;
	heap->heap.shrinker.count_objects = ion_system_heap_shrink_count;
	heap->heap.shrinker.seeks = DEFAULT_SEEKS;
	heap->heap.shrinker.batch = 0;
	register_shrinker(&heap->heap.shrinker);
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}
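
/*
 * The "system contig" heap below is the physically contiguous
 * counterpart: each buffer is a single alloc_pages() allocation, split
 * into order-0 pages so the unused tail of the rounded-up allocation can
 * be returned immediately, and exported through a one-entry scatterlist.
 */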

static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	int order = get_order(len);
	struct page *page;
	struct sg_table *table;
	unsigned long i;
	int ret;

	if (align > (PAGE_SIZE << order))
		return -EINVAL;

	page = alloc_pages(low_order_gfp_flags, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);

	len = PAGE_ALIGN(len);
	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
		__free_page(page + i);

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto out;

	sg_set_page(table->sgl, page, len, 0);

	buffer->priv_virt = table;

	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);

	return 0;

out:
	for (i = 0; i < len >> PAGE_SHIFT; i++)
		__free_page(page + i);
	kfree(table);
	return ret;
}

static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < pages; i++)
		__free_page(page + i);
	sg_free_table(table);
	kfree(table);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);

	*addr = page_to_phys(page);
	*len = buffer->size;
	return 0;
}

static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
					     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}
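
/*
 * A minimal usage sketch, not part of the original file: how a driver of
 * this era might register and tear down the system heap.  It assumes an
 * ion_device obtained elsewhere via ion_device_create(); the example_*
 * names are hypothetical.  Kept under #if 0 because it is illustrative
 * only.
 */
#if 0
static struct ion_device *idev;	/* assumed: from ion_device_create() */
static struct ion_heap *sys_heap;

static int example_ion_init(void)
{
	sys_heap = ion_system_heap_create(NULL);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);
	ion_device_add_heap(idev, sys_heap);
	return 0;
}

static void example_ion_exit(void)
{
	ion_system_heap_destroy(sys_heap);
}
#endif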