ion_heap.c revision 8b312bb9a795c9c07661aee7b694cbfd3217e25c
/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	/*
	 * Flatten the scatterlist into an array of page pointers so the
	 * whole buffer can be mapped with a single vmap() call.
	 */
	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (vaddr == NULL)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		/* Skip entries entirely before the mmap offset; trim the
		 * first entry that straddles it. */
		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vm_unmap_ram(addr, num);

	return 0;
}

int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;
	struct scatterlist *sg;
	int i, j, ret = 0;
	struct page *pages[32];	/* bounds the transient vm_map_ram mapping */
	int k = 0;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	/*
	 * Zero the buffer in batches of up to 32 pages through a transient
	 * kernel mapping, so the buffer never has to be mapped as a whole.
	 */
	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long len = sg->length;

		for (j = 0; j < len / PAGE_SIZE; j++) {
			pages[k++] = page + j;
			if (k == ARRAY_SIZE(pages)) {
				ret = ion_heap_clear_pages(pages, k, pgprot);
				if (ret)
					goto end;
				k = 0;
			}
		}
	}
	/* Clear whatever is left over from the final partial batch. */
	if (k)
		ret = ion_heap_clear_pages(pages, k, pgprot);
end:
	return ret;
}
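/*
 * Illustrative sketch (not part of this file): a heap's ->free() callback
 * would typically scrub a buffer with ion_heap_buffer_zero() before its
 * pages are recycled, so stale data never reaches the next client. The
 * callback name below is hypothetical; ion_system_heap.c is a real caller
 * of this helper.
 */
#if 0
static void example_heap_free(struct ion_buffer *buffer)
{
	/* Zero through a transient mapping; honors ION_FLAG_CACHED. */
	if (ion_heap_buffer_zero(buffer))
		pr_warn("failed to zero buffer before recycling it\n");
	/* ...then return buffer->sg_table pages to a pool or allocator. */
}
#endif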
struct page *ion_heap_alloc_pages(struct ion_buffer *buffer, gfp_t gfp_flags,
				  unsigned int order)
{
	struct page *page = alloc_pages(gfp_flags, order);

	if (!page)
		return page;

	/* Buffers faulted into userspace page by page must be backed by
	 * order-0 pages, so split the higher-order allocation. */
	if (ion_buffer_fault_user_mappings(buffer))
		split_page(page, order);

	return page;
}

void ion_heap_free_pages(struct ion_buffer *buffer, struct page *page,
			 unsigned int order)
{
	int i;

	if (!ion_buffer_fault_user_mappings(buffer)) {
		__free_pages(page, order);
		return;
	}
	/* The allocation was split into order-0 pages; free them singly. */
	for (i = 0; i < (1 << order); i++)
		__free_page(page + i);
}

void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	rt_mutex_lock(&heap->lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	rt_mutex_unlock(&heap->lock);
	wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	rt_mutex_lock(&heap->lock);
	size = heap->free_list_size;
	rt_mutex_unlock(&heap->lock);

	return size;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer, *tmp;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	rt_mutex_lock(&heap->lock);
	if (size == 0)
		size = heap->free_list_size;

	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
		if (total_drained >= size)
			break;
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		ion_buffer_destroy(buffer);
	}
	rt_mutex_unlock(&heap->lock);

	return total_drained;
}

int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		rt_mutex_lock(&heap->lock);
		if (list_empty(&heap->free_list)) {
			rt_mutex_unlock(&heap->lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		rt_mutex_unlock(&heap->lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	heap->free_list_size = 0;
	rt_mutex_init(&heap->lock);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_RET(heap->task);
	}
	/* Reap freed buffers in the background at idle priority only. */
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}
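/*
 * Illustrative sketch (not part of this file): the deferred-free machinery
 * above is driven by the ion core. Assuming the ION_HEAP_FLAG_DEFER_FREE
 * flag and ion_device_add_heap() from ion.c, registration and the free
 * path look roughly like this; nr_to_free is a placeholder byte count.
 */
#if 0
	/* At registration time, heaps that opt in get a reaper thread: */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	/* On release, instead of destroying the buffer synchronously,
	 * park it on the freelist; ion_heap_deferred_free() reaps it
	 * later at SCHED_IDLE priority: */
	ion_heap_freelist_add(heap, buffer);

	/* Under memory pressure, a shrinker can reclaim eagerly: */
	ion_heap_freelist_drain(heap, nr_to_free * PAGE_SIZE);
#endif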
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}

void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}
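/*
 * Illustrative sketch (not part of this file): a typical consumer of
 * ion_heap_create() is a vendor ion driver that walks its platform data
 * and registers each heap with the ion device. The names idev and pdata
 * are hypothetical.
 */
#if 0
	for (i = 0; i < pdata->nr; i++) {
		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

		if (IS_ERR_OR_NULL(heap))
			continue;	/* ion_heap_create() already logged */
		ion_device_add_heap(idev, heap);
	}
#endif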