ion_heap.c revision ea313b5f88ed7119f79ad3f6b85e9620971b9875
/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

/*
 * Map an entire buffer into the kernel's address space: gather every page
 * backing the sg_table into a flat array and vmap() it, write-combined
 * unless the buffer is cached.
 */
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

/*
 * Map a buffer into a userspace vma, skipping the first vm_pgoff pages and
 * remapping each scatterlist entry until the vma is full.
 */
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);
		int ret;

		if (offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}
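/*
 * Usage sketch (illustrative, not part of this file): a heap that keeps
 * its pages in buffer->sg_table can point its mapping ops straight at the
 * helpers above, as ion_system_heap does. "example_heap_ops" and the
 * allocate/free callbacks are hypothetical names used only to show the
 * wiring:
 *
 *	static struct ion_heap_ops example_heap_ops = {
 *		.allocate	= example_heap_allocate,
 *		.free		= example_heap_free,
 *		.map_kernel	= ion_heap_map_kernel,
 *		.unmap_kernel	= ion_heap_unmap_kernel,
 *		.map_user	= ion_heap_map_user,
 *	};
 */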
/*
 * Zero a buffer by mapping one page at a time through a single-page vm
 * area, which avoids allocating a page array for large buffers.
 */
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;
	struct scatterlist *sg;
	struct vm_struct *vm_struct;
	int i, j, ret = 0;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!vm_struct)
		return -ENOMEM;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long len = sg_dma_len(sg);

		for (j = 0; j < len / PAGE_SIZE; j++) {
			struct page *sub_page = page + j;
			struct page **pages = &sub_page;

			ret = map_vm_area(vm_struct, pgprot, &pages);
			if (ret)
				goto end;
			memset(vm_struct->addr, 0, PAGE_SIZE);
			unmap_kernel_range((unsigned long)vm_struct->addr,
					   PAGE_SIZE);
		}
	}
end:
	free_vm_area(vm_struct);
	return ret;
}

void ion_heap_free_page(struct ion_buffer *buffer, struct page *page,
			unsigned int order)
{
	int i;

	if (!ion_buffer_fault_user_mappings(buffer)) {
		__free_pages(page, order);
		return;
	}
	for (i = 0; i < (1 << order); i++)
		__free_page(page + i);
}

void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	rt_mutex_lock(&heap->lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	rt_mutex_unlock(&heap->lock);
	wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	rt_mutex_lock(&heap->lock);
	size = heap->free_list_size;
	rt_mutex_unlock(&heap->lock);

	return size;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer, *tmp;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	rt_mutex_lock(&heap->lock);
	if (size == 0)
		size = heap->free_list_size;

	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
		if (total_drained >= size)
			break;
		list_del(&buffer->list);
		/* update accounting before ion_buffer_destroy() frees it */
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		ion_buffer_destroy(buffer);
	}
	rt_mutex_unlock(&heap->lock);

	return total_drained;
}

int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		rt_mutex_lock(&heap->lock);
		if (list_empty(&heap->free_list)) {
			rt_mutex_unlock(&heap->lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		rt_mutex_unlock(&heap->lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	heap->free_list_size = 0;
	rt_mutex_init(&heap->lock);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_RET(heap->task);
	}
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}
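/*
 * Deferred-free flow (illustrative; this mirrors the caller in ion.c
 * rather than code in this file): when a buffer's last reference drops,
 * a heap created with deferred freeing enabled queues the buffer instead
 * of destroying it inline, and the SCHED_IDLE thread started above
 * reclaims it when the CPU is otherwise idle:
 *
 *	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
 *		ion_heap_freelist_add(heap, buffer);
 *	else
 *		ion_buffer_destroy(buffer);
 *
 * Under memory pressure a shrinker can instead call
 * ion_heap_freelist_drain() to reclaim the cached buffers synchronously.
 */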
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %u\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}

void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}
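/*
 * Registration sketch (illustrative, not part of this file): platform code
 * typically describes a heap with ion_platform_heap, instantiates it with
 * ion_heap_create(), and registers it with an ion_device. The device
 * pointer "idev" and the carveout base/size below are hypothetical values:
 *
 *	struct ion_platform_heap data = {
 *		.type = ION_HEAP_TYPE_CARVEOUT,
 *		.id   = 1,
 *		.name = "carveout",
 *		.base = 0x80000000,
 *		.size = SZ_16M,
 *	};
 *	struct ion_heap *heap = ion_heap_create(&data);
 *
 *	if (!IS_ERR_OR_NULL(heap))
 *		ion_device_add_heap(idev, heap);
 */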