ion_heap.c revision e1d855b02f5ac4c3a6cbeaa253958b2708826b9f
/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

/*
 * Map a whole buffer into the kernel's address space with vmap().
 * Cached buffers get a normal kernel mapping; uncached buffers are
 * mapped write-combined.
 */
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	/* Flatten the scatterlist into a page array vmap() can take. */
	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (vaddr == NULL)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

/*
 * Map a buffer into a userspace vma, honouring the vma's page offset
 * and stopping once the vma has been filled.
 */
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			/* This entry lies entirely before the mapped offset. */
			offset -= sg->length;
			continue;
		} else if (offset) {
			/* Start partway into this entry. */
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

/* Zero @num pages through a temporary vm_map_ram() mapping. */
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vm_unmap_ram(addr, num);

	return 0;
}

/*
 * Zero a scatterlist by mapping and clearing its pages in batches of
 * up to 32, so a large buffer never needs one big mapping.
 */
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
				pgprot_t pgprot)
{
	int p = 0;
	int ret = 0;
	struct sg_page_iter piter;
	struct page *pages[32];

	for_each_sg_page(sgl, &piter, nents, 0) {
		pages[p++] = sg_page_iter_page(&piter);
		if (p == ARRAY_SIZE(pages)) {
			ret = ion_heap_clear_pages(pages, p, pgprot);
			if (ret)
				return ret;
			p = 0;
		}
	}
	if (p)
		ret = ion_heap_clear_pages(pages, p, pgprot);

	return ret;
}
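/*
 * Zero every page in @buffer's scatterlist. The pages are mapped with
 * the same cacheability the buffer itself uses (cached vs.
 * write-combined), matching the pgprot choice in ion_heap_map_kernel()
 * above.
 */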
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

/* Zero @size bytes of physically contiguous memory starting at @page. */
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	return ion_heap_sglist_zero(&sg, 1, pgprot);
}

/* Queue a buffer for the deferred-free thread and wake it up. */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	rt_mutex_lock(&heap->lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	rt_mutex_unlock(&heap->lock);
	wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	rt_mutex_lock(&heap->lock);
	size = heap->free_list_size;
	rt_mutex_unlock(&heap->lock);

	return size;
}

/*
 * Synchronously free queued buffers until at least @size bytes have
 * been drained; @size == 0 means drain the whole freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer, *tmp;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	rt_mutex_lock(&heap->lock);
	if (size == 0)
		size = heap->free_list_size;

	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
		if (total_drained >= size)
			break;
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		ion_buffer_destroy(buffer);
	}
	rt_mutex_unlock(&heap->lock);

	return total_drained;
}

static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		rt_mutex_lock(&heap->lock);
		if (list_empty(&heap->free_list)) {
			rt_mutex_unlock(&heap->lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		rt_mutex_unlock(&heap->lock);
		/* Destroy outside the lock; this can be expensive. */
		ion_buffer_destroy(buffer);
	}

	return 0;
}

int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	heap->free_list_size = 0;
	rt_mutex_init(&heap->lock);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_RET(heap->task);
	}
	/* Free in the background only when the CPU would otherwise idle. */
	sched_setscheduler(heap->task, SCHED_IDLE, &param);
	return 0;
}
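/*
 * Usage sketch (illustrative only, not part of this file): a heap opts
 * into deferred freeing by setting ION_HEAP_FLAG_DEFER_FREE in its
 * flags before registration, after which the ion core is expected to
 * call ion_heap_init_deferred_free() and hand freed buffers to
 * ion_heap_freelist_add():
 *
 *	heap->flags |= ION_HEAP_FLAG_DEFER_FREE;
 *	ion_device_add_heap(idev, heap);
 *
 * A shrinker or low-memory handler can then reclaim the queued memory
 * synchronously, e.g. ion_heap_freelist_drain(heap, 0) to drain
 * everything.
 */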
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;
	return heap;
}

void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}
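/*
 * Example (illustrative; the platform data below is hypothetical):
 * creating a system heap from board data and tearing it down again.
 *
 *	struct ion_platform_heap pdata = {
 *		.type = ION_HEAP_TYPE_SYSTEM,
 *		.id   = 0,
 *		.name = "system",
 *	};
 *	struct ion_heap *heap = ion_heap_create(&pdata);
 *
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 *	...
 *	ion_heap_destroy(heap);
 */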