ion_priv.h revision 349c9e13855109df99c5205a4e8d53d9fa169490
/*
 * drivers/staging/android/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>

#include "ion.h"

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:		reference count
 * @node:		node in the ion_device buffers tree
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data to the buffer representable as
 *			a void *
 * @priv_phys:		private data to the buffer representable as
 *			an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:		protects the buffer's cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:		number of times the buffer is mapped for dma
 * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
 * @dirty:		bitmask representing which pages of this buffer have
 *			been dirtied by the cpu and need cache maintenance
 *			before dma
 * @vmas:		list of vmas mapping this buffer
 * @handle_count:	count of handles referencing this buffer
 * @task_comm:		task_comm of the last client to reference this buffer
 *			in a handle, used for debugging
 * @pid:		pid of the last client to reference this buffer in a
 *			handle, used for debugging
 */
struct ion_buffer {
	struct kref ref;
	union {
		struct rb_node node;
		struct list_head list;
	};
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct sg_table *sg_table;
	unsigned long *dirty;
	struct list_head vmas;
	/* used to track orphaned buffers */
	int handle_count;
	char task_comm[TASK_COMM_LEN];
	pid_t pid;
};
void ion_buffer_destroy(struct ion_buffer *buffer);

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @phys:		get physical address of a buffer (only define on
 *			physically contiguous heaps)
 * @map_dma:		map the memory for dma to a scatterlist
 * @unmap_dma:		unmap the memory for dma
 * @map_kernel:		map memory into the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory to userspace
 */
struct ion_heap_ops {
	int (*allocate)(struct ion_heap *heap,
			struct ion_buffer *buffer, unsigned long len,
			unsigned long align, unsigned long flags);
	void (*free)(struct ion_buffer *buffer);
	int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
		    ion_phys_addr_t *addr, size_t *len);
	struct sg_table * (*map_dma)(struct ion_heap *heap,
				     struct ion_buffer *buffer);
	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma);
};
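/*
 * Illustrative sketch (not part of the original header): the skeleton of a
 * heap implementation wired into an ion_heap_ops table.  All my_heap_*
 * names are hypothetical, and the block is guarded with #if 0 because it
 * is an example only.  A common pattern is to build an sg_table at
 * allocation time, stash it in buffer->priv_virt, and hand it back from
 * map_dma; the kernel/user mapping ops can then reuse the generic
 * sg_table-based helpers declared later in this header.
 */
#if 0
static int my_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	struct sg_table *table;

	/* a real heap would acquire pages here and fill the table */
	table = kzalloc(sizeof(*table), GFP_KERNEL);	/* needs <linux/slab.h> */
	if (!table)
		return -ENOMEM;
	buffer->priv_virt = table;
	return 0;
}

static void my_heap_free(struct ion_buffer *buffer)
{
	/* release the pages, then the table built by my_heap_allocate() */
	kfree(buffer->priv_virt);
}

static struct sg_table *my_heap_map_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	/* the table lives for the buffer's lifetime, so just return it */
	return buffer->priv_virt;
}

static void my_heap_unmap_dma(struct ion_heap *heap,
			      struct ion_buffer *buffer)
{
	/* nothing to undo; my_heap_free() tears the table down */
}

static struct ion_heap_ops my_heap_ops = {
	.allocate	= my_heap_allocate,
	.free		= my_heap_free,
	/* .phys is left unset: only physically contiguous heaps define it */
	.map_dma	= my_heap_map_dma,
	.unmap_dma	= my_heap_unmap_dma,
	.map_kernel	= ion_heap_map_kernel,
	.unmap_kernel	= ion_heap_unmap_kernel,
	.map_user	= ion_heap_map_user,
};
#endif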
/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)

/**
 * struct ion_heap - represents a heap in the system
 * @node:		plist node to put the heap on the device's list of heaps
 * @dev:		back pointer to the ion_device
 * @type:		type of heap
 * @ops:		ops struct as above
 * @flags:		flags
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating.  These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 * @shrinker:		a shrinker for the heap.  If the heap caches system
 *			memory, it must define a shrinker to return it on low
 *			memory conditions; this includes system memory cached
 *			in the deferred free lists for heaps that support it
 * @free_list:		free list head if deferred free is used
 * @free_list_size:	size of the deferred free list in bytes
 * @lock:		protects the free list
 * @waitqueue:		queue to wait on from deferred free thread
 * @task:		task struct of deferred free thread
 * @debug_show:		called when heap debug file is read to add any
 *			heap specific debug info to output
 *
 * Represents a pool of memory from which buffers can be made.  In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct plist_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	unsigned long flags;
	unsigned int id;
	const char *name;
	struct shrinker shrinker;
	struct list_head free_list;
	size_t free_list_size;
	struct rt_mutex lock;
	wait_queue_head_t waitqueue;
	struct task_struct *task;
	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
};

/**
 * ion_buffer_cached - whether this ion buffer is cached
 * @buffer:		buffer
 *
 * indicates whether this ion buffer is cached
 */
bool ion_buffer_cached(struct ion_buffer *buffer);

/**
 * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
 * @buffer:		buffer
 *
 * Indicates whether userspace mappings of this buffer will be faulted
 * in; this can affect how buffers are allocated from the heap.
 */
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);

/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:	arch specific ioctl function if applicable
 *
 * returns a valid device or an ERR_PTR on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));

/**
 * ion_device_destroy - frees a device and its resources
 * @dev:		the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:		the device
 * @heap:		the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
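/*
 * Illustrative sketch (not part of the original header): typical boot-time
 * wiring of an ion device.  The my_ion_probe name and the platform heap
 * values are hypothetical, NULL is passed for the optional arch-specific
 * ioctl, and the system heap creator declared later in this header is
 * used; a real driver would iterate the heaps listed in its
 * struct ion_platform_data instead.
 */
#if 0
static struct ion_device *my_idev;

static int my_ion_probe(void)
{
	struct ion_platform_heap heap_data = {
		.type = ION_HEAP_TYPE_SYSTEM,
		.id   = 0,
		.name = "system",
	};
	struct ion_heap *heap;

	my_idev = ion_device_create(NULL);	/* no custom ioctl */
	if (IS_ERR(my_idev))
		return PTR_ERR(my_idev);

	heap = ion_system_heap_create(&heap_data);
	if (IS_ERR(heap)) {
		ion_device_destroy(my_idev);
		return PTR_ERR(heap);
	}

	ion_device_add_heap(my_idev, heap);
	return 0;
}
#endif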
/**
 * some helpers for common operations on buffers using the sg_table
 * and vaddr fields
 */
void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
		      struct vm_area_struct *);
int ion_heap_buffer_zero(struct ion_buffer *buffer);

/**
 * ion_heap_init_deferred_free - initialize deferred free functionality
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 * be called to set up deferred frees.  Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);

/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:		the heap
 * @buffer:		the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately.  Returns the total amount freed.  The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:		the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);
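/*
 * Illustrative sketch (not part of the original header): a heap opting in
 * to deferred freeing.  Per the comments above, a heap only needs to set
 * ION_HEAP_FLAG_DEFER_FREE; ion_heap_init_deferred_free() is then called
 * to start the free thread, and freed buffers are parked on
 * heap->free_list until that thread runs.  The my_* names (and the reuse
 * of my_heap_ops from the earlier sketch) are hypothetical.
 */
#if 0
static struct ion_heap *my_deferred_heap_create(void)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);	/* needs <linux/slab.h> */
	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->ops = &my_heap_ops;
	heap->type = ION_HEAP_TYPE_CUSTOM;
	heap->flags = ION_HEAP_FLAG_DEFER_FREE;	/* opt in to deferred free */
	return heap;
}

static void my_heap_drop_caches(struct ion_heap *heap)
{
	/* synchronously release everything still parked on the freelist */
	ion_heap_freelist_drain(heap, ion_heap_freelist_size(heap));
}
#endif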
/**
 * functions for creating and destroying the built in ion heaps.
 * architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
void ion_chunk_heap_destroy(struct ion_heap *);
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);

/**
 * kernel api to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);
/**
 * The carveout heap returns physical addresses; since 0 may be a valid
 * physical address, this is used to indicate that allocation failed.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL -1

/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of preallocated memory to use from your heap.  Keeping
 * a pool of memory that is ready for dma, i.e. any cached mappings have
 * been invalidated from the cache, provides a significant performance
 * benefit on many systems.
 */

/**
 * struct ion_page_pool - page pool struct
 * @high_count:		number of highmem items in the pool
 * @low_count:		number of lowmem items in the pool
 * @high_items:		list of highmem items
 * @low_items:		list of lowmem items
 * @mutex:		lock protecting this struct, especially the counts
 *			and item lists
 * @gfp_mask:		gfp_mask to use when allocating pages
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
 *
 * Allows you to keep a pool of preallocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
 * have been invalidated from the cache, provides a significant performance
 * benefit on many systems.
 */
struct ion_page_pool {
	int high_count;
	int low_count;
	struct list_head high_items;
	struct list_head low_items;
	struct mutex mutex;
	gfp_t gfp_mask;
	unsigned int order;
	struct plist_node list;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *);
void *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);

/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:		the pool
 * @gfp_mask:		the memory type to reclaim
 * @nr_to_scan:		number of items to shrink in pages
 *
 * returns the number of items freed in pages
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
			 int nr_to_scan);

#endif /* _ION_PRIV_H */
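/*
 * Illustrative sketch (not part of the original header, appended after the
 * guard purely as an example): a heap keeping a pool of order-0 zeroed
 * pages.  ion_page_pool_alloc() falls back to the page allocator when the
 * pool is empty, and ion_page_pool_free() parks pages in the pool instead
 * of releasing them.  The my_* names and the gfp mask choice are
 * hypothetical.
 */
#if 0
static struct ion_page_pool *my_pool;

static int my_pool_setup(void)
{
	my_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
	return my_pool ? 0 : -ENOMEM;
}

static struct page *my_pool_get_page(void)
{
	return ion_page_pool_alloc(my_pool);
}

static void my_pool_put_page(struct page *page)
{
	ion_page_pool_free(my_pool, page);
}

static void my_pool_reclaim(int nr_pages)
{
	/* release up to nr_pages cached pages back to the system */
	ion_page_pool_shrink(my_pool, GFP_KERNEL, nr_pages);
}
#endif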