ion.c revision ae5cbf4a5a117549717d1da12ac1bd84f10dac59
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:                the actual misc device
 * @buffers:            an rb tree of all the existing buffers
 * @buffer_lock:        lock protecting the tree of buffers
 * @lock:               rwsem protecting the tree of heaps and clients
 * @heaps:              list of all the heaps in the system
 * @custom_ioctl:       device-specific ioctl hook, may be NULL
 * @clients:            an rb tree of all the clients, both from userspace
 *                      and from within the kernel
 * @debug_root:         root dentry of this device's debugfs directory
 * @heaps_debug_root:   debugfs directory holding one entry per heap
 * @clients_debug_root: debugfs directory holding one entry per client
 */
struct ion_device {
        struct miscdevice dev;
        struct rb_root buffers;
        struct mutex buffer_lock;
        struct rw_semaphore lock;
        struct plist_head heaps;
        long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
                             unsigned long arg);
        struct rb_root clients;
        struct dentry *debug_root;
        struct dentry *heaps_debug_root;
        struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:               node in the tree of all clients
 * @dev:                backpointer to ion device
 * @handles:            an rb tree of all the handles in this client
 * @idr:                an idr space for allocating handle ids
 * @lock:               lock protecting the tree of handles
 * @name:               used for debugging
 * @task:               used for debugging
 * @pid:                pid of the client's group leader, used for debugging
 * @debug_root:         this client's entry in the clients debugfs directory
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves; it should be held while modifying either.
 */
struct ion_client {
        struct rb_node node;
        struct ion_device *dev;
        struct rb_root handles;
        struct idr idr;
        struct mutex lock;
        const char *name;
        struct task_struct *task;
        pid_t pid;
        struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:                reference count
 * @client:             back pointer to the client the buffer resides in
 * @buffer:             pointer to the buffer
 * @node:               node in the client's handle rbtree
 * @kmap_cnt:           count of times this client has mapped to kernel
 * @id:                 client-unique id allocated by client->idr
 *
 * Modifications to node, kmap_cnt or id should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
        struct kref ref;
        struct ion_client *client;
        struct ion_buffer *buffer;
        struct rb_node node;
        unsigned int kmap_cnt;
        int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
        return (buffer->flags & ION_FLAG_CACHED) &&
                !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
        return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
        return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
        return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
        *page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
        *page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
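
/*
 * The four helpers above stash a per-page dirty flag in bit 0 of each
 * struct page pointer held in buffer->pages.  struct page is always at
 * least word aligned, so bit 0 of a real pointer is guaranteed to be
 * zero.  An illustrative round trip (a sketch, not part of the driver):
 *
 *      ion_buffer_page_dirty(buffer->pages + i);        // tag as dirty
 *      if (ion_buffer_page_is_dirty(buffer->pages[i]))  // test the tag
 *              do_sync(ion_buffer_page(buffer->pages[i]));
 *      ion_buffer_page_clean(buffer->pages + i);        // clear the flag
 *
 * do_sync() stands in for ion_pages_sync_for_device() further down; the
 * pointer must always be untagged via ion_buffer_page() before use.
 */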

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long align,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        struct sg_table *table;
        struct scatterlist *sg;
        int i, ret;

        buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;
        kref_init(&buffer->ref);

        ret = heap->ops->allocate(heap, buffer, len, align, flags);

        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                ion_heap_freelist_drain(heap, 0);
                ret = heap->ops->allocate(heap, buffer, len, align,
                                          flags);
                if (ret)
                        goto err2;
        }

        buffer->dev = dev;
        buffer->size = len;

        table = heap->ops->map_dma(heap, buffer);
        if (WARN_ONCE(table == NULL,
                      "heap->ops->map_dma should return ERR_PTR on error"))
                table = ERR_PTR(-EINVAL);
        if (IS_ERR(table)) {
                heap->ops->free(buffer);
                kfree(buffer);
                return ERR_PTR(PTR_ERR(table));
        }
        buffer->sg_table = table;
        if (ion_buffer_fault_user_mappings(buffer)) {
                int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
                struct scatterlist *sg;
                int i, j, k = 0;

                buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
                if (!buffer->pages) {
                        ret = -ENOMEM;
                        goto err1;
                }

                for_each_sg(table->sgl, sg, table->nents, i) {
                        struct page *page = sg_page(sg);

                        for (j = 0; j < sg->length / PAGE_SIZE; j++)
                                buffer->pages[k++] = page++;
                }

                if (ret)
                        goto err;
        }

        buffer->dev = dev;
        buffer->size = len;
        INIT_LIST_HEAD(&buffer->vmas);
        mutex_init(&buffer->lock);
        /*
         * this will set up dma addresses for the sglist -- it is not
         * technically correct as per the dma api -- a specific
         * device isn't really taking ownership here.  However, in practice
         * on our systems the only dma_address space is physical addresses.
         * Additionally, we can't afford the overhead of invalidating every
         * allocation via dma_map_sg.  The implicit contract here is that
         * memory coming from the heaps is ready for dma, i.e. if it has a
         * cached mapping that mapping has been invalidated.
         */
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
                sg_dma_address(sg) = sg_phys(sg);
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
        return buffer;

err:
        heap->ops->unmap_dma(heap, buffer);
        heap->ops->free(buffer);
err1:
        if (buffer->pages)
                vfree(buffer->pages);
err2:
        kfree(buffer);
        return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
        if (WARN_ON(buffer->kmap_cnt > 0))
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        buffer->heap->ops->unmap_dma(buffer->heap, buffer);
        buffer->heap->ops->free(buffer);
        if (buffer->pages)
                vfree(buffer->pages);
        kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
        struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
        struct ion_heap *heap = buffer->heap;
        struct ion_device *dev = buffer->dev;

        mutex_lock(&dev->buffer_lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->buffer_lock);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_freelist_add(heap, buffer);
        else
                ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
        kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
        return kref_put(&buffer->ref, _ion_buffer_destroy);
}
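
/*
 * Buffer lifetime in brief: ion_buffer_create() starts the kref at 1,
 * every handle created against the buffer takes a reference
 * (ion_handle_create() below), and exporting through dma-buf takes
 * another (ion_share_dma_buf()).  When the final reference is dropped,
 * _ion_buffer_destroy() frees the buffer immediately, or queues it on
 * the heap's freelist when the heap was registered with
 * ION_HEAP_FLAG_DEFER_FREE.
 */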

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
        mutex_lock(&buffer->lock);
        buffer->handle_count++;
        mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
        /*
         * when a buffer is removed from a handle, if it is not in
         * any other handles, copy the taskcomm and the pid of the
         * process it's being removed from into the buffer.  At this
         * point there will be no way to track what processes this buffer
         * is being used by; it only exists as a dma_buf file descriptor.
         * The taskcomm and pid can provide a debug hint as to where this
         * fd is in the system.
         */
        mutex_lock(&buffer->lock);
        buffer->handle_count--;
        BUG_ON(buffer->handle_count < 0);
        if (!buffer->handle_count) {
                struct task_struct *task;

                task = current->group_leader;
                get_task_comm(buffer->task_comm, task);
                buffer->pid = task_pid_nr(task);
        }
        mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct ion_handle *handle;

        handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
        if (!handle)
                return ERR_PTR(-ENOMEM);
        kref_init(&handle->ref);
        RB_CLEAR_NODE(&handle->node);
        handle->client = client;
        ion_buffer_get(buffer);
        ion_buffer_add_to_handle(buffer);
        handle->buffer = buffer;

        return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
        struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
        struct ion_client *client = handle->client;
        struct ion_buffer *buffer = handle->buffer;

        mutex_lock(&buffer->lock);
        while (handle->kmap_cnt)
                ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);

        idr_remove(&client->idr, handle->id);
        if (!RB_EMPTY_NODE(&handle->node))
                rb_erase(&handle->node, &client->handles);

        ion_buffer_remove_from_handle(buffer);
        ion_buffer_put(buffer);

        kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
        return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
        kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
        struct ion_client *client = handle->client;
        int ret;

        mutex_lock(&client->lock);
        ret = kref_put(&handle->ref, ion_handle_destroy);
        mutex_unlock(&client->lock);

        return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct rb_node *n = client->handles.rb_node;

        while (n) {
                struct ion_handle *entry = rb_entry(n, struct ion_handle,
                                                    node);

                if (buffer < entry->buffer)
                        n = n->rb_left;
                else if (buffer > entry->buffer)
                        n = n->rb_right;
                else
                        return entry;
        }
        return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
                                               int id)
{
        struct ion_handle *handle;

        mutex_lock(&client->lock);
        handle = idr_find(&client->idr, id);
        if (handle)
                ion_handle_get(handle);
        mutex_unlock(&client->lock);

        return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
                                struct ion_handle *handle)
{
        WARN_ON(!mutex_is_locked(&client->lock));
        return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
        int id;
        struct rb_node **p = &client->handles.rb_node;
        struct rb_node *parent = NULL;
        struct ion_handle *entry;

        id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
        if (id < 0)
                return id;

        handle->id = id;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_handle, node);

                if (handle->buffer < entry->buffer)
                        p = &(*p)->rb_left;
                else if (handle->buffer > entry->buffer)
                        p = &(*p)->rb_right;
                else
                        WARN(1, "%s: buffer already found.", __func__);
        }

        rb_link_node(&handle->node, parent, p);
        rb_insert_color(&handle->node, &client->handles);

        return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                             size_t align, unsigned int heap_id_mask,
                             unsigned int flags)
{
        struct ion_handle *handle;
        struct ion_device *dev = client->dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;
        int ret;

        pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
                 len, align, heap_id_mask, flags);
        /*
         * traverse the list of heaps available in this system in priority
         * order.  If the heap type is supported by the client and matches
         * the request of the caller, allocate from it.  Repeat until
         * allocate has succeeded or all heaps have been tried.
         */
        len = PAGE_ALIGN(len);

        if (!len)
                return ERR_PTR(-EINVAL);

        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, align, flags);
                if (!IS_ERR(buffer))
                        break;
        }
        up_read(&dev->lock);

        if (buffer == NULL)
                return ERR_PTR(-ENODEV);

        if (IS_ERR(buffer))
                return ERR_PTR(PTR_ERR(buffer));

        handle = ion_handle_create(client, buffer);

        /*
         * ion_buffer_create will create a buffer with a ref_cnt of 1,
         * and ion_handle_create will take a second reference, drop one here
         */
        ion_buffer_put(buffer);

        if (IS_ERR(handle))
                return handle;

        mutex_lock(&client->lock);
        ret = ion_handle_add(client, handle);
        mutex_unlock(&client->lock);
        if (ret) {
                ion_handle_put(handle);
                handle = ERR_PTR(ret);
        }

        return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
        bool valid_handle;

        BUG_ON(client != handle->client);

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);

        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to free.\n", __func__);
                mutex_unlock(&client->lock);
                return;
        }
        mutex_unlock(&client->lock);
        ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
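
/*
 * Sketch of typical in-kernel usage of ion_alloc()/ion_free().  This is
 * illustrative only: "my_client" is assumed to come from
 * ion_client_create() and "my_heap_id" is whatever id the platform code
 * registered for the desired heap.
 *
 *      struct ion_handle *handle;
 *
 *      handle = ion_alloc(my_client, SZ_1M, PAGE_SIZE,
 *                         1 << my_heap_id, ION_FLAG_CACHED);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      // ... use the buffer ...
 *      ion_free(my_client, handle);
 */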

int ion_phys(struct ion_client *client, struct ion_handle *handle,
             ion_phys_addr_t *addr, size_t *len)
{
        struct ion_buffer *buffer;
        int ret;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                mutex_unlock(&client->lock);
                return -EINVAL;
        }

        buffer = handle->buffer;

        if (!buffer->heap->ops->phys) {
                pr_err("%s: ion_phys is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return -ENODEV;
        }
        mutex_unlock(&client->lock);
        ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
        return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (WARN_ONCE(vaddr == NULL,
                      "heap->ops->map_kernel should return ERR_PTR on error"))
                return ERR_PTR(-EINVAL);
        if (IS_ERR(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;
        void *vaddr;

        if (handle->kmap_cnt) {
                handle->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = ion_buffer_kmap_get(buffer);
        if (IS_ERR(vaddr))
                return vaddr;
        handle->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;

        handle->kmap_cnt--;
        if (!handle->kmap_cnt)
                ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        void *vaddr;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_kernel.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }

        buffer = handle->buffer;

        if (!handle->buffer->heap->ops->map_kernel) {
                pr_err("%s: map_kernel is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-ENODEV);
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_handle_kmap_get(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
        return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        buffer = handle->buffer;
        mutex_lock(&buffer->lock);
        ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
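
/*
 * Kernel mappings are reference counted at both the handle and the
 * buffer level, so repeated or nested maps of the same buffer reuse a
 * single kernel mapping.  A minimal sketch (illustrative only):
 *
 *      void *vaddr = ion_map_kernel(client, handle);
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *      memset(vaddr, 0, len);
 *      ion_unmap_kernel(client, handle);
 */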

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
        struct ion_client *client = s->private;
        struct rb_node *n;
        size_t sizes[ION_NUM_HEAP_IDS] = {0};
        const char *names[ION_NUM_HEAP_IDS] = {NULL};
        int i;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                unsigned int id = handle->buffer->heap->id;

                if (!names[id])
                        names[id] = handle->buffer->heap->name;
                sizes[id] += handle->buffer->size;
        }
        mutex_unlock(&client->lock);

        seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
        for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
                if (!names[i])
                        continue;
                seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
        }
        return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
        .open = ion_debug_client_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
                                     const char *name)
{
        struct ion_client *client;
        struct task_struct *task;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ion_client *entry;
        pid_t pid;

        get_task_struct(current->group_leader);
        task_lock(current->group_leader);
        pid = task_pid_nr(current->group_leader);
        /*
         * don't bother to store task struct for kernel threads,
         * they can't be killed anyway
         */
        if (current->group_leader->flags & PF_KTHREAD) {
                put_task_struct(current->group_leader);
                task = NULL;
        } else {
                task = current->group_leader;
        }
        task_unlock(current->group_leader);

        client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
        if (!client)
                goto err_put_task_struct;

        client->dev = dev;
        client->handles = RB_ROOT;
        idr_init(&client->idr);
        mutex_init(&client->lock);
        client->task = task;
        client->pid = pid;
        client->name = kstrdup(name, GFP_KERNEL);
        if (!client->name)
                goto err_free_client;

        down_write(&dev->lock);
        p = &dev->clients.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_client, node);

                if (client < entry)
                        p = &(*p)->rb_left;
                else if (client > entry)
                        p = &(*p)->rb_right;
        }
        rb_link_node(&client->node, parent, p);
        rb_insert_color(&client->node, &dev->clients);

        client->debug_root = debugfs_create_file(name, 0664,
                                                 dev->clients_debug_root,
                                                 client, &debug_client_fops);
        if (!client->debug_root) {
                char buf[256], *path;

                path = dentry_path(dev->clients_debug_root, buf, 256);
                pr_err("Failed to create client debugfs at %s/%s\n",
                       path, name);
        }

        up_write(&dev->lock);

        return client;

err_free_client:
        kfree(client);
err_put_task_struct:
        if (task)
                put_task_struct(current->group_leader);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
        struct ion_device *dev = client->dev;
        struct rb_node *n;

        pr_debug("%s: %d\n", __func__, __LINE__);
        while ((n = rb_first(&client->handles))) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                ion_handle_destroy(&handle->ref);
        }

        idr_destroy(&client->idr);

        down_write(&dev->lock);
        if (client->task)
                put_task_struct(client->task);
        rb_erase(&client->node, &dev->clients);
        debugfs_remove_recursive(client->debug_root);
        up_write(&dev->lock);

        kfree(client->name);
        kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
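
/*
 * Clients come from two places: in-kernel users call ion_client_create()
 * directly, while userspace gets a client implicitly when it opens
 * /dev/ion (see ion_open() below).  ion_client_destroy() tears down any
 * handles still outstanding, so a client that exits without freeing its
 * handles still releases its buffer references.
 */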

struct sg_table *ion_sg_table(struct ion_client *client,
                              struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct sg_table *table;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_dma.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }
        buffer = handle->buffer;
        table = buffer->sg_table;
        mutex_unlock(&client->lock);
        return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct dma_buf *dmabuf = attachment->dmabuf;
        struct ion_buffer *buffer = dmabuf->priv;

        ion_buffer_sync_for_device(buffer, attachment->dev, direction);
        return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
                               size_t size, enum dma_data_direction dir)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        /*
         * This is not correct - sg_dma_address needs a dma_addr_t that is
         * valid for the targeted device, but this works on the currently
         * targeted hardware.
         */
        sg_dma_address(&sg) = page_to_phys(page);
        dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction dir)
{
        struct ion_vma_list *vma_list;
        int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        int i;

        pr_debug("%s: syncing for device %s\n", __func__,
                 dev ? dev_name(dev) : "null");

        if (!ion_buffer_fault_user_mappings(buffer))
                return;

        mutex_lock(&buffer->lock);
        for (i = 0; i < pages; i++) {
                struct page *page = buffer->pages[i];

                if (ion_buffer_page_is_dirty(page))
                        ion_pages_sync_for_device(dev, ion_buffer_page(page),
                                                  PAGE_SIZE, dir);

                ion_buffer_page_clean(buffer->pages + i);
        }
        list_for_each_entry(vma_list, &buffer->vmas, list) {
                struct vm_area_struct *vma = vma_list->vma;

                zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
                               NULL);
        }
        mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        unsigned long pfn;
        int ret;

        mutex_lock(&buffer->lock);
        ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
        BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

        pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
        mutex_unlock(&buffer->lock);
        if (ret)
                return VM_FAULT_ERROR;

        return VM_FAULT_NOPAGE;
}
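
/*
 * The fault handler above, together with ion_buffer_sync_for_device(),
 * implements lazy cache maintenance for cached buffers that do not set
 * ION_FLAG_CACHED_NEEDS_SYNC: pages are mapped into userspace one at a
 * time on fault and tagged dirty; before DMA, only the dirty pages are
 * flushed and the user mappings are zapped so the next CPU touch faults
 * again.  This trades per-fault overhead for avoiding a full-buffer
 * flush on every device transfer.
 */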

static void ion_vm_open(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list;

        vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
        if (!vma_list)
                return;
        vma_list->vma = vma;
        mutex_lock(&buffer->lock);
        list_add(&vma_list->list, &buffer->vmas);
        mutex_unlock(&buffer->lock);
        pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list, *tmp;

        pr_debug("%s\n", __func__);
        mutex_lock(&buffer->lock);
        list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
                if (vma_list->vma != vma)
                        continue;
                list_del(&vma_list->list);
                kfree(vma_list);
                pr_debug("%s: deleting %p\n", __func__, vma);
                break;
        }
        mutex_unlock(&buffer->lock);
}

static struct vm_operations_struct ion_vma_ops = {
        .open = ion_vm_open,
        .close = ion_vm_close,
        .fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping to userspace\n",
                       __func__);
                return -EINVAL;
        }

        if (ion_buffer_fault_user_mappings(buffer)) {
                vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
                                 VM_DONTDUMP;
                vma->vm_private_data = buffer;
                vma->vm_ops = &ion_vma_ops;
                ion_vm_open(vma);
                return 0;
        }

        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;

        ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
        struct ion_buffer *buffer = dmabuf->priv;

        return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
                                        size_t len,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;

        if (!buffer->heap->ops->map_kernel) {
                pr_err("%s: map kernel is not implemented by this heap.\n",
                       __func__);
                return -ENODEV;
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_buffer_kmap_get(buffer);
        mutex_unlock(&buffer->lock);
        return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
                                       size_t len,
                                       enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        ion_buffer_kmap_put(buffer);
        mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .kmap_atomic = ion_dma_buf_kmap,
        .kunmap_atomic = ion_dma_buf_kunmap,
        .kmap = ion_dma_buf_kmap,
        .kunmap = ion_dma_buf_kunmap,
};
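
/*
 * Everything a dma-buf consumer can do with an ion buffer is routed
 * through the ops table above: mapping an attachment returns the
 * buffer's sg_table (after syncing any dirty pages), mmap defers to the
 * heap's map_user method or to the fault path, and begin/end_cpu_access
 * piggyback on the kernel-mapping refcounting.
 */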

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
                                  struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct dma_buf *dmabuf;
        bool valid_handle;

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to share.\n", __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }
        buffer = handle->buffer;
        ion_buffer_get(buffer);
        mutex_unlock(&client->lock);

        dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
        if (IS_ERR(dmabuf)) {
                ion_buffer_put(buffer);
                return dmabuf;
        }

        return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
        struct dma_buf *dmabuf;
        int fd;

        dmabuf = ion_share_dma_buf(client, handle);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
        struct ion_handle *handle;
        int ret;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf))
                return ERR_PTR(PTR_ERR(dmabuf));

        /* if this memory came from ion */
        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not import dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return ERR_PTR(-EINVAL);
        }
        buffer = dmabuf->priv;

        mutex_lock(&client->lock);
        /* if a handle exists for this buffer just take a reference to it */
        handle = ion_handle_lookup(client, buffer);
        if (!IS_ERR(handle)) {
                ion_handle_get(handle);
                mutex_unlock(&client->lock);
                goto end;
        }
        mutex_unlock(&client->lock);

        handle = ion_handle_create(client, buffer);
        if (IS_ERR(handle))
                goto end;

        mutex_lock(&client->lock);
        ret = ion_handle_add(client, handle);
        mutex_unlock(&client->lock);
        if (ret) {
                ion_handle_put(handle);
                handle = ERR_PTR(ret);
        }

end:
        dma_buf_put(dmabuf);
        return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        /* if this memory came from ion */
        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not sync dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return -EINVAL;
        }
        buffer = dmabuf->priv;

        dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                               buffer->sg_table->nents, DMA_BIDIRECTIONAL);
        dma_buf_put(dmabuf);
        return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
        switch (cmd) {
        case ION_IOC_SYNC:
        case ION_IOC_FREE:
        case ION_IOC_CUSTOM:
                return _IOC_WRITE;
        default:
                return _IOC_DIR(cmd);
        }
}
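
/*
 * ION_IOC_SYNC, ION_IOC_FREE and ION_IOC_CUSTOM are defined as _IOWR
 * even though the kernel never copies anything back for them, so
 * ion_ioctl_dir() reports them as plain _IOC_WRITE and ion_ioctl()
 * skips the pointless copy_to_user().  ION_IOC_FREE, for instance,
 * only carries a handle id from user to kernel.
 */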

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct ion_client *client = filp->private_data;
        struct ion_device *dev = client->dev;
        struct ion_handle *cleanup_handle = NULL;
        int ret = 0;
        unsigned int dir;

        union {
                struct ion_fd_data fd;
                struct ion_allocation_data allocation;
                struct ion_handle_data handle;
                struct ion_custom_data custom;
        } data;

        dir = ion_ioctl_dir(cmd);

        if (_IOC_SIZE(cmd) > sizeof(data))
                return -EINVAL;

        if (dir & _IOC_WRITE)
                if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
                        return -EFAULT;

        switch (cmd) {
        case ION_IOC_ALLOC:
        {
                struct ion_handle *handle;

                handle = ion_alloc(client, data.allocation.len,
                                   data.allocation.align,
                                   data.allocation.heap_id_mask,
                                   data.allocation.flags);
                if (IS_ERR(handle))
                        return PTR_ERR(handle);

                data.allocation.handle = handle->id;

                cleanup_handle = handle;
                break;
        }
        case ION_IOC_FREE:
        {
                struct ion_handle *handle;

                handle = ion_handle_get_by_id(client, data.handle.handle);
                if (IS_ERR(handle))
                        return PTR_ERR(handle);
                ion_free(client, handle);
                ion_handle_put(handle);
                break;
        }
        case ION_IOC_SHARE:
        case ION_IOC_MAP:
        {
                struct ion_handle *handle;

                handle = ion_handle_get_by_id(client, data.handle.handle);
                if (IS_ERR(handle))
                        return PTR_ERR(handle);
                data.fd.fd = ion_share_dma_buf_fd(client, handle);
                ion_handle_put(handle);
                if (data.fd.fd < 0)
                        ret = data.fd.fd;
                break;
        }
        case ION_IOC_IMPORT:
        {
                struct ion_handle *handle;

                handle = ion_import_dma_buf(client, data.fd.fd);
                if (IS_ERR(handle))
                        ret = PTR_ERR(handle);
                else
                        data.handle.handle = handle->id;
                break;
        }
        case ION_IOC_SYNC:
        {
                ret = ion_sync_for_device(client, data.fd.fd);
                break;
        }
        case ION_IOC_CUSTOM:
        {
                if (!dev->custom_ioctl)
                        return -ENOTTY;
                ret = dev->custom_ioctl(client, data.custom.cmd,
                                        data.custom.arg);
                break;
        }
        default:
                return -ENOTTY;
        }

        if (dir & _IOC_READ) {
                if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
                        if (cleanup_handle)
                                ion_free(client, cleanup_handle);
                        return -EFAULT;
                }
        }
        return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
        struct ion_client *client = file->private_data;

        pr_debug("%s: %d\n", __func__, __LINE__);
        ion_client_destroy(client);
        return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
        struct miscdevice *miscdev = file->private_data;
        struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
        struct ion_client *client;
        char debug_name[64];

        pr_debug("%s: %d\n", __func__, __LINE__);
        snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
        client = ion_client_create(dev, debug_name);
        if (IS_ERR(client))
                return PTR_ERR(client);
        file->private_data = client;

        return 0;
}

static const struct file_operations ion_fops = {
        .owner = THIS_MODULE,
        .open = ion_open,
        .release = ion_release,
        .unlocked_ioctl = ion_ioctl,
        .compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
                                   unsigned int id)
{
        size_t size = 0;
        struct rb_node *n;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n,
                                                     struct ion_handle,
                                                     node);
                if (handle->buffer->heap->id == id)
                        size += handle->buffer->size;
        }
        mutex_unlock(&client->lock);
        return size;
}
"----------------------------------------------------\n"); 1357 1358 for (n = rb_first(&dev->clients); n; n = rb_next(n)) { 1359 struct ion_client *client = rb_entry(n, struct ion_client, 1360 node); 1361 size_t size = ion_debug_heap_total(client, heap->id); 1362 if (!size) 1363 continue; 1364 if (client->task) { 1365 char task_comm[TASK_COMM_LEN]; 1366 1367 get_task_comm(task_comm, client->task); 1368 seq_printf(s, "%16.s %16u %16zu\n", task_comm, 1369 client->pid, size); 1370 } else { 1371 seq_printf(s, "%16.s %16u %16zu\n", client->name, 1372 client->pid, size); 1373 } 1374 } 1375 seq_printf(s, "----------------------------------------------------\n"); 1376 seq_printf(s, "orphaned allocations (info is from last known client):" 1377 "\n"); 1378 mutex_lock(&dev->buffer_lock); 1379 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { 1380 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, 1381 node); 1382 if (buffer->heap->id != heap->id) 1383 continue; 1384 total_size += buffer->size; 1385 if (!buffer->handle_count) { 1386 seq_printf(s, "%16.s %16u %16zu %d %d\n", 1387 buffer->task_comm, buffer->pid, 1388 buffer->size, buffer->kmap_cnt, 1389 atomic_read(&buffer->ref.refcount)); 1390 total_orphaned_size += buffer->size; 1391 } 1392 } 1393 mutex_unlock(&dev->buffer_lock); 1394 seq_printf(s, "----------------------------------------------------\n"); 1395 seq_printf(s, "%16.s %16zu\n", "total orphaned", 1396 total_orphaned_size); 1397 seq_printf(s, "%16.s %16zu\n", "total ", total_size); 1398 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) 1399 seq_printf(s, "%16.s %16zu\n", "deferred free", 1400 heap->free_list_size); 1401 seq_printf(s, "----------------------------------------------------\n"); 1402 1403 if (heap->debug_show) 1404 heap->debug_show(heap, s, unused); 1405 1406 return 0; 1407} 1408 1409static int ion_debug_heap_open(struct inode *inode, struct file *file) 1410{ 1411 return single_open(file, ion_debug_heap_show, inode->i_private); 1412} 1413 1414static const struct file_operations debug_heap_fops = { 1415 .open = ion_debug_heap_open, 1416 .read = seq_read, 1417 .llseek = seq_lseek, 1418 .release = single_release, 1419}; 1420 1421#ifdef DEBUG_HEAP_SHRINKER 1422static int debug_shrink_set(void *data, u64 val) 1423{ 1424 struct ion_heap *heap = data; 1425 struct shrink_control sc; 1426 int objs; 1427 1428 sc.gfp_mask = -1; 1429 sc.nr_to_scan = 0; 1430 1431 if (!val) 1432 return 0; 1433 1434 objs = heap->shrinker.shrink(&heap->shrinker, &sc); 1435 sc.nr_to_scan = objs; 1436 1437 heap->shrinker.shrink(&heap->shrinker, &sc); 1438 return 0; 1439} 1440 1441static int debug_shrink_get(void *data, u64 *val) 1442{ 1443 struct ion_heap *heap = data; 1444 struct shrink_control sc; 1445 int objs; 1446 1447 sc.gfp_mask = -1; 1448 sc.nr_to_scan = 0; 1449 1450 objs = heap->shrinker.shrink(&heap->shrinker, &sc); 1451 *val = objs; 1452 return 0; 1453} 1454 1455DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, 1456 debug_shrink_set, "%llu\n"); 1457#endif 1458 1459void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) 1460{ 1461 struct dentry *debug_file; 1462 1463 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || 1464 !heap->ops->unmap_dma) 1465 pr_err("%s: can not add heap with invalid ops struct.\n", 1466 __func__); 1467 1468 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) 1469 ion_heap_init_deferred_free(heap); 1470 1471 heap->dev = dev; 1472 down_write(&dev->lock); 1473 /* use negative heap->id to reverse the priority -- when traversing 1474 

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
        struct dentry *debug_file;

        if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
            !heap->ops->unmap_dma)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_init_deferred_free(heap);

        heap->dev = dev;
        down_write(&dev->lock);
        /*
         * use negative heap->id to reverse the priority -- when traversing
         * the list later, attempt higher id numbers first
         */
        plist_node_init(&heap->node, -heap->id);
        plist_add(&heap->node, &dev->heaps);
        debug_file = debugfs_create_file(heap->name, 0664,
                                         dev->heaps_debug_root, heap,
                                         &debug_heap_fops);

        if (!debug_file) {
                char buf[256], *path;

                path = dentry_path(dev->heaps_debug_root, buf, 256);
                pr_err("Failed to create heap debugfs at %s/%s\n",
                       path, heap->name);
        }

#ifdef DEBUG_HEAP_SHRINKER
        if (heap->shrinker.shrink) {
                char debug_name[64];

                snprintf(debug_name, 64, "%s_shrink", heap->name);
                debug_file = debugfs_create_file(
                        debug_name, 0644, dev->heaps_debug_root, heap,
                        &debug_shrink_fops);
                if (!debug_file) {
                        char buf[256], *path;

                        path = dentry_path(dev->heaps_debug_root, buf, 256);
                        pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
                               path, debug_name);
                }
        }
#endif
        up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
                                     (struct ion_client *client,
                                      unsigned int cmd,
                                      unsigned long arg))
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
        if (!idev)
                return ERR_PTR(-ENOMEM);

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ERR_PTR(ret);
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        if (!idev->debug_root) {
                pr_err("ion: failed to create debugfs root directory.\n");
                goto debugfs_done;
        }
        idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
        if (!idev->heaps_debug_root) {
                pr_err("ion: failed to create debugfs heaps directory.\n");
                goto debugfs_done;
        }
        idev->clients_debug_root = debugfs_create_dir("clients",
                                                      idev->debug_root);
        if (!idev->clients_debug_root)
                pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:
        idev->custom_ioctl = custom_ioctl;
        idev->buffers = RB_ROOT;
        mutex_init(&idev->buffer_lock);
        init_rwsem(&idev->lock);
        plist_head_init(&idev->heaps);
        idev->clients = RB_ROOT;
        return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
        misc_deregister(&dev->dev);
        debugfs_remove_recursive(dev->debug_root);
        /* XXX need to free the heaps and clients? */
        kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
        int i;

        for (i = 0; i < data->nr; i++) {
                if (data->heaps[i].size == 0)
                        continue;

                if (data->heaps[i].base == 0) {
                        phys_addr_t paddr;

                        paddr = memblock_alloc_base(data->heaps[i].size,
                                                    data->heaps[i].align,
                                                    MEMBLOCK_ALLOC_ANYWHERE);
                        if (!paddr) {
                                pr_err("%s: error allocating memblock for heap %d\n",
                                       __func__, i);
                                continue;
                        }
                        data->heaps[i].base = paddr;
                } else {
                        int ret = memblock_reserve(data->heaps[i].base,
                                                   data->heaps[i].size);
                        if (ret)
                                pr_err("memblock reserve of %zx@%lx failed\n",
                                       data->heaps[i].size,
                                       data->heaps[i].base);
                }
                pr_info("%s: %s reserved base %lx size %zu\n", __func__,
                        data->heaps[i].name,
                        data->heaps[i].base,
                        data->heaps[i].size);
        }
}