/* ion.c revision 7287bb5258800d93dc8f5eb5115025c75a69014b */
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	device-specific hook invoked for ION_IOC_CUSTOM
 * @clients:		an rb tree of all the existing clients
 * @debug_root:		root dentry of the ion debugfs tree
 * @heaps_debug_root:	debugfs directory holding the per-heap entries
 * @clients_debug_root:	debugfs directory holding the per-client entries
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 * @pid:		pid of the process the client was created from
 * @debug_root:		this client's entry in debugfs
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node or kmap_cnt should be protected by the lock in
 * the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
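/*
 * The four helpers above exploit the alignment of struct page pointers:
 * bit 0 of a valid pointer is always clear, so it can double as a
 * per-page dirty flag without any extra storage.  buffer->pages[] holds
 * tagged pointers; ion_buffer_page_dirty() sets the bit,
 * ion_buffer_page_clean() clears it, and ion_buffer_page() masks it off
 * before the pointer is dereferenced.  A sketch of the round trip
 * (hypothetical address, not from this file):
 *
 *	struct page *page = buffer->pages[i];	// e.g. 0xffffea0000001001
 *	ion_buffer_page_is_dirty(page);		// true: bit 0 is set
 *	page = ion_buffer_page(page);		// 0xffffea0000001000
 */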
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_CAST(table);
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}

		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, ie if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err1:
	if (buffer->pages)
		vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}
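/*
 * Note the retry in ion_buffer_create(): on a heap flagged
 * ION_HEAP_FLAG_DEFER_FREE, buffers are freed lazily from a freelist, so
 * a failed allocation may only mean freed memory has not been returned
 * yet.  ion_heap_freelist_drain(heap, 0) flushes the backlog
 * synchronously and the allocation is attempted exactly once more before
 * the error is propagated.
 */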
void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}
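/*
 * Two reference counts are in play here.  buffer->ref (a kref) keeps the
 * buffer itself alive; it is held by every handle and by every exported
 * dma_buf, and _ion_buffer_destroy() runs when it drops to zero.
 * buffer->handle_count, by contrast, only counts client handles and is
 * used for accounting: when it reaches zero the buffer may still be alive
 * through a dma_buf fd, so the last owner's comm/pid are recorded for the
 * "orphaned allocations" report in the heap debugfs file below.
 */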
static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}
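/*
 * A handle is indexed twice.  client->idr maps the small integer id
 * handed to userspace back to the handle (ion_handle_get_by_id), while
 * client->handles is an rb tree keyed by buffer pointer so that importing
 * the same dma_buf twice finds the existing handle (ion_handle_lookup)
 * instead of creating a duplicate.
 */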
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
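/*
 * For reference, the exported in-kernel client API composes like this.
 * A minimal sketch, not from this file: the "my_dev" name, the SZ_64K
 * size and the assumption that the system heap was registered with id
 * ION_HEAP_TYPE_SYSTEM are all illustrative, and error handling is
 * abbreviated.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, "my_dev"); // idev from ion_device_create()
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *			   1 << ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED);
 *	if (!IS_ERR(handle)) {
 *		// ... use ion_map_kernel()/ion_sg_table() here ...
 *		ion_free(client, handle);
 *	}
 *	ion_client_destroy(client);
 */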
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
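/*
 * Kernel mappings are reference counted at two levels, mirroring the
 * handle/buffer split: handle->kmap_cnt counts this client's
 * ion_map_kernel() calls, and its first mapping bumps buffer->kmap_cnt,
 * which counts mapping clients across the whole device.  The heap's
 * map_kernel()/unmap_kernel() ops therefore run only on the 0->1 and
 * 1->0 transitions of buffer->kmap_cnt; every other caller just reuses
 * the cached buffer->vaddr.
 */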
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
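/*
 * Clients may share a name (every process that opens /dev/ion is named
 * after its pid in ion_open(), while kernel drivers pick arbitrary
 * strings), so the debugfs entry is "<name>-<serial>":
 * ion_get_client_serial() scans existing clients for the highest serial
 * used with this name and returns one more.  Two clients created as
 * "camera" would thus appear as camera-0 and camera-1 under
 * <debugfs>/ion/clients/.
 */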
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}
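/*
 * ion_pages_sync_for_device() wraps a run of pages in a one-entry
 * scatterlist purely so dma_sync_sg_for_device() can perform the cache
 * maintenance.  As the comment above concedes, filling sg_dma_address
 * with the physical address sidesteps the DMA API's ownership model and
 * relies on the same 1:1 phys/dma mapping assumed in ion_buffer_create().
 */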
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
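/*
 * From userspace this mmap path is reached through the dma_buf fd, not
 * /dev/ion itself.  A minimal sketch of the sequence, with error handling
 * elided; the heap_id_mask value is an assumption about which heaps the
 * running system registered, and the struct/ioctl names follow the ion
 * UAPI header of this era:
 *
 *	int ion = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_id_mask = 1,		// first registered heap
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	ioctl(ion, ION_IOC_ALLOC, &alloc);
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(ion, ION_IOC_SHARE, &share);	// exports a dma_buf fd
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, share.fd, 0);
 */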
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
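/*
 * Cross-process sharing therefore goes: process A turns its handle into
 * a dma_buf fd (ION_IOC_SHARE -> ion_share_dma_buf_fd), passes the fd
 * over a unix socket, and process B calls ION_IOC_IMPORT, which lands in
 * ion_import_dma_buf() below and resolves the fd back to the underlying
 * ion_buffer.
 */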
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}
	mutex_unlock(&client->lock);

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}
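/*
 * The fixup above exists because ION_IOC_SYNC, ION_IOC_FREE and
 * ION_IOC_CUSTOM were defined in the UAPI header with read/write
 * direction bits even though nothing needs to be copied back to
 * userspace for them.  Reporting them as write-only lets ion_ioctl()
 * skip the copy_to_user() for these commands; e.g. ION_IOC_FREE only
 * reads a struct ion_handle_data from the caller.
 */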
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}
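/*
 * cleanup_handle is only set in the ION_IOC_ALLOC case: if the result
 * cannot be copied back to userspace, the caller will never learn the
 * new handle id, so the allocation would be unreachable until the client
 * closes /dev/ion.  Freeing it on copy_to_user() failure avoids that
 * leak.
 */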
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/* use negative heap->id to reverse the priority -- when traversing
	   the list later attempt higher id numbers first */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}
#endif
	up_write(&dev->lock);
}
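/*
 * plist_add() keeps dev->heaps sorted by ascending node priority, so
 * initializing each node with -heap->id makes ion_alloc() try the
 * highest-numbered eligible heap first.  For example, heaps registered
 * with ids 1 and 4 get priorities -1 and -4 respectively, and the id-4
 * heap is attempted before the id-1 heap whenever both match the
 * caller's heap_id_mask.
 */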
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}
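/*
 * ion_reserve() is __init code meant to run from early platform setup,
 * while memblock still owns physical memory, which is why it can carve
 * out heap memory directly: heaps described with base == 0 get a dynamic
 * memblock_alloc_base() placement, while heaps with a fixed base simply
 * have that range reserved so the page allocator never hands it out.
 */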