ion.c revision 056be396881d6cc69e5946a3b4bfbd0f58d8c45a
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* DEBUG must be defined before printk.h is pulled in (via the includes
 * below) for pr_debug() to be compiled in. */
#define DEBUG

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	arch-specific ioctl hook, may be NULL
 * @clients:		list of all the clients created from userspace
 * @debug_root:		root dentry of this device's debugfs files
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client's thread group leader
 * @debug_root:		this client's debugfs file
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node, kmap_cnt or buffer should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

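/*
 * For illustration, the object graph these three structures form: one
 * ion_device tracks every heap, buffer and client in the system; each
 * client tracks its handles; each handle pins exactly one buffer.
 *
 *	ion_device ---> ion_heap(s)
 *	    |---------> ion_buffer(s)   (kref'd by handles and dma-bufs)
 *	    `---------> ion_client(s) ---> ion_handle(s) ---> ion_buffer
 */
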
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;

	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&client->lock);

	mutex_lock(&buffer->lock);
	/*
	 * drop this handle's outstanding kernel mappings; looping on the
	 * buffer's count instead would underflow when another handle still
	 * holds a mapping
	 */
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	mutex_unlock(&client->lock);

	ion_buffer_put(buffer);
	kfree(handle);
}

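/*
 * Reference-count flow implied by the helpers above (a sketch, not an
 * exhaustive list of paths): ion_buffer_create() starts a buffer at one
 * reference, ion_handle_create() takes a second, and ion_alloc() below
 * drops the creation reference once the handle owns the buffer, so the
 * buffer dies with its last handle or exported dma-buf.
 */
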
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client,
			   struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until
	 * allocate has succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}

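/*
 * Example ion_alloc() usage from a hypothetical kernel client (a sketch;
 * "client" and "my_heap_id" are assumed, and flags is a mask of heap ids
 * per the loop above):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_4K, SZ_4K, 1 << my_heap_id);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */
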
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

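/*
 * Kernel-mapping usage, sketched: kmap counts nest per handle and per
 * buffer, so each ion_map_kernel() must be balanced by ion_unmap_kernel()
 * on the same handle ("len" here is assumed):
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return -ENOMEM;
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 */
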
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}

	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

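/*
 * Client lifecycle, sketched ("idev" is a hypothetical ion_device):
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(idev, -1, "my-driver");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *	...
 *	ion_client_destroy(client);
 *
 * A heap_mask of -1 admits every heap type; ion_open() below uses it for
 * userspace clients.
 */
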
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	/* the dma-buf kmap API passes offset as a page number */
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

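/*
 * An importer's CPU access to an exported buffer is bracketed by the two
 * callbacks above, roughly (a sketch, error handling elided):
 *
 *	dma_buf_begin_cpu_access(dmabuf, 0, len, DMA_BIDIRECTIONAL);
 *	ptr = dma_buf_kmap(dmabuf, page_num);
 *	...
 *	dma_buf_kunmap(dmabuf, page_num, ptr);
 *	dma_buf_end_cpu_access(dmabuf, 0, len, DMA_BIDIRECTIONAL);
 */
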
struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	/*
	 * on failure dma_buf_put() already drops the reference taken above
	 * via ion_dma_buf_release(), so don't put the buffer again here
	 */
	if (fd < 0)
		dma_buf_put(dmabuf);
	return fd;
}

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}

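/*
 * Sharing between two clients, sketched: the exporter turns a handle into
 * an fd, passes the fd to another process (e.g. over binder or a socket),
 * and the importer turns it back into a handle on the same buffer:
 *
 *	fd = ion_share_dma_buf(client_a, handle_a);
 *	...
 *	handle_b = ion_import_dma_buf(client_b, fd);
 */
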
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free ||
	    !heap->ops->map_dma || !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

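/*
 * Heap registration, sketched: a heap must implement at least allocate,
 * free, map_dma and unmap_dma (checked above).  A board file might do the
 * following with ion_heap_create() from ion_priv.h ("pdata", "heaps" and
 * "idev" are hypothetical):
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		heaps[i] = ion_heap_create(&pdata->heaps[i]);
 *		ion_device_add_heap(idev, heaps[i]);
 *	}
 */
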
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}

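/*
 * Userspace view, sketched: opening /dev/ion creates a client, and the
 * ioctls above mirror the kernel API (a sketch; error handling elided,
 * "heap_id" assumed):
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096, .align = 4096, .flags = 1 << heap_id,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       share.fd, 0);
 */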