/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	device-specific ioctl hook, may be NULL
 * @clients:		an rb tree of all the clients in the system
 * @debug_root:		dentry of the device's debugfs root directory
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, kmap_cnt or buffer should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return ((buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
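
/*
 * The four helpers above pack a per-page dirty flag into bit 0 of the
 * struct page pointers stored in buffer->pages, which is safe because a
 * struct page pointer is always at least word aligned.  A sketch of the
 * intended use (the index i is illustrative):
 *
 *	ion_buffer_page_dirty(buffer->pages + i);	 tag page i dirty
 *	if (ion_buffer_page_is_dirty(buffer->pages[i]))	 test the tag
 *		p = ion_buffer_page(buffer->pages[i]);	 strip tag for use
 */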

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_CAST(table);
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}

		if (ret)
			goto err;
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, ie if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err1:
	if (buffer->pages)
		vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}
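
/*
 * Sketch of the implicit DMA contract described in the comment above
 * (assuming a system where the only dma_address space is physical
 * addresses, as noted there): every entry of the buffer's sg_table is
 * made DMA-ready at creation time, so a device can consume the table
 * without a dma_map_sg() round trip:
 *
 *	for_each_sg(table->sgl, sg, table->nents, i)
 *		sg_dma_address(sg) = sg_phys(sg);	 1:1 phys mapping
 */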

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}
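
/*
 * Reference-count lifecycle implied by the helpers above: a buffer is
 * born with one kref from ion_buffer_create(); each handle takes a kref
 * via ion_buffer_get() plus a handle_count, and an exported dma_buf
 * holds its own kref.  When the final ion_buffer_put() runs,
 * _ion_buffer_destroy() either frees the buffer immediately or, for
 * ION_HEAP_FLAG_DEFER_FREE heaps, queues it on the heap's freelist.
 */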

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int rc;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	do {
		int id;

		rc = idr_pre_get(&client->idr, GFP_KERNEL);
		if (!rc)
			return -ENOMEM;
		rc = idr_get_new_above(&client->idr, handle, 1, &id);
		handle->id = id;
	} while (rc == -EAGAIN);

	if (rc < 0)
		return rc;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
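
/*
 * Illustrative kernel-side use of the two exported calls above (the
 * device pointer, heap id and client name are made up for the example;
 * ion_client_create() is defined further down in this file):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, "example-driver");
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *			   1 << example_heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */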

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
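
/*
 * Illustrative pairing of the kernel mapping calls above; every
 * ion_map_kernel() must be balanced by an ion_unmap_kernel(), since the
 * heap's kernel mapping is torn down only when the per-handle and
 * per-buffer kmap counts both drop to zero ("len" is the length passed
 * to ion_alloc()):
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 */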

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->name = name;
	client->task = task;
	client->pid = pid;

	down_write(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	up_write(&dev->lock);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_remove_all(&client->idr);
	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			__dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);

	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			     ion_buffer_page(buffer->pages[vmf->pgoff]));
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
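
/*
 * How the fault-based mapping above tracks cache state: for buffers
 * where ion_buffer_fault_user_mappings() is true, pages are inserted
 * into userspace lazily by ion_vm_fault(), and each faulted page is
 * tagged dirty in buffer->pages.  ion_buffer_sync_for_device() then
 * cleans only the dirty pages and zap_page_range()s every tracked VMA,
 * so the next CPU access refaults and re-marks its page dirty.
 */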

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
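
/*
 * Illustrative share/import round trip between two in-kernel clients,
 * using the exported helper above and ion_import_dma_buf() below
 * (client names and the fd-passing transport are made up):
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	...pass fd to the other side, e.g. over binder...
 *	struct ion_handle *handle_b = ion_import_dma_buf(client_b, fd);
 *
 * ion_import_dma_buf() refuses dma_bufs from other exporters by
 * checking dmabuf->ops against ion's own dma_buf_ops.
 */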

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}
	mutex_unlock(&client->lock);

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}
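
/*
 * Illustrative userspace view of the ioctl interface handled below
 * (struct layouts per ion.h; the heap id is made up and error handling
 * is elided):
 *
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_id_mask = 1 << some_heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *
 *	struct ion_fd_data fd_data = { .handle = alloc_data.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &fd_data);
 *	mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd_data.fd, 0);
 */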

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_alloc(client, data.len, data.align,
				   data.heap_id_mask, data.flags);

		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.handle = handle->id;

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		handle = ion_handle_get_by_id(client, data.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		handle = ion_handle_get_by_id(client, data.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		struct ion_handle *handle;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle = handle->id;

		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, "user");
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/* use negative heap->id to reverse the priority -- when traversing
	   the list later attempt higher id numbers first */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
				    &debug_shrink_fops);
	}
#endif
	up_write(&dev->lock);
}
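
/*
 * Typical bring-up sequence for the device-level API above and below
 * (heap construction is platform code; my_platform_make_heap() is a
 * made-up name for whatever creates the heap):
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	struct ion_heap *heap = my_platform_make_heap();
 *	ion_device_add_heap(idev, heap);
 *
 * Because heaps sit on a plist keyed by -heap->id, ion_alloc() tries
 * higher heap ids first.
 */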

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root)
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}