ion.c revision ea313b5f88ed7119f79ad3f6b85e9620971b9875
/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @user_clients:	list of all the clients created from userspace
 */
struct ion_device {
        struct miscdevice dev;
        struct rb_root buffers;
        struct mutex buffer_lock;
        struct rw_semaphore lock;
        struct plist_head heaps;
        long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
                              unsigned long arg);
        struct rb_root clients;
        struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
        struct rb_node node;
        struct ion_device *dev;
        struct rb_root handles;
        struct mutex lock;
        const char *name;
        struct task_struct *task;
        pid_t pid;
        struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
        struct kref ref;
        struct ion_client *client;
        struct ion_buffer *buffer;
        struct rb_node node;
        unsigned int kmap_cnt;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
        return ((buffer->flags & ION_FLAG_CACHED) &&
                !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
        return !!(buffer->flags & ION_FLAG_CACHED);
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long align,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        struct sg_table *table;
        struct scatterlist *sg;
        int i, ret;

        buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        buffer->flags = flags;
        kref_init(&buffer->ref);

        ret = heap->ops->allocate(heap, buffer, len, align, flags);

        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                ion_heap_freelist_drain(heap, 0);
                ret = heap->ops->allocate(heap, buffer, len, align,
                                          flags);
                if (ret)
                        goto err2;
        }

        buffer->dev = dev;
        buffer->size = len;

        table = heap->ops->map_dma(heap, buffer);
        if (IS_ERR_OR_NULL(table)) {
                heap->ops->free(buffer);
                kfree(buffer);
                return ERR_PTR(PTR_ERR(table));
        }
        buffer->sg_table = table;
        if (ion_buffer_fault_user_mappings(buffer)) {
                for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
                            i) {
                        if (sg_dma_len(sg) == PAGE_SIZE)
                                continue;
                        pr_err("%s: cached mappings that will be faulted in "
                               "must have pagewise sg_lists\n", __func__);
                        ret = -EINVAL;
                        goto err;
                }

                ret = ion_buffer_alloc_dirty(buffer);
                if (ret)
                        goto err;
        }

        buffer->dev = dev;
        buffer->size = len;
        INIT_LIST_HEAD(&buffer->vmas);
        mutex_init(&buffer->lock);
        /* this will set up dma addresses for the sglist -- it is not
           technically correct as per the dma api -- a specific
           device isn't really taking ownership here. However, in practice on
           our systems the only dma_address space is physical addresses.
           Additionally, we can't afford the overhead of invalidating every
           allocation via dma_map_sg. The implicit contract here is that
           memory coming from the heaps is ready for dma, ie if it has a
           cached mapping that mapping has been invalidated */
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
                sg_dma_address(sg) = sg_phys(sg);
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
        return buffer;

err:
        heap->ops->unmap_dma(heap, buffer);
        heap->ops->free(buffer);
err2:
        kfree(buffer);
        return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
        if (WARN_ON(buffer->kmap_cnt > 0))
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        buffer->heap->ops->unmap_dma(buffer->heap, buffer);
        buffer->heap->ops->free(buffer);
        if (buffer->flags & ION_FLAG_CACHED)
                kfree(buffer->dirty);
        kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
        struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
        struct ion_heap *heap = buffer->heap;
        struct ion_device *dev = buffer->dev;

        mutex_lock(&dev->buffer_lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->buffer_lock);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_freelist_add(heap, buffer);
        else
                ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
        kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
        return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
        mutex_lock(&buffer->lock);
        buffer->handle_count++;
        mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
        /*
         * when a buffer is removed from a handle, if it is not in
         * any other handles, copy the taskcomm and the pid of the
         * process it's being removed from into the buffer. At this
         * point there will be no way to track what processes this buffer is
         * being used by, it only exists as a dma_buf file descriptor.
         * The taskcomm and pid can provide a debug hint as to where this fd
         * is in the system
         */
        mutex_lock(&buffer->lock);
        buffer->handle_count--;
        BUG_ON(buffer->handle_count < 0);
        if (!buffer->handle_count) {
                struct task_struct *task;

                task = current->group_leader;
                get_task_comm(buffer->task_comm, task);
                buffer->pid = task_pid_nr(task);
        }
        mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct ion_handle *handle;

        handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
        if (!handle)
                return ERR_PTR(-ENOMEM);
        kref_init(&handle->ref);
        RB_CLEAR_NODE(&handle->node);
        handle->client = client;
        ion_buffer_get(buffer);
        ion_buffer_add_to_handle(buffer);
        handle->buffer = buffer;

        return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
        struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
        struct ion_client *client = handle->client;
        struct ion_buffer *buffer = handle->buffer;

        mutex_lock(&buffer->lock);
        while (handle->kmap_cnt)
                ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);

        if (!RB_EMPTY_NODE(&handle->node))
                rb_erase(&handle->node, &client->handles);

        ion_buffer_remove_from_handle(buffer);
        ion_buffer_put(buffer);

        kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
        return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
        kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
        return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct rb_node *n;

        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                if (handle->buffer == buffer)
                        return handle;
        }
        return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
        struct rb_node *n = client->handles.rb_node;

        while (n) {
                struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
                                                          node);
                if (handle < handle_node)
                        n = n->rb_left;
                else if (handle > handle_node)
                        n = n->rb_right;
                else
                        return true;
        }
        return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
        struct rb_node **p = &client->handles.rb_node;
        struct rb_node *parent = NULL;
        struct ion_handle *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_handle, node);

                if (handle < entry)
                        p = &(*p)->rb_left;
                else if (handle > entry)
                        p = &(*p)->rb_right;
                else
                        WARN(1, "%s: buffer already found.", __func__);
        }

        rb_link_node(&handle->node, parent, p);
        rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                             size_t align, unsigned int heap_id_mask,
                             unsigned int flags)
{
        struct ion_handle *handle;
        struct ion_device *dev = client->dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;

        pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
                 len, align, heap_id_mask, flags);
        /*
         * traverse the list of heaps available in this system in priority
         * order. If the heap type is supported by the client, and matches the
         * request of the caller allocate from it. Repeat until allocate has
         * succeeded or all heaps have been tried
         */
        if (WARN_ON(!len))
                return ERR_PTR(-EINVAL);

        len = PAGE_ALIGN(len);

        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, align, flags);
                if (!IS_ERR_OR_NULL(buffer))
                        break;
        }
        up_read(&dev->lock);

        if (buffer == NULL)
                return ERR_PTR(-ENODEV);

        if (IS_ERR(buffer))
                return ERR_PTR(PTR_ERR(buffer));

        handle = ion_handle_create(client, buffer);

        /*
         * ion_buffer_create will create a buffer with a ref_cnt of 1,
         * and ion_handle_create will take a second reference, drop one here
         */
        ion_buffer_put(buffer);

        if (!IS_ERR(handle)) {
                mutex_lock(&client->lock);
                ion_handle_add(client, handle);
                mutex_unlock(&client->lock);
        }


        return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
        bool valid_handle;

        BUG_ON(client != handle->client);

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);

        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to free.\n", __func__);
                mutex_unlock(&client->lock);
                return;
        }
        ion_handle_put(handle);
        mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
             ion_phys_addr_t *addr, size_t *len)
{
        struct ion_buffer *buffer;
        int ret;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                mutex_unlock(&client->lock);
                return -EINVAL;
        }

        buffer = handle->buffer;

        if (!buffer->heap->ops->phys) {
                pr_err("%s: ion_phys is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return -ENODEV;
        }
        mutex_unlock(&client->lock);
        ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
        return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (IS_ERR_OR_NULL(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;
        void *vaddr;

        if (handle->kmap_cnt) {
                handle->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = ion_buffer_kmap_get(buffer);
        if (IS_ERR_OR_NULL(vaddr))
                return vaddr;
        handle->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;

        handle->kmap_cnt--;
        if (!handle->kmap_cnt)
                ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        void *vaddr;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_kernel.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }

        buffer = handle->buffer;

        if (!handle->buffer->heap->ops->map_kernel) {
                pr_err("%s: map_kernel is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-ENODEV);
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_handle_kmap_get(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
        return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        buffer = handle->buffer;
        mutex_lock(&buffer->lock);
        ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
        struct ion_client *client = s->private;
        struct rb_node *n;
        size_t sizes[ION_NUM_HEAP_IDS] = {0};
        const char *names[ION_NUM_HEAP_IDS] = {0};
        int i;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                unsigned int id = handle->buffer->heap->id;

                if (!names[id])
                        names[id] = handle->buffer->heap->name;
                sizes[id] += handle->buffer->size;
        }
        mutex_unlock(&client->lock);

        seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
        for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
                if (!names[i])
                        continue;
                seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
        }
        return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
        .open = ion_debug_client_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
                                     const char *name)
{
        struct ion_client *client;
        struct task_struct *task;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ion_client *entry;
        char debug_name[64];
        pid_t pid;

        get_task_struct(current->group_leader);
        task_lock(current->group_leader);
        pid = task_pid_nr(current->group_leader);
        /* don't bother to store task struct for kernel threads,
           they can't be killed anyway */
        if (current->group_leader->flags & PF_KTHREAD) {
                put_task_struct(current->group_leader);
                task = NULL;
        } else {
                task = current->group_leader;
        }
        task_unlock(current->group_leader);

        client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
        if (!client) {
                if (task)
                        put_task_struct(current->group_leader);
                return ERR_PTR(-ENOMEM);
        }

        client->dev = dev;
        client->handles = RB_ROOT;
        mutex_init(&client->lock);
        client->name = name;
        client->task = task;
        client->pid = pid;

        down_write(&dev->lock);
        p = &dev->clients.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_client, node);

                if (client < entry)
                        p = &(*p)->rb_left;
                else if (client > entry)
                        p = &(*p)->rb_right;
        }
        rb_link_node(&client->node, parent, p);
        rb_insert_color(&client->node, &dev->clients);

        snprintf(debug_name, 64, "%u", client->pid);
        client->debug_root = debugfs_create_file(debug_name, 0664,
                                                 dev->debug_root, client,
                                                 &debug_client_fops);
        up_write(&dev->lock);

        return client;
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
        struct ion_device *dev = client->dev;
        struct rb_node *n;

        pr_debug("%s: %d\n", __func__, __LINE__);
        while ((n = rb_first(&client->handles))) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                ion_handle_destroy(&handle->ref);
        }
        down_write(&dev->lock);
        if (client->task)
                put_task_struct(client->task);
        rb_erase(&client->node, &dev->clients);
        debugfs_remove_recursive(client->debug_root);
        up_write(&dev->lock);

        kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
                              struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct sg_table *table;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_dma.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }
        buffer = handle->buffer;
        table = buffer->sg_table;
        mutex_unlock(&client->lock);
        return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct dma_buf *dmabuf = attachment->dmabuf;
        struct ion_buffer *buffer = dmabuf->priv;

        ion_buffer_sync_for_device(buffer, attachment->dev, direction);
        return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
        unsigned long pages = buffer->sg_table->nents;
        unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

        buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
        if (!buffer->dirty)
                return -ENOMEM;
        return 0;
}

struct ion_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;
        struct ion_vma_list *vma_list;

        pr_debug("%s: syncing for device %s\n", __func__,
                 dev ? dev_name(dev) : "null");

        if (!ion_buffer_fault_user_mappings(buffer))
                return;

        mutex_lock(&buffer->lock);
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
                if (!test_bit(i, buffer->dirty))
                        continue;
                dma_sync_sg_for_device(dev, sg, 1, dir);
                clear_bit(i, buffer->dirty);
        }
        list_for_each_entry(vma_list, &buffer->vmas, list) {
                struct vm_area_struct *vma = vma_list->vma;

                zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
                               NULL);
        }
        mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct scatterlist *sg;
        int i;

        mutex_lock(&buffer->lock);
        set_bit(vmf->pgoff, buffer->dirty);

        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
                if (i != vmf->pgoff)
                        continue;
                dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
                vm_insert_page(vma, (unsigned long)vmf->virtual_address,
                               sg_page(sg));
                break;
        }
        mutex_unlock(&buffer->lock);
        return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list;

        vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
        if (!vma_list)
                return;
        vma_list->vma = vma;
        mutex_lock(&buffer->lock);
        list_add(&vma_list->list, &buffer->vmas);
        mutex_unlock(&buffer->lock);
        pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list, *tmp;

        pr_debug("%s\n", __func__);
        mutex_lock(&buffer->lock);
        list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
                if (vma_list->vma != vma)
                        continue;
                list_del(&vma_list->list);
                kfree(vma_list);
                pr_debug("%s: deleting %p\n", __func__, vma);
                break;
        }
        mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
        .open = ion_vm_open,
        .close = ion_vm_close,
        .fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping "
                       "to userspace\n", __func__);
                return -EINVAL;
        }

        if (ion_buffer_fault_user_mappings(buffer)) {
                vma->vm_private_data = buffer;
                vma->vm_ops = &ion_vma_ops;
                ion_vm_open(vma);
                return 0;
        }

        if (!(buffer->flags & ION_FLAG_CACHED))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        mutex_lock(&buffer->lock);
        /* now map it to userspace */
        ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
        mutex_unlock(&buffer->lock);

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;
        ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
        struct ion_buffer *buffer = dmabuf->priv;
        return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
{
        return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
                                        size_t len,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;

        if (!buffer->heap->ops->map_kernel) {
                pr_err("%s: map kernel is not implemented by this heap.\n",
                       __func__);
                return -ENODEV;
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_buffer_kmap_get(buffer);
        mutex_unlock(&buffer->lock);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
        if (!vaddr)
                return -ENOMEM;
        return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
                                       size_t len,
                                       enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        ion_buffer_kmap_put(buffer);
        mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .kmap_atomic = ion_dma_buf_kmap,
        .kunmap_atomic = ion_dma_buf_kunmap,
        .kmap = ion_dma_buf_kmap,
        .kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
                                  struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct dma_buf *dmabuf;
        bool valid_handle;

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
        mutex_unlock(&client->lock);
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to share.\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        buffer = handle->buffer;
        ion_buffer_get(buffer);
        dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
        if (IS_ERR(dmabuf)) {
                ion_buffer_put(buffer);
                return dmabuf;
        }

        return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
        struct dma_buf *dmabuf;
        int fd;

        dmabuf = ion_share_dma_buf(client, handle);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);

        return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
        struct ion_handle *handle;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf))
                return ERR_PTR(PTR_ERR(dmabuf));
        /* if this memory came from ion */

        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not import dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return ERR_PTR(-EINVAL);
        }
        buffer = dmabuf->priv;

        mutex_lock(&client->lock);
        /* if a handle exists for this buffer just take a reference to it */
        handle = ion_handle_lookup(client, buffer);
        if (!IS_ERR_OR_NULL(handle)) {
                ion_handle_get(handle);
                goto end;
        }
        handle = ion_handle_create(client, buffer);
        if (IS_ERR_OR_NULL(handle))
                goto end;
        ion_handle_add(client, handle);
end:
        mutex_unlock(&client->lock);
        dma_buf_put(dmabuf);
        return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf))
                return PTR_ERR(dmabuf);

        /* if this memory came from ion */
        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not sync dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return -EINVAL;
        }
        buffer = dmabuf->priv;

        dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                               buffer->sg_table->nents, DMA_BIDIRECTIONAL);
        dma_buf_put(dmabuf);
        return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct ion_client *client = filp->private_data;

        switch (cmd) {
        case ION_IOC_ALLOC:
        {
                struct ion_allocation_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;
                data.handle = ion_alloc(client, data.len, data.align,
                                        data.heap_id_mask, data.flags);

                if (IS_ERR(data.handle))
                        return PTR_ERR(data.handle);

                if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                        ion_free(client, data.handle);
                        return -EFAULT;
                }
                break;
        }
        case ION_IOC_FREE:
        {
                struct ion_handle_data data;
                bool valid;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_handle_data)))
                        return -EFAULT;
                mutex_lock(&client->lock);
                valid = ion_handle_validate(client, data.handle);
                mutex_unlock(&client->lock);
                if (!valid)
                        return -EINVAL;
                ion_free(client, data.handle);
                break;
        }
        case ION_IOC_SHARE:
        case ION_IOC_MAP:
        {
                struct ion_fd_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;
                data.fd = ion_share_dma_buf_fd(client, data.handle);
                if (copy_to_user((void __user *)arg, &data, sizeof(data)))
                        return -EFAULT;
                if (data.fd < 0)
                        return data.fd;
                break;
        }
        case ION_IOC_IMPORT:
        {
                struct ion_fd_data data;
                int ret = 0;
                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_fd_data)))
                        return -EFAULT;
                data.handle = ion_import_dma_buf(client, data.fd);
                if (IS_ERR(data.handle)) {
                        ret = PTR_ERR(data.handle);
                        data.handle = NULL;
                }
                if (copy_to_user((void __user *)arg, &data,
                                 sizeof(struct ion_fd_data)))
                        return -EFAULT;
                if (ret < 0)
                        return ret;
                break;
        }
        case ION_IOC_SYNC:
        {
                struct ion_fd_data data;
                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_fd_data)))
                        return -EFAULT;
                ion_sync_for_device(client, data.fd);
                break;
        }
        case ION_IOC_CUSTOM:
        {
                struct ion_device *dev = client->dev;
                struct ion_custom_data data;

                if (!dev->custom_ioctl)
                        return -ENOTTY;
                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_custom_data)))
                        return -EFAULT;
                return dev->custom_ioctl(client, data.cmd, data.arg);
        }
        default:
                return -ENOTTY;
        }
        return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
        struct ion_client *client = file->private_data;

        pr_debug("%s: %d\n", __func__, __LINE__);
        ion_client_destroy(client);
        return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
        struct miscdevice *miscdev = file->private_data;
        struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
        struct ion_client *client;

        pr_debug("%s: %d\n", __func__, __LINE__);
        client = ion_client_create(dev, "user");
        if (IS_ERR_OR_NULL(client))
                return PTR_ERR(client);
        file->private_data = client;

        return 0;
}

static const struct file_operations ion_fops = {
        .owner = THIS_MODULE,
        .open = ion_open,
        .release = ion_release,
        .unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
                                   unsigned int id)
{
        size_t size = 0;
        struct rb_node *n;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n,
                                                     struct ion_handle,
                                                     node);
                if (handle->buffer->heap->id == id)
                        size += handle->buffer->size;
        }
        mutex_unlock(&client->lock);
        return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
        struct ion_heap *heap = s->private;
        struct ion_device *dev = heap->dev;
        struct rb_node *n;
        size_t total_size = 0;
        size_t total_orphaned_size = 0;

        seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
        seq_printf(s, "----------------------------------------------------\n");

        for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
                struct ion_client *client = rb_entry(n, struct ion_client,
                                                     node);
                size_t size = ion_debug_heap_total(client, heap->id);
                if (!size)
                        continue;
                if (client->task) {
                        char task_comm[TASK_COMM_LEN];

                        get_task_comm(task_comm, client->task);
                        seq_printf(s, "%16.s %16u %16u\n", task_comm,
                                   client->pid, size);
                } else {
                        seq_printf(s, "%16.s %16u %16u\n", client->name,
                                   client->pid, size);
                }
        }
        seq_printf(s, "----------------------------------------------------\n");
        seq_printf(s, "orphaned allocations (info is from last known client):"
                   "\n");
        mutex_lock(&dev->buffer_lock);
        for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
                struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
                                                     node);
                if (buffer->heap->id != heap->id)
                        continue;
                total_size += buffer->size;
                if (!buffer->handle_count) {
                        seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
                                   buffer->pid, buffer->size, buffer->kmap_cnt,
                                   atomic_read(&buffer->ref.refcount));
                        total_orphaned_size += buffer->size;
                }
        }
        mutex_unlock(&dev->buffer_lock);
        seq_printf(s, "----------------------------------------------------\n");
        seq_printf(s, "%16.s %16u\n", "total orphaned",
                   total_orphaned_size);
        seq_printf(s, "%16.s %16u\n", "total ", total_size);
        seq_printf(s, "----------------------------------------------------\n");

        if (heap->debug_show)
                heap->debug_show(heap, s, unused);

        return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
        .open = ion_debug_heap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = -1;
        sc.nr_to_scan = 0;

        if (!val)
                return 0;

        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
        sc.nr_to_scan = objs;

        heap->shrinker.shrink(&heap->shrinker, &sc);
        return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
        struct ion_heap *heap = data;
        struct shrink_control sc;
        int objs;

        sc.gfp_mask = -1;
        sc.nr_to_scan = 0;

        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
        *val = objs;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
                        debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
        if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
            !heap->ops->unmap_dma)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);

        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                ion_heap_init_deferred_free(heap);

        heap->dev = dev;
        down_write(&dev->lock);
        /* use negative heap->id to reverse the priority -- when traversing
           the list later attempt higher id numbers first */
        plist_node_init(&heap->node, -heap->id);
        plist_add(&heap->node, &dev->heaps);
        debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
                            &debug_heap_fops);
#ifdef DEBUG_HEAP_SHRINKER
        if (heap->shrinker.shrink) {
                char debug_name[64];

                snprintf(debug_name, 64, "%s_shrink", heap->name);
                debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
                                    &debug_shrink_fops);
        }
#endif
        up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
                                     (struct ion_client *client,
                                      unsigned int cmd,
                                      unsigned long arg))
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
        if (!idev)
                return ERR_PTR(-ENOMEM);

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                return ERR_PTR(ret);
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        if (IS_ERR_OR_NULL(idev->debug_root))
                pr_err("ion: failed to create debug files.\n");

        idev->custom_ioctl = custom_ioctl;
        idev->buffers = RB_ROOT;
        mutex_init(&idev->buffer_lock);
        init_rwsem(&idev->lock);
        plist_head_init(&idev->heaps);
        idev->clients = RB_ROOT;
        return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
        misc_deregister(&dev->dev);
        /* XXX need to free the heaps and clients ? */
        kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
        int i;

        for (i = 0; i < data->nr; i++) {
                if (data->heaps[i].size == 0)
                        continue;

                if (data->heaps[i].base == 0) {
                        phys_addr_t paddr;
                        paddr = memblock_alloc_base(data->heaps[i].size,
                                                    data->heaps[i].align,
                                                    MEMBLOCK_ALLOC_ANYWHERE);
                        if (!paddr) {
                                pr_err("%s: error allocating memblock for "
                                       "heap %d\n",
                                       __func__, i);
                                continue;
                        }
                        data->heaps[i].base = paddr;
                } else {
                        int ret = memblock_reserve(data->heaps[i].base,
                                                   data->heaps[i].size);
                        if (ret)
                                pr_err("memblock reserve of %x@%lx failed\n",
                                       data->heaps[i].size,
                                       data->heaps[i].base);
                }
                pr_info("%s: %s reserved base %lx size %d\n", __func__,
                        data->heaps[i].name,
                        data->heaps[i].base,
                        data->heaps[i].size);
        }
}