ion.c revision a9bb075da5cdd67fddee27ea20e47ed3d04d6bb4
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		an rb tree of all the heaps in the system
 * @custom_ioctl:	arch-specific ioctl hook
 * @clients:		an rb tree of all the clients created against this device
 * @debug_root:		root dentry for this device's debugfs entries
 */
struct ion_device {
        struct miscdevice dev;
        struct rb_root buffers;
        struct mutex lock;
        struct rb_root heaps;
        long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
                              unsigned long arg);
        struct rb_root clients;
        struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		used for debugging
 * @debug_root:		this client's dentry under the device's debugfs root
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
        struct rb_node node;
        struct ion_device *dev;
        struct rb_root handles;
        struct mutex lock;
        unsigned int heap_mask;
        const char *name;
        struct task_struct *task;
        pid_t pid;
        struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node, kmap_cnt or buffer should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
        struct kref ref;
        struct ion_client *client;
        struct ion_buffer *buffer;
        struct rb_node node;
        unsigned int kmap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                if (buffer < entry) {
                        p = &(*p)->rb_left;
                } else if (buffer > entry) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: buffer already found.", __func__);
                        BUG();
                }
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                            struct ion_device *dev,
                                            unsigned long len,
                                            unsigned long align,
                                            unsigned long flags)
{
        struct ion_buffer *buffer;
        struct sg_table *table;
        struct scatterlist *sg;
        int i, ret;

        buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        buffer->heap = heap;
        kref_init(&buffer->ref);

        ret = heap->ops->allocate(heap, buffer, len, align, flags);
        if (ret) {
                kfree(buffer);
                return ERR_PTR(ret);
        }

        buffer->dev = dev;
        buffer->size = len;
        buffer->flags = flags;

        table = heap->ops->map_dma(heap, buffer);
        if (IS_ERR_OR_NULL(table)) {
                heap->ops->free(buffer);
                kfree(buffer);
                return ERR_PTR(PTR_ERR(table));
        }
        buffer->sg_table = table;
        if (buffer->flags & ION_FLAG_CACHED)
                for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
                            i) {
                        if (sg_dma_len(sg) == PAGE_SIZE)
                                continue;
                        pr_err("%s: cached mappings must have pagewise sg_lists\n",
                               __func__);
                        heap->ops->unmap_dma(heap, buffer);
                        heap->ops->free(buffer);
                        kfree(buffer);
                        return ERR_PTR(-EINVAL);
                }

        ret = ion_buffer_alloc_dirty(buffer);
        if (ret) {
                heap->ops->unmap_dma(heap, buffer);
                heap->ops->free(buffer);
                kfree(buffer);
                return ERR_PTR(ret);
        }

        buffer->dev = dev;
        buffer->size = len;
        INIT_LIST_HEAD(&buffer->vmas);
        mutex_init(&buffer->lock);
        /* this will set up dma addresses for the sglist -- it is not
           technically correct as per the dma api -- a specific
           device isn't really taking ownership here.  However, in practice on
           our systems the only dma_address space is physical addresses.
           Additionally, we can't afford the overhead of invalidating every
           allocation via dma_map_sg.
           The implicit contract here is that
           memory coming from the heaps is ready for dma, ie if it has a
           cached mapping that mapping has been invalidated */
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
                sg_dma_address(sg) = sg_phys(sg);
        ion_buffer_add(dev, buffer);
        return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
        struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
        struct ion_device *dev = buffer->dev;

        if (WARN_ON(buffer->kmap_cnt > 0))
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

        buffer->heap->ops->unmap_dma(buffer->heap, buffer);
        buffer->heap->ops->free(buffer);
        mutex_lock(&dev->lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->lock);
        kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
        kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
        return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct ion_handle *handle;

        handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
        if (!handle)
                return ERR_PTR(-ENOMEM);
        kref_init(&handle->ref);
        RB_CLEAR_NODE(&handle->node);
        handle->client = client;
        ion_buffer_get(buffer);
        handle->buffer = buffer;

        return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
        struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
        struct ion_client *client = handle->client;
        struct ion_buffer *buffer = handle->buffer;

        mutex_lock(&client->lock);

        mutex_lock(&buffer->lock);
        while (handle->kmap_cnt)
                ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);

        if (!RB_EMPTY_NODE(&handle->node))
                rb_erase(&handle->node, &client->handles);
        mutex_unlock(&client->lock);

        ion_buffer_put(buffer);
        kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
        return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
        kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
        return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
                                            struct ion_buffer *buffer)
{
        struct rb_node *n;

        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                if (handle->buffer == buffer)
                        return handle;
        }
        return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
                                struct ion_handle *handle)
{
        struct rb_node *n = client->handles.rb_node;

        while (n) {
                struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
                                                          node);
                if (handle < handle_node)
                        n = n->rb_left;
                else if (handle > handle_node)
                        n = n->rb_right;
                else
                        return true;
        }
        return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
        struct rb_node **p = &client->handles.rb_node;
        struct rb_node *parent = NULL;
        struct ion_handle *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_handle, node);

                if (handle < entry)
                        p = &(*p)->rb_left;
                else if (handle > entry)
                        p = &(*p)->rb_right;
                else
                        WARN(1, "%s: buffer already found.", __func__);
        }

        rb_link_node(&handle->node, parent, p);
        rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                             size_t align, unsigned int heap_mask,
                             unsigned int flags)
{
        struct rb_node *n;
        struct ion_handle *handle;
        struct ion_device *dev = client->dev;
        struct ion_buffer *buffer = NULL;

        pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
                 len, align, heap_mask, flags);
        /*
         * traverse the list of heaps available in this system in priority
         * order.  If the heap type is supported by the client, and matches the
         * request of the caller allocate from it.  Repeat until allocate has
         * succeeded or all heaps have been tried
         */
        if (WARN_ON(!len))
                return ERR_PTR(-EINVAL);

        len = PAGE_ALIGN(len);

        mutex_lock(&dev->lock);
        for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
                struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
                /* if the client doesn't support this heap type */
                if (!((1 << heap->type) & client->heap_mask))
                        continue;
                /* if the caller didn't specify this heap type */
                if (!((1 << heap->id) & heap_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, align, flags);
                if (!IS_ERR_OR_NULL(buffer))
                        break;
        }
        mutex_unlock(&dev->lock);

        if (buffer == NULL)
                return ERR_PTR(-ENODEV);

        if (IS_ERR(buffer))
                return ERR_PTR(PTR_ERR(buffer));

        handle = ion_handle_create(client, buffer);

        /*
         * ion_buffer_create will create a buffer with a ref_cnt of 1,
         * and ion_handle_create will take a second reference, drop one here
         */
        ion_buffer_put(buffer);

        if (!IS_ERR(handle)) {
                mutex_lock(&client->lock);
                ion_handle_add(client, handle);
                mutex_unlock(&client->lock);
        }

        return handle;
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
        bool valid_handle;

        BUG_ON(client != handle->client);

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
        mutex_unlock(&client->lock);

        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to free.\n", __func__);
                return;
        }
        ion_handle_put(handle);
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
             ion_phys_addr_t *addr, size_t *len)
{
        struct ion_buffer *buffer;
        int ret;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                mutex_unlock(&client->lock);
                return -EINVAL;
        }

        buffer = handle->buffer;

        if (!buffer->heap->ops->phys) {
                pr_err("%s: ion_phys is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return -ENODEV;
        }
        mutex_unlock(&client->lock);
        ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
        return ret;
}

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
        void *vaddr;

        if (buffer->kmap_cnt) {
                buffer->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
        if (IS_ERR_OR_NULL(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
        return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;
        void *vaddr;

        if (handle->kmap_cnt) {
                handle->kmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = ion_buffer_kmap_get(buffer);
        if (IS_ERR_OR_NULL(vaddr))
                return vaddr;
        handle->kmap_cnt++;
        return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
        buffer->kmap_cnt--;
        if (!buffer->kmap_cnt) {
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
                buffer->vaddr = NULL;
        }
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
        struct ion_buffer *buffer = handle->buffer;

        handle->kmap_cnt--;
        if (!handle->kmap_cnt)
                ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        void *vaddr;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_kernel.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }

        buffer = handle->buffer;

        if (!handle->buffer->heap->ops->map_kernel) {
                pr_err("%s: map_kernel is not implemented by this heap.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-ENODEV);
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_handle_kmap_get(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
        return vaddr;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;

        mutex_lock(&client->lock);
        buffer = handle->buffer;
        mutex_lock(&buffer->lock);
        ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);
        mutex_unlock(&client->lock);
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
        struct ion_client *client = s->private;
        struct rb_node *n;
        size_t sizes[ION_NUM_HEAPS] = {0};
        const char *names[ION_NUM_HEAPS] = {0};
        int i;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                enum ion_heap_type type = handle->buffer->heap->type;

                if (!names[type])
                        names[type] = handle->buffer->heap->name;
                sizes[type] += handle->buffer->size;
        }
        mutex_unlock(&client->lock);

        seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
        for (i = 0; i < ION_NUM_HEAPS; i++) {
                if (!names[i])
                        continue;
                seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
        }
        return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
        .open = ion_debug_client_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
                                     unsigned int heap_mask,
                                     const char *name)
{
        struct ion_client *client;
        struct task_struct *task;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ion_client *entry;
        char debug_name[64];
        pid_t pid;

        get_task_struct(current->group_leader);
        task_lock(current->group_leader);
        pid = task_pid_nr(current->group_leader);
        /* don't bother to store task struct for kernel threads,
           they can't be killed anyway */
        if (current->group_leader->flags & PF_KTHREAD) {
                put_task_struct(current->group_leader);
                task = NULL;
        } else {
                task = current->group_leader;
        }
        task_unlock(current->group_leader);

        client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
        if (!client) {
                if (task)
                        put_task_struct(current->group_leader);
                return ERR_PTR(-ENOMEM);
        }

        client->dev = dev;
        client->handles = RB_ROOT;
        mutex_init(&client->lock);
        client->name = name;
        client->heap_mask = heap_mask;
        client->task = task;
        client->pid = pid;

        mutex_lock(&dev->lock);
        p = &dev->clients.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_client, node);

                if (client < entry)
                        p = &(*p)->rb_left;
                else if (client > entry)
                        p = &(*p)->rb_right;
        }
        rb_link_node(&client->node, parent, p);
        rb_insert_color(&client->node, &dev->clients);

        snprintf(debug_name, 64, "%u", client->pid);
        client->debug_root = debugfs_create_file(debug_name, 0664,
                                                 dev->debug_root, client,
                                                 &debug_client_fops);
        mutex_unlock(&dev->lock);

        return client;
}

void ion_client_destroy(struct ion_client *client)
{
        struct ion_device *dev = client->dev;
        struct rb_node *n;

        pr_debug("%s: %d\n", __func__, __LINE__);
        while ((n = rb_first(&client->handles))) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
                ion_handle_destroy(&handle->ref);
        }
        mutex_lock(&dev->lock);
        if (client->task)
                put_task_struct(client->task);
        rb_erase(&client->node, &dev->clients);
        debugfs_remove_recursive(client->debug_root);
        mutex_unlock(&dev->lock);

        kfree(client);
}

struct sg_table *ion_sg_table(struct ion_client *client,
                              struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct sg_table *table;

        mutex_lock(&client->lock);
        if (!ion_handle_validate(client, handle)) {
                pr_err("%s: invalid handle passed to map_dma.\n",
                       __func__);
                mutex_unlock(&client->lock);
                return ERR_PTR(-EINVAL);
        }
        buffer = handle->buffer;
        table = buffer->sg_table;
        mutex_unlock(&client->lock);
        return table;
}

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
                                        enum dma_data_direction direction)
{
        struct dma_buf *dmabuf = attachment->dmabuf;
        struct ion_buffer *buffer = dmabuf->priv;

        ion_buffer_sync_for_device(buffer, attachment->dev, direction);
        return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
                              struct sg_table *table,
                              enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
        unsigned long pages = buffer->sg_table->nents;
        unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

        buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
        if (!buffer->dirty)
                return -ENOMEM;
        return 0;
}

struct ion_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;
        struct ion_vma_list *vma_list;

        pr_debug("%s: syncing for device %s\n", __func__,
                 dev ? dev_name(dev) : "null");

        if (!(buffer->flags & ION_FLAG_CACHED))
                return;

        mutex_lock(&buffer->lock);
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
                if (!test_bit(i, buffer->dirty))
                        continue;
                dma_sync_sg_for_device(dev, sg, 1, dir);
                clear_bit(i, buffer->dirty);
        }
        list_for_each_entry(vma_list, &buffer->vmas, list) {
                struct vm_area_struct *vma = vma_list->vma;

                zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
                               NULL);
        }
        mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct scatterlist *sg;
        int i;

        mutex_lock(&buffer->lock);
        set_bit(vmf->pgoff, buffer->dirty);

        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
                if (i != vmf->pgoff)
                        continue;
                dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
                vm_insert_page(vma, (unsigned long)vmf->virtual_address,
                               sg_page(sg));
                break;
        }
        mutex_unlock(&buffer->lock);
        return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list;

        vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
        if (!vma_list)
                return;
        vma_list->vma = vma;
        mutex_lock(&buffer->lock);
        list_add(&vma_list->list, &buffer->vmas);
        mutex_unlock(&buffer->lock);
        pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = vma->vm_private_data;
        struct ion_vma_list *vma_list, *tmp;

        pr_debug("%s\n", __func__);
        mutex_lock(&buffer->lock);
        list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
                if (vma_list->vma != vma)
                        continue;
                list_del(&vma_list->list);
                kfree(vma_list);
                pr_debug("%s: deleting %p\n", __func__, vma);
                break;
        }
        mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
        .open = ion_vm_open,
        .close = ion_vm_close,
        .fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;

        if (!buffer->heap->ops->map_user) {
                pr_err("%s: this heap does not define a method for mapping to userspace\n",
                       __func__);
                return -EINVAL;
        }

        if (buffer->flags & ION_FLAG_CACHED) {
                vma->vm_private_data = buffer;
                vma->vm_ops = &ion_vma_ops;
                ion_vm_open(vma);
        } else {
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                mutex_lock(&buffer->lock);
                /* now map it to userspace */
                ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
                mutex_unlock(&buffer->lock);
        }

        if (ret)
                pr_err("%s: failure mapping buffer to userspace\n",
                       __func__);

        return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
        struct ion_buffer *buffer = dmabuf->priv;

        ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
        struct ion_buffer *buffer = dmabuf->priv;

        return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
                               void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
                                        size_t len,
                                        enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;

        if (!buffer->heap->ops->map_kernel) {
                pr_err("%s: map kernel is not implemented by this heap.\n",
                       __func__);
                return -ENODEV;
        }

        mutex_lock(&buffer->lock);
        vaddr = ion_buffer_kmap_get(buffer);
        mutex_unlock(&buffer->lock);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
        if (!vaddr)
                return -ENOMEM;
        return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
                                       size_t len,
                                       enum dma_data_direction direction)
{
        struct ion_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        ion_buffer_kmap_put(buffer);
        mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
        .release = ion_dma_buf_release,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
        .kmap_atomic = ion_dma_buf_kmap,
        .kunmap_atomic = ion_dma_buf_kunmap,
        .kmap = ion_dma_buf_kmap,
        .kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
        struct ion_buffer *buffer;
        struct dma_buf *dmabuf;
        bool valid_handle;
        int fd;

        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
        mutex_unlock(&client->lock);
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to share.\n", __func__);
                return -EINVAL;
        }

        buffer = handle->buffer;
        ion_buffer_get(buffer);
        dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
        if (IS_ERR(dmabuf)) {
                ion_buffer_put(buffer);
                return PTR_ERR(dmabuf);
        }
        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0) {
                dma_buf_put(dmabuf);
                ion_buffer_put(buffer);
        }
        return fd;
}

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
        struct ion_handle *handle;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf))
                return ERR_PTR(PTR_ERR(dmabuf));
        /* if this memory came from ion */

        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not import dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return ERR_PTR(-EINVAL);
        }
        buffer = dmabuf->priv;

        mutex_lock(&client->lock);
        /* if a handle exists for this buffer just take a reference to it */
        handle = ion_handle_lookup(client, buffer);
        if (!IS_ERR_OR_NULL(handle)) {
                ion_handle_get(handle);
                goto end;
        }
        handle = ion_handle_create(client, buffer);
        if (IS_ERR_OR_NULL(handle))
                goto end;
        ion_handle_add(client, handle);
end:
        mutex_unlock(&client->lock);
        dma_buf_put(dmabuf);
        return handle;
}
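
/*
 * Illustrative sketch (not part of this driver): how another in-kernel driver
 * might consume a buffer exported by ion_share_dma_buf() above, using only the
 * generic dma-buf API.  The function and the device passed in are hypothetical;
 * only the dma-buf calls are real.  Kept out of the build with #if 0.
 */
#if 0
static int example_attach_ion_buffer(struct device *dev, int fd)
{
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        int ret = 0;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf))
                return PTR_ERR(dmabuf);

        attach = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attach)) {
                ret = PTR_ERR(attach);
                goto put;
        }

        /* ends up in ion_map_dma_buf() via dma_buf_ops above */
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto detach;
        }

        /* ... program the device with the pages described by sgt ... */

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
detach:
        dma_buf_detach(dmabuf, attach);
put:
        dma_buf_put(dmabuf);
        return ret;
}
#endif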

static int ion_sync_for_device(struct ion_client *client, int fd)
{
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(dmabuf))
                return PTR_ERR(dmabuf);

        /* if this memory came from ion */
        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not sync dmabuf from another exporter\n",
                       __func__);
                dma_buf_put(dmabuf);
                return -EINVAL;
        }
        buffer = dmabuf->priv;
        ion_buffer_sync_for_device(buffer, NULL, DMA_BIDIRECTIONAL);
        dma_buf_put(dmabuf);
        return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct ion_client *client = filp->private_data;

        switch (cmd) {
        case ION_IOC_ALLOC:
        {
                struct ion_allocation_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;
                data.handle = ion_alloc(client, data.len, data.align,
                                        data.heap_mask, data.flags);

                if (IS_ERR(data.handle))
                        return PTR_ERR(data.handle);

                if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                        ion_free(client, data.handle);
                        return -EFAULT;
                }
                break;
        }
        case ION_IOC_FREE:
        {
                struct ion_handle_data data;
                bool valid;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_handle_data)))
                        return -EFAULT;
                mutex_lock(&client->lock);
                valid = ion_handle_validate(client, data.handle);
                mutex_unlock(&client->lock);
                if (!valid)
                        return -EINVAL;
                ion_free(client, data.handle);
                break;
        }
        case ION_IOC_SHARE:
        {
                struct ion_fd_data data;

                if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
                        return -EFAULT;
                data.fd = ion_share_dma_buf(client, data.handle);
                if (copy_to_user((void __user *)arg, &data, sizeof(data)))
                        return -EFAULT;
                break;
        }
        case ION_IOC_IMPORT:
        {
                struct ion_fd_data data;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_fd_data)))
                        return -EFAULT;
                data.handle = ion_import_dma_buf(client, data.fd);
                if (IS_ERR(data.handle))
                        data.handle = NULL;
                if (copy_to_user((void __user *)arg, &data,
                                 sizeof(struct ion_fd_data)))
                        return -EFAULT;
                break;
        }
        case ION_IOC_SYNC:
        {
                struct ion_fd_data data;

                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_fd_data)))
                        return -EFAULT;
                ion_sync_for_device(client, data.fd);
                break;
        }
        case ION_IOC_CUSTOM:
        {
                struct ion_device *dev = client->dev;
                struct ion_custom_data data;

                if (!dev->custom_ioctl)
                        return -ENOTTY;
                if (copy_from_user(&data, (void __user *)arg,
                                   sizeof(struct ion_custom_data)))
                        return -EFAULT;
                return dev->custom_ioctl(client, data.cmd, data.arg);
        }
        default:
                return -ENOTTY;
        }
        return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
        struct ion_client *client = file->private_data;

        pr_debug("%s: %d\n", __func__, __LINE__);
        ion_client_destroy(client);
        return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
        struct miscdevice *miscdev = file->private_data;
        struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
        struct ion_client *client;

        pr_debug("%s: %d\n", __func__, __LINE__);
        client = ion_client_create(dev, -1, "user");
        if (IS_ERR_OR_NULL(client))
                return PTR_ERR(client);
        file->private_data = client;

        return 0;
}

static const struct file_operations ion_fops = {
        .owner = THIS_MODULE,
        .open = ion_open,
        .release = ion_release,
        .unlocked_ioctl = ion_ioctl,
};
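
/*
 * Illustrative sketch (not part of this driver): the userspace side of the
 * ioctl interface handled by ion_ioctl() above.  The ioctl names and struct
 * field names match the ones used above; the exact struct layouts come from
 * the uapi ion.h and are assumed here.  Kept out of the build with #if 0.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ion.h"

static int example_user_alloc_and_map(size_t len, unsigned int heap_mask,
                                      void **vaddr)
{
        struct ion_allocation_data alloc = {
                .len = len,
                .align = 0,
                .heap_mask = heap_mask,
                .flags = 0,             /* or ION_FLAG_CACHED */
        };
        struct ion_fd_data share;
        struct ion_handle_data free_data;
        int ionfd, ret = -1;

        /* opening /dev/ion creates an ion_client via ion_open() */
        ionfd = open("/dev/ion", O_RDWR);
        if (ionfd < 0)
                return -1;

        if (ioctl(ionfd, ION_IOC_ALLOC, &alloc) < 0)    /* fills alloc.handle */
                goto out;

        share.handle = alloc.handle;
        if (ioctl(ionfd, ION_IOC_SHARE, &share) < 0)    /* fills share.fd */
                goto free;

        *vaddr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                      share.fd, 0);
        if (*vaddr != MAP_FAILED)
                ret = 0;
        close(share.fd);        /* the mapping keeps the buffer alive */
free:
        free_data.handle = alloc.handle;
        ioctl(ionfd, ION_IOC_FREE, &free_data);
out:
        close(ionfd);           /* drops the client; the dma-buf holds its own buffer ref */
        return ret;
}
#endif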

static size_t ion_debug_heap_total(struct ion_client *client,
                                   enum ion_heap_type type)
{
        size_t size = 0;
        struct rb_node *n;

        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n,
                                                     struct ion_handle,
                                                     node);
                if (handle->buffer->heap->type == type)
                        size += handle->buffer->size;
        }
        mutex_unlock(&client->lock);
        return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
        struct ion_heap *heap = s->private;
        struct ion_device *dev = heap->dev;
        struct rb_node *n;

        seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

        for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
                struct ion_client *client = rb_entry(n, struct ion_client,
                                                     node);
                size_t size = ion_debug_heap_total(client, heap->type);
                if (!size)
                        continue;
                if (client->task) {
                        char task_comm[TASK_COMM_LEN];

                        get_task_comm(task_comm, client->task);
                        seq_printf(s, "%16s %16u %16zu\n", task_comm,
                                   client->pid, size);
                } else {
                        seq_printf(s, "%16s %16u %16zu\n", client->name,
                                   client->pid, size);
                }
        }
        return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
        return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
        .open = ion_debug_heap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
        struct rb_node **p = &dev->heaps.rb_node;
        struct rb_node *parent = NULL;
        struct ion_heap *entry;

        if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
            !heap->ops->unmap_dma)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);

        heap->dev = dev;
        mutex_lock(&dev->lock);
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_heap, node);

                if (heap->id < entry->id) {
                        p = &(*p)->rb_left;
                } else if (heap->id > entry->id) {
                        p = &(*p)->rb_right;
                } else {
                        pr_err("%s: can not insert multiple heaps with id %d\n",
                               __func__, heap->id);
                        goto end;
                }
        }

        rb_link_node(&heap->node, parent, p);
        rb_insert_color(&heap->node, &dev->heaps);
        debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
                            &debug_heap_fops);
end:
        mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
                                     (struct ion_client *client,
                                      unsigned int cmd,
                                      unsigned long arg))
{
        struct ion_device *idev;
        int ret;

        idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
        if (!idev)
                return ERR_PTR(-ENOMEM);

        idev->dev.minor = MISC_DYNAMIC_MINOR;
        idev->dev.name = "ion";
        idev->dev.fops = &ion_fops;
        idev->dev.parent = NULL;
        ret = misc_register(&idev->dev);
        if (ret) {
                pr_err("ion: failed to register misc device.\n");
                kfree(idev);
                return ERR_PTR(ret);
        }

        idev->debug_root = debugfs_create_dir("ion", NULL);
        if (IS_ERR_OR_NULL(idev->debug_root))
                pr_err("ion: failed to create debug files.\n");

        idev->custom_ioctl = custom_ioctl;
        idev->buffers = RB_ROOT;
        mutex_init(&idev->lock);
        idev->heaps = RB_ROOT;
        idev->clients = RB_ROOT;
        return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
        misc_deregister(&dev->dev);
        /* XXX need to free the heaps and clients ? */
        kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
        int i, ret;

        for (i = 0; i < data->nr; i++) {
                if (data->heaps[i].size == 0)
                        continue;
                ret = memblock_reserve(data->heaps[i].base,
                                       data->heaps[i].size);
                if (ret)
                        pr_err("memblock reserve of %zx@%lx failed\n",
                               data->heaps[i].size,
                               data->heaps[i].base);
        }
}
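
/*
 * Illustrative sketch (not part of this file): a minimal in-kernel user of the
 * client API defined above.  The function is hypothetical, and the heap id
 * mask is left to the caller since heap ids are platform specific.  Kept out
 * of the build with #if 0.
 */
#if 0
static int example_kernel_client(struct ion_device *idev,
                                 unsigned int heap_id_mask)
{
        struct ion_client *client;
        struct ion_handle *handle;
        void *vaddr;
        int ret = 0;

        /* heap_mask of -1: this client may allocate from any heap type */
        client = ion_client_create(idev, -1, "example");
        if (IS_ERR_OR_NULL(client))
                return -ENOMEM;

        /* heap_id_mask selects heaps by id, exactly as ion_alloc() does */
        handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, heap_id_mask, 0);
        if (IS_ERR_OR_NULL(handle)) {
                ret = -ENOMEM;
                goto destroy;
        }

        vaddr = ion_map_kernel(client, handle);
        if (IS_ERR_OR_NULL(vaddr)) {
                ret = -ENOMEM;
                goto free;
        }

        memset(vaddr, 0, PAGE_SIZE);
        ion_unmap_kernel(client, handle);
free:
        ion_free(client, handle);
destroy:
        ion_client_destroy(client);
        return ret;
}
#endif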