ion.c revision 12edf53db310345693fb7227c50e70a9b1fce434
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @user_clients:	list of all the clients created from userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (buffer->flags & ION_FLAG_CACHED)
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings must have pagewise "
			       "sg_lists\n", __func__);
			heap->ops->unmap_dma(heap, buffer);
			kfree(buffer);
			return ERR_PTR(-EINVAL);
		}

	ret = ion_buffer_alloc_dirty(buffer);
	if (ret) {
		heap->ops->unmap_dma(heap, buffer);
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&client->lock);

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);
	mutex_unlock(&client->lock);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
		 align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}


	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!(buffer->flags & ION_FLAG_CACHED))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (buffer->flags & ION_FLAG_CACHED) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
	} else {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mutex_lock(&buffer->lock);
		/* now map it to userspace */
		ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
		mutex_unlock(&buffer->lock);
	}

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;
	ion_buffer_sync_for_device(buffer, NULL, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16.s %16u %16u\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16.s %16u %16u\n", client->name,
				   client->pid, size);
		}
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
			       "id %d\n", __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %x@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}
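
/*
 * Editor's sketch -- illustrative only, not part of ion.c at this revision.
 *
 * A minimal in-kernel user of the client API defined above: create a client,
 * allocate a cached buffer from a system heap, map it into the kernel, export
 * it as a dma-buf fd, then drop the handle.  ION_HEAP_SYSTEM_MASK and
 * ION_FLAG_CACHED are assumed to come from ion.h, and the platform is assumed
 * to register a system heap whose id equals ION_HEAP_TYPE_SYSTEM (as typical
 * board files of this era did).  The function name is hypothetical and the
 * error handling is abbreviated.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include "ion.h"

static int ion_client_usage_example(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;
	int fd, ret = 0;

	/* heap_mask of -1: this client may allocate from any heap type */
	client = ion_client_create(idev, -1, "example");
	if (IS_ERR_OR_NULL(client))
		return -ENOMEM;

	/* 16 pages, page aligned, from a system heap, with a cached mapping */
	handle = ion_alloc(client, 16 * PAGE_SIZE, PAGE_SIZE,
			   ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -ENOMEM;
		goto out_client;
	}

	/* kernel mappings are refcounted per handle and per buffer */
	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR_OR_NULL(vaddr)) {
		memset(vaddr, 0, 16 * PAGE_SIZE);
		ion_unmap_kernel(client, handle);
	}

	/*
	 * Export as a dma-buf fd.  The fd holds its own buffer reference, so
	 * the memory stays alive after ion_free() until the fd is closed.
	 */
	fd = ion_share_dma_buf(client, handle);
	if (fd >= 0)
		pr_info("ion example: buffer exported as fd %d\n", fd);

	/* drop the handle; the buffer is freed once the last reference goes */
	ion_free(client, handle);
out_client:
	ion_client_destroy(client);
	return ret;
}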