sync.c revision b699a644f82110e8e5a0f9b45ee1d3a3cd3e4586
/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

/* forward declarations for helpers referenced before their definitions */
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

/* global list of all timelines; only walked by the debugfs dump code */
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

/* global list of all fences; likewise only used for debugging output */
static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

/**
 * sync_timeline_create() - creates a sync object
 * @ops:	specialization ops for the implementation
 * @size:	size to allocate for this obj; must be at least
 *		sizeof(struct sync_timeline) -- the extra room holds
 *		driver-private state
 * @name:	sync_timeline name (copied, truncated to obj->name)
 *
 * Returns the new timeline with one reference held, or NULL on bad
 * @size or allocation failure.
 */
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	/* make the new timeline visible to the debugfs dump */
	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

/* kref release callback: runs when the last timeline reference drops */
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	/* give the driver a chance to tear down its private state first */
	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

/**
 * sync_timeline_destroy() - destroys a sync object
 * @obj:	sync_timeline to destroy
 *
 * Marks @obj destroyed (so _sync_pt_has_signaled() reports -ENOENT for
 * still-pending pts) and drops the creator's reference.  Outstanding
 * sync_pts hold their own references, so @obj survives until the last
 * pt is freed.
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	/*
	 * NOTE(review): between kref_put() returning 0 and the signal
	 * call below, another thread could drop the remaining reference
	 * and free @obj -- presumably callers serialize destroy against
	 * pt teardown; TODO confirm.
	 */
	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

/* attach @pt to @obj's child list; called once when the pt is created */
static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

/*
 * Unlink @pt from its parent's active and child lists.  The
 * list_empty() checks (lists are re-inited on deletion) make this safe
 * to call more than once.
 */
static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list)) {
		list_del_init(&pt->child_list);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

/**
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj:	sync_timeline to signal
 *
 * Collects every active pt that now reports signaled while holding
 * active_list_lock, then notifies their fences with the lock dropped
 * (fence kref held across the gap so the fence cannot vanish).
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			/* keep the fence alive until we notify it below */
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	/* notify fences outside active_list_lock */
	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

/**
 * sync_pt_create() - creates a sync pt
 * @parent:	sync_timeline the pt will belong to
 * @size:	size to allocate; must be at least sizeof(struct sync_pt)
 *
 * Takes a reference on @parent for the lifetime of the pt and links it
 * onto the timeline's child list.  Returns NULL on bad @size or
 * allocation failure.
 */
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

/**
 * sync_pt_free() - frees a sync pt
 * @pt:		sync_pt to free
 *
 * Runs the driver's free_pt hook, unlinks the pt from its timeline and
 * drops the timeline reference taken in sync_pt_create().
 */
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/*
 * call with pt->parent->active_list_lock held
 *
 * Latches the pt status: once nonzero it is never re-queried from the
 * driver.  A destroyed parent forces pending pts to -ENOENT, and any
 * status transition stamps pt->timestamp.
 */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

/* duplicate @pt via the driver's dup hook (copy is on the same timeline) */
static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.
Called when added to a fence */ 223static void sync_pt_activate(struct sync_pt *pt) 224{ 225 struct sync_timeline *obj = pt->parent; 226 unsigned long flags; 227 int err; 228 229 spin_lock_irqsave(&obj->active_list_lock, flags); 230 231 err = _sync_pt_has_signaled(pt); 232 if (err != 0) 233 goto out; 234 235 list_add_tail(&pt->active_list, &obj->active_list_head); 236 237out: 238 spin_unlock_irqrestore(&obj->active_list_lock, flags); 239} 240 241static int sync_fence_release(struct inode *inode, struct file *file); 242static unsigned int sync_fence_poll(struct file *file, poll_table *wait); 243static long sync_fence_ioctl(struct file *file, unsigned int cmd, 244 unsigned long arg); 245 246 247static const struct file_operations sync_fence_fops = { 248 .release = sync_fence_release, 249 .poll = sync_fence_poll, 250 .unlocked_ioctl = sync_fence_ioctl, 251}; 252 253static struct sync_fence *sync_fence_alloc(const char *name) 254{ 255 struct sync_fence *fence; 256 unsigned long flags; 257 258 fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); 259 if (fence == NULL) 260 return NULL; 261 262 fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, 263 fence, 0); 264 if (fence->file == NULL) 265 goto err; 266 267 kref_init(&fence->kref); 268 strlcpy(fence->name, name, sizeof(fence->name)); 269 270 INIT_LIST_HEAD(&fence->pt_list_head); 271 INIT_LIST_HEAD(&fence->waiter_list_head); 272 spin_lock_init(&fence->waiter_list_lock); 273 274 init_waitqueue_head(&fence->wq); 275 276 spin_lock_irqsave(&sync_fence_list_lock, flags); 277 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); 278 spin_unlock_irqrestore(&sync_fence_list_lock, flags); 279 280 return fence; 281 282err: 283 kfree(fence); 284 return NULL; 285} 286 287/* TODO: implement a create which takes more that one sync_pt */ 288struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) 289{ 290 struct sync_fence *fence; 291 292 if (pt->fence) 293 return NULL; 294 295 fence = 
sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt was activated before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);

/*
 * Duplicate every pt of @src into @dst and activate the copies.
 * Returns 0 or -ENOMEM; on failure, pts copied so far remain on @dst
 * for the caller's cleanup path to free.
 */
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

/*
 * Merge @src's pts into @dst: pts on a timeline @dst already covers are
 * collapsed to the later of the two (per the driver's compare hook);
 * pts on new timelines are duplicated in.  Returns 0 or -ENOMEM.
 */
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						== -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}

/* unlink every pt of @fence from its timeline (pts are not freed) */
static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_timeline_remove_pt(pt);
	}
}

/* free every pt owned by @fence */
static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_pt_free(pt);
	}
}

/**
 * sync_fence_fdget() - get a fence from an fd
 * @fd:		fd referencing a fence
 *
 * Ensures @fd references a valid fence (by checking f_op identity) and
 * returns it with the file reference held; the caller drops it with
 * sync_fence_put().  Returns NULL on bad fd or non-fence file.
 */
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

/**
 * sync_fence_put() - puts a reference of a sync fence
 * @fence:	fence to put
 *
 * Drops the file reference; the final fput triggers
 * sync_fence_release() and full teardown.
 */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

/**
 * sync_fence_install() - installs a fence into a file descriptor
 * @fence:	fence to install
 * @fd:		file descriptor in which to install the fence
 *
 * Consumes the fence's file reference: after this the fd owns it.
 */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

/*
 * Aggregate pt statuses: any negative pt status wins (error); otherwise
 * the fence is signaled (1) only if every pt is signaled; else 0.
 */
static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

/**
 * sync_fence_merge() - merge two fences
 * @name:	name of new fence
 * @a:		fence a
 * @b:		fence b
 *
 * Creates a new fence which covers every pt in @a and @b, collapsing
 * pts that share a timeline.  @a and @b remain valid and independent.
 * Returns NULL on failure.
 */
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err =
sync_fence_merge_pts(fence, b); 468 if (err < 0) 469 goto err; 470 471 /* 472 * signal the fence in case one of it's pts were activated before 473 * they were activated 474 */ 475 sync_fence_signal_pt(list_first_entry(&fence->pt_list_head, 476 struct sync_pt, 477 pt_list)); 478 479 return fence; 480err: 481 sync_fence_free_pts(fence); 482 kfree(fence); 483 return NULL; 484} 485EXPORT_SYMBOL(sync_fence_merge); 486 487static void sync_fence_signal_pt(struct sync_pt *pt) 488{ 489 LIST_HEAD(signaled_waiters); 490 struct sync_fence *fence = pt->fence; 491 struct list_head *pos; 492 struct list_head *n; 493 unsigned long flags; 494 int status; 495 496 status = sync_fence_get_status(fence); 497 498 spin_lock_irqsave(&fence->waiter_list_lock, flags); 499 /* 500 * this should protect against two threads racing on the signaled 501 * false -> true transition 502 */ 503 if (status && !fence->status) { 504 list_for_each_safe(pos, n, &fence->waiter_list_head) 505 list_move(pos, &signaled_waiters); 506 507 fence->status = status; 508 } else { 509 status = 0; 510 } 511 spin_unlock_irqrestore(&fence->waiter_list_lock, flags); 512 513 if (status) { 514 list_for_each_safe(pos, n, &signaled_waiters) { 515 struct sync_fence_waiter *waiter = 516 container_of(pos, struct sync_fence_waiter, 517 waiter_list); 518 519 list_del(pos); 520 waiter->callback(fence, waiter); 521 } 522 wake_up(&fence->wq); 523 } 524} 525 526int sync_fence_wait_async(struct sync_fence *fence, 527 struct sync_fence_waiter *waiter) 528{ 529 unsigned long flags; 530 int err = 0; 531 532 spin_lock_irqsave(&fence->waiter_list_lock, flags); 533 534 if (fence->status) { 535 err = fence->status; 536 goto out; 537 } 538 539 list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); 540out: 541 spin_unlock_irqrestore(&fence->waiter_list_lock, flags); 542 543 return err; 544} 545EXPORT_SYMBOL(sync_fence_wait_async); 546 547int sync_fence_cancel_async(struct sync_fence *fence, 548 struct sync_fence_waiter *waiter) 549{ 550 
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

/* wait-queue condition: has the fence latched a nonzero status yet? */
static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

/**
 * sync_fence_wait() - wait on fence
 * @fence:	fence to wait on
 * @timeout:	timeout in ms; <0 waits indefinitely, 0 only polls the
 *		current status
 *
 * Returns 0 once the fence signals, -ETIME on timeout (or if @timeout
 * was 0 and the fence is still pending), a negative fence status if
 * the fence errored, or -ERESTARTSYS if interrupted.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		pr_info("fence timeout on [%p] after %dms\n", fence,
			jiffies_to_msecs(timeout));
		sync_dump();
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

/* kref release callback: frees the fence's pts and the fence itself */
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref,
struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

/* file release hook: runs when the last fd/file reference is dropped */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

/* poll hook: POLLIN once signaled, POLLERR on error, otherwise 0 */
static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

/* SYNC_IOC_WAIT: wait with a user-supplied timeout (ms, __s32) */
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

/*
 * SYNC_IOC_MERGE: merge this fence with the fence in data.fd2 and
 * return a new fd for the merged fence via data.fence.
 */
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	/* user-supplied name: force NUL termination before use */
	data.name[sizeof(data.name) - 1]
= '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	/* fd now owns fence3's file reference */
	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

/*
 * Serialize one pt into @data (a sync_pt_info record followed by
 * driver-specific bytes).  Returns the number of bytes written, or
 * -ENOMEM if @size cannot hold even the fixed header.
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

/*
 * SYNC_IOC_FENCE_INFO: copy a sync_fence_info_data header plus one
 * sync_pt_info record per pt back to userspace.  The user supplies the
 * buffer size in the first __u32; it is capped at 4096 bytes.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

/* ioctl dispatcher for the three sync fence commands */
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;
	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_DEBUG_FS
/* human-readable form of a pt/fence status for the debugfs dump */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

/*
 * Print one pt.  @fence selects the fence-view format (prefixes the
 * parent timeline's name) versus the timeline-view format.
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, " %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ?
"_" : "",
		   sync_status_str(status));
	if (pt->status) {
		/* status latched: timestamp records when it changed */
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	/* prefer the newer value_str hooks; fall back to print_pt */
	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];
		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
							    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

/* print one timeline and all of its child pts */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];
		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

/* print one fence: its status, its pts, and any pending waiters */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
container_of(pos, struct sync_fence_waiter, 924 waiter_list); 925 926 seq_printf(s, "waiter %pF\n", waiter->callback); 927 } 928 spin_unlock_irqrestore(&fence->waiter_list_lock, flags); 929} 930 931static int sync_debugfs_show(struct seq_file *s, void *unused) 932{ 933 unsigned long flags; 934 struct list_head *pos; 935 936 seq_printf(s, "objs:\n--------------\n"); 937 938 spin_lock_irqsave(&sync_timeline_list_lock, flags); 939 list_for_each(pos, &sync_timeline_list_head) { 940 struct sync_timeline *obj = 941 container_of(pos, struct sync_timeline, 942 sync_timeline_list); 943 944 sync_print_obj(s, obj); 945 seq_printf(s, "\n"); 946 } 947 spin_unlock_irqrestore(&sync_timeline_list_lock, flags); 948 949 seq_printf(s, "fences:\n--------------\n"); 950 951 spin_lock_irqsave(&sync_fence_list_lock, flags); 952 list_for_each(pos, &sync_fence_list_head) { 953 struct sync_fence *fence = 954 container_of(pos, struct sync_fence, sync_fence_list); 955 956 sync_print_fence(s, fence); 957 seq_printf(s, "\n"); 958 } 959 spin_unlock_irqrestore(&sync_fence_list_lock, flags); 960 return 0; 961} 962 963static int sync_debugfs_open(struct inode *inode, struct file *file) 964{ 965 return single_open(file, sync_debugfs_show, inode->i_private); 966} 967 968static const struct file_operations sync_debugfs_fops = { 969 .open = sync_debugfs_open, 970 .read = seq_read, 971 .llseek = seq_lseek, 972 .release = single_release, 973}; 974 975static __init int sync_debugfs_init(void) 976{ 977 debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); 978 return 0; 979} 980late_initcall(sync_debugfs_init); 981 982#define DUMP_CHUNK 256 983static char sync_dump_buf[64 * 1024]; 984void sync_dump(void) 985{ 986 struct seq_file s = { 987 .buf = sync_dump_buf, 988 .size = sizeof(sync_dump_buf) - 1, 989 }; 990 int i; 991 992 sync_debugfs_show(&s, NULL); 993 994 for (i = 0; i < s.count; i += DUMP_CHUNK) { 995 if ((s.count - i) > DUMP_CHUNK) { 996 char c = s.buf[i + DUMP_CHUNK]; 997 s.buf[i + 
DUMP_CHUNK] = 0; 998 pr_cont("%s", s.buf + i); 999 s.buf[i + DUMP_CHUNK] = c; 1000 } else { 1001 s.buf[s.count] = 0; 1002 pr_cont("%s", s.buf + i); 1003 } 1004 } 1005} 1006#else 1007static void sync_dump(void) 1008{ 1009} 1010#endif 1011