sync.c revision eeb2f571639feedcfce3f1718b0c3fd85d796812
1/* 2 * drivers/base/sync.c 3 * 4 * Copyright (C) 2012 Google, Inc. 5 * 6 * This software is licensed under the terms of the GNU General Public 7 * License version 2, as published by the Free Software Foundation, and 8 * may be copied, distributed, and modified under those terms. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 */ 16 17#include <linux/debugfs.h> 18#include <linux/export.h> 19#include <linux/file.h> 20#include <linux/fs.h> 21#include <linux/kernel.h> 22#include <linux/poll.h> 23#include <linux/sched.h> 24#include <linux/seq_file.h> 25#include <linux/slab.h> 26#include <linux/uaccess.h> 27#include <linux/anon_inodes.h> 28 29#include "sync.h" 30 31static void sync_fence_signal_pt(struct sync_pt *pt); 32static int _sync_pt_has_signaled(struct sync_pt *pt); 33static void sync_fence_free(struct kref *kref); 34static void sync_dump(void); 35 36static LIST_HEAD(sync_timeline_list_head); 37static DEFINE_SPINLOCK(sync_timeline_list_lock); 38 39static LIST_HEAD(sync_fence_list_head); 40static DEFINE_SPINLOCK(sync_fence_list_lock); 41 42struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, 43 int size, const char *name) 44{ 45 struct sync_timeline *obj; 46 unsigned long flags; 47 48 if (size < sizeof(struct sync_timeline)) 49 return NULL; 50 51 obj = kzalloc(size, GFP_KERNEL); 52 if (obj == NULL) 53 return NULL; 54 55 kref_init(&obj->kref); 56 obj->ops = ops; 57 strlcpy(obj->name, name, sizeof(obj->name)); 58 59 INIT_LIST_HEAD(&obj->child_list_head); 60 spin_lock_init(&obj->child_list_lock); 61 62 INIT_LIST_HEAD(&obj->active_list_head); 63 spin_lock_init(&obj->active_list_lock); 64 65 spin_lock_irqsave(&sync_timeline_list_lock, flags); 66 list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); 67 spin_unlock_irqrestore(&sync_timeline_list_lock, flags); 68 69 return obj; 70} 71EXPORT_SYMBOL(sync_timeline_create); 72 73static void sync_timeline_free(struct kref *kref) 74{ 75 struct sync_timeline *obj = 76 container_of(kref, struct sync_timeline, kref); 77 unsigned long flags; 78 79 if (obj->ops->release_obj) 80 obj->ops->release_obj(obj); 81 82 spin_lock_irqsave(&sync_timeline_list_lock, flags); 83 list_del(&obj->sync_timeline_list); 84 spin_unlock_irqrestore(&sync_timeline_list_lock, flags); 85 86 kfree(obj); 87} 88 89void sync_timeline_destroy(struct sync_timeline *obj) 90{ 91 obj->destroyed = true; 92 93 /* 94 * If this is not the last reference, signal any children 95 * that their parent is going away. 96 */ 97 98 if (!kref_put(&obj->kref, sync_timeline_free)) 99 sync_timeline_signal(obj); 100} 101EXPORT_SYMBOL(sync_timeline_destroy); 102 103static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt) 104{ 105 unsigned long flags; 106 107 pt->parent = obj; 108 109 spin_lock_irqsave(&obj->child_list_lock, flags); 110 list_add_tail(&pt->child_list, &obj->child_list_head); 111 spin_unlock_irqrestore(&obj->child_list_lock, flags); 112} 113 114static void sync_timeline_remove_pt(struct sync_pt *pt) 115{ 116 struct sync_timeline *obj = pt->parent; 117 unsigned long flags; 118 119 spin_lock_irqsave(&obj->active_list_lock, flags); 120 if (!list_empty(&pt->active_list)) 121 list_del_init(&pt->active_list); 122 spin_unlock_irqrestore(&obj->active_list_lock, flags); 123 124 spin_lock_irqsave(&obj->child_list_lock, flags); 125 if (!list_empty(&pt->child_list)) { 126 list_del_init(&pt->child_list); 127 } 128 spin_unlock_irqrestore(&obj->child_list_lock, flags); 129} 130 131void sync_timeline_signal(struct sync_timeline *obj) 132{ 133 unsigned long flags; 134 LIST_HEAD(signaled_pts); 135 struct list_head *pos, *n; 136 137 spin_lock_irqsave(&obj->active_list_lock, flags); 138 139 list_for_each_safe(pos, n, &obj->active_list_head) { 140 struct sync_pt *pt = 141 container_of(pos, struct sync_pt, active_list); 142 143 if (_sync_pt_has_signaled(pt)) { 144 list_del_init(pos); 145 list_add(&pt->signaled_list, &signaled_pts); 146 kref_get(&pt->fence->kref); 147 } 148 } 149 150 spin_unlock_irqrestore(&obj->active_list_lock, flags); 151 152 list_for_each_safe(pos, n, &signaled_pts) { 153 struct sync_pt *pt = 154 container_of(pos, struct sync_pt, signaled_list); 155 156 list_del_init(pos); 157 sync_fence_signal_pt(pt); 158 kref_put(&pt->fence->kref, sync_fence_free); 159 } 160} 161EXPORT_SYMBOL(sync_timeline_signal); 162 163struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) 164{ 165 struct sync_pt *pt; 166 167 if (size < sizeof(struct sync_pt)) 168 return NULL; 169 170 pt = kzalloc(size, GFP_KERNEL); 171 if (pt == NULL) 172 return NULL; 173 174 INIT_LIST_HEAD(&pt->active_list); 175 kref_get(&parent->kref); 176 sync_timeline_add_pt(parent, pt); 177 178 return pt; 179} 180EXPORT_SYMBOL(sync_pt_create); 181 182void sync_pt_free(struct sync_pt *pt) 183{ 184 if (pt->parent->ops->free_pt) 185 pt->parent->ops->free_pt(pt); 186 187 sync_timeline_remove_pt(pt); 188 189 kref_put(&pt->parent->kref, sync_timeline_free); 190 191 kfree(pt); 192} 193EXPORT_SYMBOL(sync_pt_free); 194 195/* call with pt->parent->active_list_lock held */ 196static int _sync_pt_has_signaled(struct sync_pt *pt) 197{ 198 int old_status = pt->status; 199 200 if (!pt->status) 201 pt->status = pt->parent->ops->has_signaled(pt); 202 203 if (!pt->status && pt->parent->destroyed) 204 pt->status = -ENOENT; 205 206 if (pt->status != old_status) 207 pt->timestamp = ktime_get(); 208 209 return pt->status; 210} 211 212static struct sync_pt *sync_pt_dup(struct sync_pt *pt) 213{ 214 return pt->parent->ops->dup(pt); 215} 216 217/* Adds a sync pt to the active queue. Called when added to a fence */ 218static void sync_pt_activate(struct sync_pt *pt) 219{ 220 struct sync_timeline *obj = pt->parent; 221 unsigned long flags; 222 int err; 223 224 spin_lock_irqsave(&obj->active_list_lock, flags); 225 226 err = _sync_pt_has_signaled(pt); 227 if (err != 0) 228 goto out; 229 230 list_add_tail(&pt->active_list, &obj->active_list_head); 231 232out: 233 spin_unlock_irqrestore(&obj->active_list_lock, flags); 234} 235 236static int sync_fence_release(struct inode *inode, struct file *file); 237static unsigned int sync_fence_poll(struct file *file, poll_table *wait); 238static long sync_fence_ioctl(struct file *file, unsigned int cmd, 239 unsigned long arg); 240 241 242static const struct file_operations sync_fence_fops = { 243 .release = sync_fence_release, 244 .poll = sync_fence_poll, 245 .unlocked_ioctl = sync_fence_ioctl, 246}; 247 248static struct sync_fence *sync_fence_alloc(const char *name) 249{ 250 struct sync_fence *fence; 251 unsigned long flags; 252 253 fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); 254 if (fence == NULL) 255 return NULL; 256 257 fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, 258 fence, 0); 259 if (fence->file == NULL) 260 goto err; 261 262 kref_init(&fence->kref); 263 strlcpy(fence->name, name, sizeof(fence->name)); 264 265 INIT_LIST_HEAD(&fence->pt_list_head); 266 INIT_LIST_HEAD(&fence->waiter_list_head); 267 spin_lock_init(&fence->waiter_list_lock); 268 269 init_waitqueue_head(&fence->wq); 270 271 spin_lock_irqsave(&sync_fence_list_lock, flags); 272 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); 273 spin_unlock_irqrestore(&sync_fence_list_lock, flags); 274 275 return fence; 276 277err: 278 kfree(fence); 279 return NULL; 280} 281 282/* TODO: implement a create which takes more that one sync_pt */ 283struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) 284{ 285 struct sync_fence *fence; 286 287 if (pt->fence) 288 return NULL; 289 290 fence = sync_fence_alloc(name); 291 if (fence == NULL) 292 return NULL; 293 294 pt->fence = fence; 295 list_add(&pt->pt_list, &fence->pt_list_head); 296 sync_pt_activate(pt); 297 298 /* 299 * signal the fence in case pt was activated before 300 * sync_pt_activate(pt) was called 301 */ 302 sync_fence_signal_pt(pt); 303 304 return fence; 305} 306EXPORT_SYMBOL(sync_fence_create); 307 308static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src) 309{ 310 struct list_head *pos; 311 312 list_for_each(pos, &src->pt_list_head) { 313 struct sync_pt *orig_pt = 314 container_of(pos, struct sync_pt, pt_list); 315 struct sync_pt *new_pt = sync_pt_dup(orig_pt); 316 317 if (new_pt == NULL) 318 return -ENOMEM; 319 320 new_pt->fence = dst; 321 list_add(&new_pt->pt_list, &dst->pt_list_head); 322 sync_pt_activate(new_pt); 323 } 324 325 return 0; 326} 327 328static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src) 329{ 330 struct list_head *src_pos, *dst_pos, *n; 331 332 list_for_each(src_pos, &src->pt_list_head) { 333 struct sync_pt *src_pt = 334 container_of(src_pos, struct sync_pt, pt_list); 335 bool collapsed = false; 336 337 list_for_each_safe(dst_pos, n, &dst->pt_list_head) { 338 struct sync_pt *dst_pt = 339 container_of(dst_pos, struct sync_pt, pt_list); 340 /* collapse two sync_pts on the same timeline 341 * to a single sync_pt that will signal at 342 * the later of the two 343 */ 344 if (dst_pt->parent == src_pt->parent) { 345 if (dst_pt->parent->ops->compare(dst_pt, src_pt) 346 == -1) { 347 struct sync_pt *new_pt = 348 sync_pt_dup(src_pt); 349 if (new_pt == NULL) 350 return -ENOMEM; 351 352 new_pt->fence = dst; 353 list_replace(&dst_pt->pt_list, 354 &new_pt->pt_list); 355 sync_pt_activate(new_pt); 356 sync_pt_free(dst_pt); 357 } 358 collapsed = true; 359 break; 360 } 361 } 362 363 if (!collapsed) { 364 struct sync_pt *new_pt = sync_pt_dup(src_pt); 365 366 if (new_pt == NULL) 367 return -ENOMEM; 368 369 new_pt->fence = dst; 370 list_add(&new_pt->pt_list, &dst->pt_list_head); 371 sync_pt_activate(new_pt); 372 } 373 } 374 375 return 0; 376} 377 378static void sync_fence_detach_pts(struct sync_fence *fence) 379{ 380 struct list_head *pos, *n; 381 382 list_for_each_safe(pos, n, &fence->pt_list_head) { 383 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); 384 sync_timeline_remove_pt(pt); 385 } 386} 387 388static void sync_fence_free_pts(struct sync_fence *fence) 389{ 390 struct list_head *pos, *n; 391 392 list_for_each_safe(pos, n, &fence->pt_list_head) { 393 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); 394 sync_pt_free(pt); 395 } 396} 397 398struct sync_fence *sync_fence_fdget(int fd) 399{ 400 struct file *file = fget(fd); 401 402 if (file == NULL) 403 return NULL; 404 405 if (file->f_op != &sync_fence_fops) 406 goto err; 407 408 return file->private_data; 409 410err: 411 fput(file); 412 return NULL; 413} 414EXPORT_SYMBOL(sync_fence_fdget); 415 416void sync_fence_put(struct sync_fence *fence) 417{ 418 fput(fence->file); 419} 420EXPORT_SYMBOL(sync_fence_put); 421 422void sync_fence_install(struct sync_fence *fence, int fd) 423{ 424 fd_install(fd, fence->file); 425} 426EXPORT_SYMBOL(sync_fence_install); 427 428static int sync_fence_get_status(struct sync_fence *fence) 429{ 430 struct list_head *pos; 431 int status = 1; 432 433 list_for_each(pos, &fence->pt_list_head) { 434 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); 435 int pt_status = pt->status; 436 437 if (pt_status < 0) { 438 status = pt_status; 439 break; 440 } else if (status == 1) { 441 status = pt_status; 442 } 443 } 444 445 return status; 446} 447 448struct sync_fence *sync_fence_merge(const char *name, 449 struct sync_fence *a, struct sync_fence *b) 450{ 451 struct sync_fence *fence; 452 int err; 453 454 fence = sync_fence_alloc(name); 455 if (fence == NULL) 456 return NULL; 457 458 err = sync_fence_copy_pts(fence, a); 459 if (err < 0) 460 goto err; 461 462 err = sync_fence_merge_pts(fence, b); 463 if (err < 0) 464 goto err; 465 466 /* 467 * signal the fence in case one of it's pts were activated before 468 * they were activated 469 */ 470 sync_fence_signal_pt(list_first_entry(&fence->pt_list_head, 471 struct sync_pt, 472 pt_list)); 473 474 return fence; 475err: 476 sync_fence_free_pts(fence); 477 kfree(fence); 478 return NULL; 479} 480EXPORT_SYMBOL(sync_fence_merge); 481 482static void sync_fence_signal_pt(struct sync_pt *pt) 483{ 484 LIST_HEAD(signaled_waiters); 485 struct sync_fence *fence = pt->fence; 486 struct list_head *pos; 487 struct list_head *n; 488 unsigned long flags; 489 int status; 490 491 status = sync_fence_get_status(fence); 492 493 spin_lock_irqsave(&fence->waiter_list_lock, flags); 494 /* 495 * this should protect against two threads racing on the signaled 496 * false -> true transition 497 */ 498 if (status && !fence->status) { 499 list_for_each_safe(pos, n, &fence->waiter_list_head) 500 list_move(pos, &signaled_waiters); 501 502 fence->status = status; 503 } else { 504 status = 0; 505 } 506 spin_unlock_irqrestore(&fence->waiter_list_lock, flags); 507 508 if (status) { 509 list_for_each_safe(pos, n, &signaled_waiters) { 510 struct sync_fence_waiter *waiter = 511 container_of(pos, struct sync_fence_waiter, 512 waiter_list); 513 514 list_del(pos); 515 waiter->callback(fence, waiter); 516 } 517 wake_up(&fence->wq); 518 } 519} 520 521int sync_fence_wait_async(struct sync_fence *fence, 522 struct sync_fence_waiter *waiter) 523{ 524 unsigned long flags; 525 int err = 0; 526 527 spin_lock_irqsave(&fence->waiter_list_lock, flags); 528 529 if (fence->status) { 530 err = fence->status; 531 goto out; 532 } 533 534 list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); 535out: 536 spin_unlock_irqrestore(&fence->waiter_list_lock, flags); 537 538 return err; 539} 540EXPORT_SYMBOL(sync_fence_wait_async); 541 542int sync_fence_cancel_async(struct sync_fence *fence, 543 struct sync_fence_waiter *waiter) 544{ 545 struct list_head *pos; 546 struct list_head *n; 547 unsigned long flags; 548 int ret = -ENOENT; 549 550 spin_lock_irqsave(&fence->waiter_list_lock, flags); 551 /* 552 * Make sure waiter is still in waiter_list because it is possible for 553 * the waiter to be removed from the list while the callback is still 554 * pending. 555 */ 556 list_for_each_safe(pos, n, &fence->waiter_list_head) { 557 struct sync_fence_waiter *list_waiter = 558 container_of(pos, struct sync_fence_waiter, 559 waiter_list); 560 if (list_waiter == waiter) { 561 list_del(pos); 562 ret = 0; 563 break; 564 } 565 } 566 spin_unlock_irqrestore(&fence->waiter_list_lock, flags); 567 return ret; 568} 569EXPORT_SYMBOL(sync_fence_cancel_async); 570 571static bool sync_fence_check(struct sync_fence *fence) 572{ 573 /* 574 * Make sure that reads to fence->status are ordered with the 575 * wait queue event triggering 576 */ 577 smp_rmb(); 578 return fence->status != 0; 579} 580 581int sync_fence_wait(struct sync_fence *fence, long timeout) 582{ 583 int err = 0; 584 585 if (timeout > 0) { 586 timeout = msecs_to_jiffies(timeout); 587 err = wait_event_interruptible_timeout(fence->wq, 588 sync_fence_check(fence), 589 timeout); 590 } else if (timeout < 0) { 591 err = wait_event_interruptible(fence->wq, fence->status != 0); 592 } 593 594 if (err < 0) 595 return err; 596 597 if (fence->status < 0) { 598 pr_info("fence error %d on [%p]\n", fence->status, fence); 599 sync_dump(); 600 return fence->status; 601 } 602 603 if (fence->status == 0) { 604 pr_info("fence timeout on [%p] after %dms\n", fence, 605 jiffies_to_msecs(timeout)); 606 sync_dump(); 607 return -ETIME; 608 } 609 610 return 0; 611} 612EXPORT_SYMBOL(sync_fence_wait); 613 614static void sync_fence_free(struct kref *kref) 615{ 616 struct sync_fence *fence = container_of(kref, struct sync_fence, kref); 617 618 sync_fence_free_pts(fence); 619 620 kfree(fence); 621} 622 623static int sync_fence_release(struct inode *inode, struct file *file) 624{ 625 struct sync_fence *fence = file->private_data; 626 unsigned long flags; 627 628 /* 629 * We need to remove all ways to access this fence before droping 630 * our ref. 631 * 632 * start with its membership in the global fence list 633 */ 634 spin_lock_irqsave(&sync_fence_list_lock, flags); 635 list_del(&fence->sync_fence_list); 636 spin_unlock_irqrestore(&sync_fence_list_lock, flags); 637 638 /* 639 * remove its pts from their parents so that sync_timeline_signal() 640 * can't reference the fence. 641 */ 642 sync_fence_detach_pts(fence); 643 644 kref_put(&fence->kref, sync_fence_free); 645 646 return 0; 647} 648 649static unsigned int sync_fence_poll(struct file *file, poll_table *wait) 650{ 651 struct sync_fence *fence = file->private_data; 652 653 poll_wait(file, &fence->wq, wait); 654 655 /* 656 * Make sure that reads to fence->status are ordered with the 657 * wait queue event triggering 658 */ 659 smp_rmb(); 660 661 if (fence->status == 1) 662 return POLLIN; 663 else if (fence->status < 0) 664 return POLLERR; 665 else 666 return 0; 667} 668 669static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg) 670{ 671 __s32 value; 672 673 if (copy_from_user(&value, (void __user *)arg, sizeof(value))) 674 return -EFAULT; 675 676 return sync_fence_wait(fence, value); 677} 678 679static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) 680{ 681 int fd = get_unused_fd(); 682 int err; 683 struct sync_fence *fence2, *fence3; 684 struct sync_merge_data data; 685 686 if (fd < 0) 687 return fd; 688 689 if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { 690 err = -EFAULT; 691 goto err_put_fd; 692 } 693 694 fence2 = sync_fence_fdget(data.fd2); 695 if (fence2 == NULL) { 696 err = -ENOENT; 697 goto err_put_fd; 698 } 699 700 data.name[sizeof(data.name) - 1] = '\0'; 701 fence3 = sync_fence_merge(data.name, fence, fence2); 702 if (fence3 == NULL) { 703 err = -ENOMEM; 704 goto err_put_fence2; 705 } 706 707 data.fence = fd; 708 if (copy_to_user((void __user *)arg, &data, sizeof(data))) { 709 err = -EFAULT; 710 goto err_put_fence3; 711 } 712 713 sync_fence_install(fence3, fd); 714 sync_fence_put(fence2); 715 return 0; 716 717err_put_fence3: 718 sync_fence_put(fence3); 719 720err_put_fence2: 721 sync_fence_put(fence2); 722 723err_put_fd: 724 put_unused_fd(fd); 725 return err; 726} 727 728static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) 729{ 730 struct sync_pt_info *info = data; 731 int ret; 732 733 if (size < sizeof(struct sync_pt_info)) 734 return -ENOMEM; 735 736 info->len = sizeof(struct sync_pt_info); 737 738 if (pt->parent->ops->fill_driver_data) { 739 ret = pt->parent->ops->fill_driver_data(pt, info->driver_data, 740 size - sizeof(*info)); 741 if (ret < 0) 742 return ret; 743 744 info->len += ret; 745 } 746 747 strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name)); 748 strlcpy(info->driver_name, pt->parent->ops->driver_name, 749 sizeof(info->driver_name)); 750 info->status = pt->status; 751 info->timestamp_ns = ktime_to_ns(pt->timestamp); 752 753 return info->len; 754} 755 756static long sync_fence_ioctl_fence_info(struct sync_fence *fence, 757 unsigned long arg) 758{ 759 struct sync_fence_info_data *data; 760 struct list_head *pos; 761 __u32 size; 762 __u32 len = 0; 763 int ret; 764 765 if (copy_from_user(&size, (void __user *)arg, sizeof(size))) 766 return -EFAULT; 767 768 if (size < sizeof(struct sync_fence_info_data)) 769 return -EINVAL; 770 771 if (size > 4096) 772 size = 4096; 773 774 data = kzalloc(size, GFP_KERNEL); 775 if (data == NULL) 776 return -ENOMEM; 777 778 strlcpy(data->name, fence->name, sizeof(data->name)); 779 data->status = fence->status; 780 len = sizeof(struct sync_fence_info_data); 781 782 list_for_each(pos, &fence->pt_list_head) { 783 struct sync_pt *pt = 784 container_of(pos, struct sync_pt, pt_list); 785 786 ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); 787 788 if (ret < 0) 789 goto out; 790 791 len += ret; 792 } 793 794 data->len = len; 795 796 if (copy_to_user((void __user *)arg, data, len)) 797 ret = -EFAULT; 798 else 799 ret = 0; 800 801out: 802 kfree(data); 803 804 return ret; 805} 806 807static long sync_fence_ioctl(struct file *file, unsigned int cmd, 808 unsigned long arg) 809{ 810 struct sync_fence *fence = file->private_data; 811 switch (cmd) { 812 case SYNC_IOC_WAIT: 813 return sync_fence_ioctl_wait(fence, arg); 814 815 case SYNC_IOC_MERGE: 816 return sync_fence_ioctl_merge(fence, arg); 817 818 case SYNC_IOC_FENCE_INFO: 819 return sync_fence_ioctl_fence_info(fence, arg); 820 821 default: 822 return -ENOTTY; 823 } 824} 825 826#ifdef CONFIG_DEBUG_FS 827static const char *sync_status_str(int status) 828{ 829 if (status > 0) 830 return "signaled"; 831 else if (status == 0) 832 return "active"; 833 else 834 return "error"; 835} 836 837static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) 838{ 839 int status = pt->status; 840 seq_printf(s, " %s%spt %s", 841 fence ? pt->parent->name : "", 842 fence ? "_" : "", 843 sync_status_str(status)); 844 if (pt->status) { 845 struct timeval tv = ktime_to_timeval(pt->timestamp); 846 seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); 847 } 848 849 if (pt->parent->ops->print_pt) { 850 seq_printf(s, ": "); 851 pt->parent->ops->print_pt(s, pt); 852 } 853 854 seq_printf(s, "\n"); 855} 856 857static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) 858{ 859 struct list_head *pos; 860 unsigned long flags; 861 862 seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); 863 864 if (obj->ops->print_obj) { 865 seq_printf(s, ": "); 866 obj->ops->print_obj(s, obj); 867 } 868 869 seq_printf(s, "\n"); 870 871 spin_lock_irqsave(&obj->child_list_lock, flags); 872 list_for_each(pos, &obj->child_list_head) { 873 struct sync_pt *pt = 874 container_of(pos, struct sync_pt, child_list); 875 sync_print_pt(s, pt, false); 876 } 877 spin_unlock_irqrestore(&obj->child_list_lock, flags); 878} 879 880static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) 881{ 882 struct list_head *pos; 883 unsigned long flags; 884 885 seq_printf(s, "[%p] %s: %s\n", fence, fence->name, 886 sync_status_str(fence->status)); 887 888 list_for_each(pos, &fence->pt_list_head) { 889 struct sync_pt *pt = 890 container_of(pos, struct sync_pt, pt_list); 891 sync_print_pt(s, pt, true); 892 } 893 894 spin_lock_irqsave(&fence->waiter_list_lock, flags); 895 list_for_each(pos, &fence->waiter_list_head) { 896 struct sync_fence_waiter *waiter = 897 container_of(pos, struct sync_fence_waiter, 898 waiter_list); 899 900 seq_printf(s, "waiter %pF\n", waiter->callback); 901 } 902 spin_unlock_irqrestore(&fence->waiter_list_lock, flags); 903} 904 905static int sync_debugfs_show(struct seq_file *s, void *unused) 906{ 907 unsigned long flags; 908 struct list_head *pos; 909 910 seq_printf(s, "objs:\n--------------\n"); 911 912 spin_lock_irqsave(&sync_timeline_list_lock, flags); 913 list_for_each(pos, &sync_timeline_list_head) { 914 struct sync_timeline *obj = 915 container_of(pos, struct sync_timeline, 916 sync_timeline_list); 917 918 sync_print_obj(s, obj); 919 seq_printf(s, "\n"); 920 } 921 spin_unlock_irqrestore(&sync_timeline_list_lock, flags); 922 923 seq_printf(s, "fences:\n--------------\n"); 924 925 spin_lock_irqsave(&sync_fence_list_lock, flags); 926 list_for_each(pos, &sync_fence_list_head) { 927 struct sync_fence *fence = 928 container_of(pos, struct sync_fence, sync_fence_list); 929 930 sync_print_fence(s, fence); 931 seq_printf(s, "\n"); 932 } 933 spin_unlock_irqrestore(&sync_fence_list_lock, flags); 934 return 0; 935} 936 937static int sync_debugfs_open(struct inode *inode, struct file *file) 938{ 939 return single_open(file, sync_debugfs_show, inode->i_private); 940} 941 942static const struct file_operations sync_debugfs_fops = { 943 .open = sync_debugfs_open, 944 .read = seq_read, 945 .llseek = seq_lseek, 946 .release = single_release, 947}; 948 949static __init int sync_debugfs_init(void) 950{ 951 debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); 952 return 0; 953} 954late_initcall(sync_debugfs_init); 955 956#define DUMP_CHUNK 256 957static char sync_dump_buf[64 * 1024]; 958void sync_dump(void) 959{ 960 struct seq_file s = { 961 .buf = sync_dump_buf, 962 .size = sizeof(sync_dump_buf) - 1, 963 }; 964 int i; 965 966 sync_debugfs_show(&s, NULL); 967 968 for (i = 0; i < s.count; i += DUMP_CHUNK) { 969 if ((s.count - i) > DUMP_CHUNK) { 970 char c = s.buf[i + DUMP_CHUNK]; 971 s.buf[i + DUMP_CHUNK] = 0; 972 pr_cont("%s", s.buf + i); 973 s.buf[i + DUMP_CHUNK] = c; 974 } else { 975 s.buf[s.count] = 0; 976 pr_cont("%s", s.buf + i); 977 } 978 } 979} 980#else 981static void sync_dump(void) 982{ 983} 984#endif 985