sync.c revision 01544170e1959dd261ceec6413674a528221669b
1/* 2 * drivers/base/sync.c 3 * 4 * Copyright (C) 2012 Google, Inc. 5 * 6 * This software is licensed under the terms of the GNU General Public 7 * License version 2, as published by the Free Software Foundation, and 8 * may be copied, distributed, and modified under those terms. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 */ 16 17#include <linux/debugfs.h> 18#include <linux/export.h> 19#include <linux/file.h> 20#include <linux/fs.h> 21#include <linux/kernel.h> 22#include <linux/poll.h> 23#include <linux/sched.h> 24#include <linux/seq_file.h> 25#include <linux/slab.h> 26#include <linux/uaccess.h> 27#include <linux/anon_inodes.h> 28 29#include "sync.h" 30 31static void sync_fence_signal_pt(struct sync_pt *pt); 32static int _sync_pt_has_signaled(struct sync_pt *pt); 33static void sync_fence_free(struct kref *kref); 34 35static LIST_HEAD(sync_timeline_list_head); 36static DEFINE_SPINLOCK(sync_timeline_list_lock); 37 38static LIST_HEAD(sync_fence_list_head); 39static DEFINE_SPINLOCK(sync_fence_list_lock); 40 41struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, 42 int size, const char *name) 43{ 44 struct sync_timeline *obj; 45 unsigned long flags; 46 47 if (size < sizeof(struct sync_timeline)) 48 return NULL; 49 50 obj = kzalloc(size, GFP_KERNEL); 51 if (obj == NULL) 52 return NULL; 53 54 obj->ops = ops; 55 strlcpy(obj->name, name, sizeof(obj->name)); 56 57 INIT_LIST_HEAD(&obj->child_list_head); 58 spin_lock_init(&obj->child_list_lock); 59 60 INIT_LIST_HEAD(&obj->active_list_head); 61 spin_lock_init(&obj->active_list_lock); 62 63 spin_lock_irqsave(&sync_timeline_list_lock, flags); 64 list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); 65 spin_unlock_irqrestore(&sync_timeline_list_lock, flags); 66 67 return 
obj; 68} 69EXPORT_SYMBOL(sync_timeline_create); 70 71static void sync_timeline_free(struct sync_timeline *obj) 72{ 73 unsigned long flags; 74 75 if (obj->ops->release_obj) 76 obj->ops->release_obj(obj); 77 78 spin_lock_irqsave(&sync_timeline_list_lock, flags); 79 list_del(&obj->sync_timeline_list); 80 spin_unlock_irqrestore(&sync_timeline_list_lock, flags); 81 82 kfree(obj); 83} 84 85void sync_timeline_destroy(struct sync_timeline *obj) 86{ 87 unsigned long flags; 88 bool needs_freeing; 89 90 spin_lock_irqsave(&obj->child_list_lock, flags); 91 obj->destroyed = true; 92 needs_freeing = list_empty(&obj->child_list_head); 93 spin_unlock_irqrestore(&obj->child_list_lock, flags); 94 95 if (needs_freeing) 96 sync_timeline_free(obj); 97 else 98 sync_timeline_signal(obj); 99} 100EXPORT_SYMBOL(sync_timeline_destroy); 101 102static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt) 103{ 104 unsigned long flags; 105 106 pt->parent = obj; 107 108 spin_lock_irqsave(&obj->child_list_lock, flags); 109 list_add_tail(&pt->child_list, &obj->child_list_head); 110 spin_unlock_irqrestore(&obj->child_list_lock, flags); 111} 112 113static void sync_timeline_remove_pt(struct sync_pt *pt) 114{ 115 struct sync_timeline *obj = pt->parent; 116 unsigned long flags; 117 bool needs_freeing = false; 118 119 spin_lock_irqsave(&obj->active_list_lock, flags); 120 if (!list_empty(&pt->active_list)) 121 list_del_init(&pt->active_list); 122 spin_unlock_irqrestore(&obj->active_list_lock, flags); 123 124 spin_lock_irqsave(&obj->child_list_lock, flags); 125 if (!list_empty(&pt->child_list)) { 126 list_del_init(&pt->child_list); 127 needs_freeing = obj->destroyed && 128 list_empty(&obj->child_list_head); 129 } 130 spin_unlock_irqrestore(&obj->child_list_lock, flags); 131 132 if (needs_freeing) 133 sync_timeline_free(obj); 134} 135 136void sync_timeline_signal(struct sync_timeline *obj) 137{ 138 unsigned long flags; 139 LIST_HEAD(signaled_pts); 140 struct list_head *pos, *n; 141 142 
spin_lock_irqsave(&obj->active_list_lock, flags); 143 144 list_for_each_safe(pos, n, &obj->active_list_head) { 145 struct sync_pt *pt = 146 container_of(pos, struct sync_pt, active_list); 147 148 if (_sync_pt_has_signaled(pt)) { 149 list_del_init(pos); 150 list_add(&pt->signaled_list, &signaled_pts); 151 kref_get(&pt->fence->kref); 152 } 153 } 154 155 spin_unlock_irqrestore(&obj->active_list_lock, flags); 156 157 list_for_each_safe(pos, n, &signaled_pts) { 158 struct sync_pt *pt = 159 container_of(pos, struct sync_pt, signaled_list); 160 161 list_del_init(pos); 162 sync_fence_signal_pt(pt); 163 kref_put(&pt->fence->kref, sync_fence_free); 164 } 165} 166EXPORT_SYMBOL(sync_timeline_signal); 167 168struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) 169{ 170 struct sync_pt *pt; 171 172 if (size < sizeof(struct sync_pt)) 173 return NULL; 174 175 pt = kzalloc(size, GFP_KERNEL); 176 if (pt == NULL) 177 return NULL; 178 179 INIT_LIST_HEAD(&pt->active_list); 180 sync_timeline_add_pt(parent, pt); 181 182 return pt; 183} 184EXPORT_SYMBOL(sync_pt_create); 185 186void sync_pt_free(struct sync_pt *pt) 187{ 188 if (pt->parent->ops->free_pt) 189 pt->parent->ops->free_pt(pt); 190 191 sync_timeline_remove_pt(pt); 192 193 kfree(pt); 194} 195EXPORT_SYMBOL(sync_pt_free); 196 197/* call with pt->parent->active_list_lock held */ 198static int _sync_pt_has_signaled(struct sync_pt *pt) 199{ 200 int old_status = pt->status; 201 202 if (!pt->status) 203 pt->status = pt->parent->ops->has_signaled(pt); 204 205 if (!pt->status && pt->parent->destroyed) 206 pt->status = -ENOENT; 207 208 if (pt->status != old_status) 209 pt->timestamp = ktime_get(); 210 211 return pt->status; 212} 213 214static struct sync_pt *sync_pt_dup(struct sync_pt *pt) 215{ 216 return pt->parent->ops->dup(pt); 217} 218 219/* Adds a sync pt to the active queue. 
Called when added to a fence */ 220static void sync_pt_activate(struct sync_pt *pt) 221{ 222 struct sync_timeline *obj = pt->parent; 223 unsigned long flags; 224 int err; 225 226 spin_lock_irqsave(&obj->active_list_lock, flags); 227 228 err = _sync_pt_has_signaled(pt); 229 if (err != 0) 230 goto out; 231 232 list_add_tail(&pt->active_list, &obj->active_list_head); 233 234out: 235 spin_unlock_irqrestore(&obj->active_list_lock, flags); 236} 237 238static int sync_fence_release(struct inode *inode, struct file *file); 239static unsigned int sync_fence_poll(struct file *file, poll_table *wait); 240static long sync_fence_ioctl(struct file *file, unsigned int cmd, 241 unsigned long arg); 242 243 244static const struct file_operations sync_fence_fops = { 245 .release = sync_fence_release, 246 .poll = sync_fence_poll, 247 .unlocked_ioctl = sync_fence_ioctl, 248}; 249 250static struct sync_fence *sync_fence_alloc(const char *name) 251{ 252 struct sync_fence *fence; 253 unsigned long flags; 254 255 fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); 256 if (fence == NULL) 257 return NULL; 258 259 fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, 260 fence, 0); 261 if (fence->file == NULL) 262 goto err; 263 264 kref_init(&fence->kref); 265 strlcpy(fence->name, name, sizeof(fence->name)); 266 267 INIT_LIST_HEAD(&fence->pt_list_head); 268 INIT_LIST_HEAD(&fence->waiter_list_head); 269 spin_lock_init(&fence->waiter_list_lock); 270 271 init_waitqueue_head(&fence->wq); 272 273 spin_lock_irqsave(&sync_fence_list_lock, flags); 274 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); 275 spin_unlock_irqrestore(&sync_fence_list_lock, flags); 276 277 return fence; 278 279err: 280 kfree(fence); 281 return NULL; 282} 283 284/* TODO: implement a create which takes more that one sync_pt */ 285struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) 286{ 287 struct sync_fence *fence; 288 289 if (pt->fence) 290 return NULL; 291 292 fence = 
sync_fence_alloc(name); 293 if (fence == NULL) 294 return NULL; 295 296 pt->fence = fence; 297 list_add(&pt->pt_list, &fence->pt_list_head); 298 sync_pt_activate(pt); 299 300 return fence; 301} 302EXPORT_SYMBOL(sync_fence_create); 303 304static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src) 305{ 306 struct list_head *pos; 307 308 list_for_each(pos, &src->pt_list_head) { 309 struct sync_pt *orig_pt = 310 container_of(pos, struct sync_pt, pt_list); 311 struct sync_pt *new_pt = sync_pt_dup(orig_pt); 312 313 if (new_pt == NULL) 314 return -ENOMEM; 315 316 new_pt->fence = dst; 317 list_add(&new_pt->pt_list, &dst->pt_list_head); 318 sync_pt_activate(new_pt); 319 } 320 321 return 0; 322} 323 324static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src) 325{ 326 struct list_head *src_pos, *dst_pos, *n; 327 328 list_for_each(src_pos, &src->pt_list_head) { 329 struct sync_pt *src_pt = 330 container_of(src_pos, struct sync_pt, pt_list); 331 bool collapsed = false; 332 333 list_for_each_safe(dst_pos, n, &dst->pt_list_head) { 334 struct sync_pt *dst_pt = 335 container_of(dst_pos, struct sync_pt, pt_list); 336 /* collapse two sync_pts on the same timeline 337 * to a single sync_pt that will signal at 338 * the later of the two 339 */ 340 if (dst_pt->parent == src_pt->parent) { 341 if (dst_pt->parent->ops->compare(dst_pt, src_pt) 342 == -1) { 343 struct sync_pt *new_pt = 344 sync_pt_dup(src_pt); 345 if (new_pt == NULL) 346 return -ENOMEM; 347 348 new_pt->fence = dst; 349 list_replace(&dst_pt->pt_list, 350 &new_pt->pt_list); 351 sync_pt_activate(new_pt); 352 sync_pt_free(dst_pt); 353 } 354 collapsed = true; 355 break; 356 } 357 } 358 359 if (!collapsed) { 360 struct sync_pt *new_pt = sync_pt_dup(src_pt); 361 362 if (new_pt == NULL) 363 return -ENOMEM; 364 365 new_pt->fence = dst; 366 list_add(&new_pt->pt_list, &dst->pt_list_head); 367 sync_pt_activate(new_pt); 368 } 369 } 370 371 return 0; 372} 373 374static void 
sync_fence_detach_pts(struct sync_fence *fence) 375{ 376 struct list_head *pos, *n; 377 378 list_for_each_safe(pos, n, &fence->pt_list_head) { 379 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); 380 sync_timeline_remove_pt(pt); 381 } 382} 383 384static void sync_fence_free_pts(struct sync_fence *fence) 385{ 386 struct list_head *pos, *n; 387 388 list_for_each_safe(pos, n, &fence->pt_list_head) { 389 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); 390 sync_pt_free(pt); 391 } 392} 393 394struct sync_fence *sync_fence_fdget(int fd) 395{ 396 struct file *file = fget(fd); 397 398 if (file == NULL) 399 return NULL; 400 401 if (file->f_op != &sync_fence_fops) 402 goto err; 403 404 return file->private_data; 405 406err: 407 fput(file); 408 return NULL; 409} 410EXPORT_SYMBOL(sync_fence_fdget); 411 412void sync_fence_put(struct sync_fence *fence) 413{ 414 fput(fence->file); 415} 416EXPORT_SYMBOL(sync_fence_put); 417 418void sync_fence_install(struct sync_fence *fence, int fd) 419{ 420 fd_install(fd, fence->file); 421} 422EXPORT_SYMBOL(sync_fence_install); 423 424static int sync_fence_get_status(struct sync_fence *fence) 425{ 426 struct list_head *pos; 427 int status = 1; 428 429 list_for_each(pos, &fence->pt_list_head) { 430 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); 431 int pt_status = pt->status; 432 433 if (pt_status < 0) { 434 status = pt_status; 435 break; 436 } else if (status == 1) { 437 status = pt_status; 438 } 439 } 440 441 return status; 442} 443 444struct sync_fence *sync_fence_merge(const char *name, 445 struct sync_fence *a, struct sync_fence *b) 446{ 447 struct sync_fence *fence; 448 int err; 449 450 fence = sync_fence_alloc(name); 451 if (fence == NULL) 452 return NULL; 453 454 err = sync_fence_copy_pts(fence, a); 455 if (err < 0) 456 goto err; 457 458 err = sync_fence_merge_pts(fence, b); 459 if (err < 0) 460 goto err; 461 462 fence->status = sync_fence_get_status(fence); 463 464 return fence; 465err: 
466 sync_fence_free_pts(fence); 467 kfree(fence); 468 return NULL; 469} 470EXPORT_SYMBOL(sync_fence_merge); 471 472static void sync_fence_signal_pt(struct sync_pt *pt) 473{ 474 LIST_HEAD(signaled_waiters); 475 struct sync_fence *fence = pt->fence; 476 struct list_head *pos; 477 struct list_head *n; 478 unsigned long flags; 479 int status; 480 481 status = sync_fence_get_status(fence); 482 483 spin_lock_irqsave(&fence->waiter_list_lock, flags); 484 /* 485 * this should protect against two threads racing on the signaled 486 * false -> true transition 487 */ 488 if (status && !fence->status) { 489 list_for_each_safe(pos, n, &fence->waiter_list_head) 490 list_move(pos, &signaled_waiters); 491 492 fence->status = status; 493 } else { 494 status = 0; 495 } 496 spin_unlock_irqrestore(&fence->waiter_list_lock, flags); 497 498 if (status) { 499 list_for_each_safe(pos, n, &signaled_waiters) { 500 struct sync_fence_waiter *waiter = 501 container_of(pos, struct sync_fence_waiter, 502 waiter_list); 503 504 list_del(pos); 505 waiter->callback(fence, waiter); 506 } 507 wake_up(&fence->wq); 508 } 509} 510 511int sync_fence_wait_async(struct sync_fence *fence, 512 struct sync_fence_waiter *waiter) 513{ 514 unsigned long flags; 515 int err = 0; 516 517 spin_lock_irqsave(&fence->waiter_list_lock, flags); 518 519 if (fence->status) { 520 err = fence->status; 521 goto out; 522 } 523 524 list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); 525out: 526 spin_unlock_irqrestore(&fence->waiter_list_lock, flags); 527 528 return err; 529} 530EXPORT_SYMBOL(sync_fence_wait_async); 531 532int sync_fence_cancel_async(struct sync_fence *fence, 533 struct sync_fence_waiter *waiter) 534{ 535 struct list_head *pos; 536 struct list_head *n; 537 unsigned long flags; 538 int ret = -ENOENT; 539 540 spin_lock_irqsave(&fence->waiter_list_lock, flags); 541 /* 542 * Make sure waiter is still in waiter_list because it is possible for 543 * the waiter to be removed from the list while the callback is 
still 544 * pending. 545 */ 546 list_for_each_safe(pos, n, &fence->waiter_list_head) { 547 struct sync_fence_waiter *list_waiter = 548 container_of(pos, struct sync_fence_waiter, 549 waiter_list); 550 if (list_waiter == waiter) { 551 list_del(pos); 552 ret = 0; 553 break; 554 } 555 } 556 spin_unlock_irqrestore(&fence->waiter_list_lock, flags); 557 return ret; 558} 559EXPORT_SYMBOL(sync_fence_cancel_async); 560 561int sync_fence_wait(struct sync_fence *fence, long timeout) 562{ 563 int err; 564 565 if (timeout) { 566 timeout = msecs_to_jiffies(timeout); 567 err = wait_event_interruptible_timeout(fence->wq, 568 fence->status != 0, 569 timeout); 570 } else { 571 err = wait_event_interruptible(fence->wq, fence->status != 0); 572 } 573 574 if (err < 0) 575 return err; 576 577 if (fence->status < 0) 578 return fence->status; 579 580 if (fence->status == 0) 581 return -ETIME; 582 583 return 0; 584} 585EXPORT_SYMBOL(sync_fence_wait); 586 587static void sync_fence_free(struct kref *kref) 588{ 589 struct sync_fence *fence = container_of(kref, struct sync_fence, kref); 590 591 sync_fence_free_pts(fence); 592 593 kfree(fence); 594} 595 596static int sync_fence_release(struct inode *inode, struct file *file) 597{ 598 struct sync_fence *fence = file->private_data; 599 unsigned long flags; 600 601 /* 602 * We need to remove all ways to access this fence before droping 603 * our ref. 604 * 605 * start with its membership in the global fence list 606 */ 607 spin_lock_irqsave(&sync_fence_list_lock, flags); 608 list_del(&fence->sync_fence_list); 609 spin_unlock_irqrestore(&sync_fence_list_lock, flags); 610 611 /* 612 * remove its pts from their parents so that sync_timeline_signal() 613 * can't reference the fence. 
614 */ 615 sync_fence_detach_pts(fence); 616 617 kref_put(&fence->kref, sync_fence_free); 618 619 return 0; 620} 621 622static unsigned int sync_fence_poll(struct file *file, poll_table *wait) 623{ 624 struct sync_fence *fence = file->private_data; 625 626 poll_wait(file, &fence->wq, wait); 627 628 if (fence->status == 1) 629 return POLLIN; 630 else if (fence->status < 0) 631 return POLLERR; 632 else 633 return 0; 634} 635 636static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg) 637{ 638 __s32 value; 639 640 if (copy_from_user(&value, (void __user *)arg, sizeof(value))) 641 return -EFAULT; 642 643 return sync_fence_wait(fence, value); 644} 645 646static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) 647{ 648 int fd = get_unused_fd(); 649 int err; 650 struct sync_fence *fence2, *fence3; 651 struct sync_merge_data data; 652 653 if (copy_from_user(&data, (void __user *)arg, sizeof(data))) 654 return -EFAULT; 655 656 fence2 = sync_fence_fdget(data.fd2); 657 if (fence2 == NULL) { 658 err = -ENOENT; 659 goto err_put_fd; 660 } 661 662 data.name[sizeof(data.name) - 1] = '\0'; 663 fence3 = sync_fence_merge(data.name, fence, fence2); 664 if (fence3 == NULL) { 665 err = -ENOMEM; 666 goto err_put_fence2; 667 } 668 669 data.fence = fd; 670 if (copy_to_user((void __user *)arg, &data, sizeof(data))) { 671 err = -EFAULT; 672 goto err_put_fence3; 673 } 674 675 sync_fence_install(fence3, fd); 676 sync_fence_put(fence2); 677 return 0; 678 679err_put_fence3: 680 sync_fence_put(fence3); 681 682err_put_fence2: 683 sync_fence_put(fence2); 684 685err_put_fd: 686 put_unused_fd(fd); 687 return err; 688} 689 690static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) 691{ 692 struct sync_pt_info *info = data; 693 int ret; 694 695 if (size < sizeof(struct sync_pt_info)) 696 return -ENOMEM; 697 698 info->len = sizeof(struct sync_pt_info); 699 700 if (pt->parent->ops->fill_driver_data) { 701 ret = 
pt->parent->ops->fill_driver_data(pt, info->driver_data, 702 size - sizeof(*info)); 703 if (ret < 0) 704 return ret; 705 706 info->len += ret; 707 } 708 709 strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name)); 710 strlcpy(info->driver_name, pt->parent->ops->driver_name, 711 sizeof(info->driver_name)); 712 info->status = pt->status; 713 info->timestamp_ns = ktime_to_ns(pt->timestamp); 714 715 return info->len; 716} 717 718static long sync_fence_ioctl_fence_info(struct sync_fence *fence, 719 unsigned long arg) 720{ 721 struct sync_fence_info_data *data; 722 struct list_head *pos; 723 __u32 size; 724 __u32 len = 0; 725 int ret; 726 727 if (copy_from_user(&size, (void __user *)arg, sizeof(size))) 728 return -EFAULT; 729 730 if (size < sizeof(struct sync_fence_info_data)) 731 return -EINVAL; 732 733 if (size > 4096) 734 size = 4096; 735 736 data = kzalloc(size, GFP_KERNEL); 737 if (data == NULL) 738 return -ENOMEM; 739 740 strlcpy(data->name, fence->name, sizeof(data->name)); 741 data->status = fence->status; 742 len = sizeof(struct sync_fence_info_data); 743 744 list_for_each(pos, &fence->pt_list_head) { 745 struct sync_pt *pt = 746 container_of(pos, struct sync_pt, pt_list); 747 748 ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); 749 750 if (ret < 0) 751 goto out; 752 753 len += ret; 754 } 755 756 data->len = len; 757 758 if (copy_to_user((void __user *)arg, data, len)) 759 ret = -EFAULT; 760 else 761 ret = 0; 762 763out: 764 kfree(data); 765 766 return ret; 767} 768 769static long sync_fence_ioctl(struct file *file, unsigned int cmd, 770 unsigned long arg) 771{ 772 struct sync_fence *fence = file->private_data; 773 switch (cmd) { 774 case SYNC_IOC_WAIT: 775 return sync_fence_ioctl_wait(fence, arg); 776 777 case SYNC_IOC_MERGE: 778 return sync_fence_ioctl_merge(fence, arg); 779 780 case SYNC_IOC_FENCE_INFO: 781 return sync_fence_ioctl_fence_info(fence, arg); 782 783 default: 784 return -ENOTTY; 785 } 786} 787 788#ifdef CONFIG_DEBUG_FS 
/* Map a pt/fence status code to the string shown in the debugfs dump. */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

/*
 * Print one pt.  @fence selects the format: when dumping a fence the
 * line is prefixed with the parent timeline's name ("<obj>_pt ...");
 * when dumping a timeline the prefix is omitted.
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		/* timestamp records when the pt first changed status */
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	/* optional driver-specific detail */
	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

/* Print a timeline and all of its child pts (child_list_lock held). */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

/* Print a fence, its pts, and any pending async waiters. */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

	/*
	 * NOTE(review): pt_list_head is walked without any lock here;
	 * the pt list is only mutated at fence create/merge/release, but
	 * confirm a concurrently released fence cannot be reached via
	 * sync_fence_list while this dump runs.
	 */
	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

/*
 * seq_file show handler for /sys/kernel/debug/sync: dump every live
 * timeline, then every live fence, from the global registries.
 */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

/* Create the read-only "sync" debugfs entry at late init. */
static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}

late_initcall(sync_debugfs_init);

#endif