/* clk.c — revision 3a5aec246f294004564cbe960724fa0ace59a4c5 */
1/* 2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> 3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 * 9 * Standard functionality for the common clock API. See Documentation/clk.txt 10 */ 11 12#include <linux/clk-private.h> 13#include <linux/module.h> 14#include <linux/mutex.h> 15#include <linux/spinlock.h> 16#include <linux/err.h> 17#include <linux/list.h> 18#include <linux/slab.h> 19#include <linux/of.h> 20#include <linux/device.h> 21#include <linux/init.h> 22#include <linux/sched.h> 23 24static DEFINE_SPINLOCK(enable_lock); 25static DEFINE_MUTEX(prepare_lock); 26 27static struct task_struct *prepare_owner; 28static struct task_struct *enable_owner; 29 30static int prepare_refcnt; 31static int enable_refcnt; 32 33static HLIST_HEAD(clk_root_list); 34static HLIST_HEAD(clk_orphan_list); 35static LIST_HEAD(clk_notifier_list); 36 37/*** locking ***/ 38static void clk_prepare_lock(void) 39{ 40 if (!mutex_trylock(&prepare_lock)) { 41 if (prepare_owner == current) { 42 prepare_refcnt++; 43 return; 44 } 45 mutex_lock(&prepare_lock); 46 } 47 WARN_ON_ONCE(prepare_owner != NULL); 48 WARN_ON_ONCE(prepare_refcnt != 0); 49 prepare_owner = current; 50 prepare_refcnt = 1; 51} 52 53static void clk_prepare_unlock(void) 54{ 55 WARN_ON_ONCE(prepare_owner != current); 56 WARN_ON_ONCE(prepare_refcnt == 0); 57 58 if (--prepare_refcnt) 59 return; 60 prepare_owner = NULL; 61 mutex_unlock(&prepare_lock); 62} 63 64static unsigned long clk_enable_lock(void) 65{ 66 unsigned long flags; 67 68 if (!spin_trylock_irqsave(&enable_lock, flags)) { 69 if (enable_owner == current) { 70 enable_refcnt++; 71 return flags; 72 } 73 spin_lock_irqsave(&enable_lock, flags); 74 } 75 WARN_ON_ONCE(enable_owner != NULL); 76 WARN_ON_ONCE(enable_refcnt != 0); 77 enable_owner = current; 
78 enable_refcnt = 1; 79 return flags; 80} 81 82static void clk_enable_unlock(unsigned long flags) 83{ 84 WARN_ON_ONCE(enable_owner != current); 85 WARN_ON_ONCE(enable_refcnt == 0); 86 87 if (--enable_refcnt) 88 return; 89 enable_owner = NULL; 90 spin_unlock_irqrestore(&enable_lock, flags); 91} 92 93/*** debugfs support ***/ 94 95#ifdef CONFIG_COMMON_CLK_DEBUG 96#include <linux/debugfs.h> 97 98static struct dentry *rootdir; 99static struct dentry *orphandir; 100static int inited = 0; 101 102static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level) 103{ 104 if (!c) 105 return; 106 107 seq_printf(s, "%*s%-*s %-11d %-12d %-10lu", 108 level * 3 + 1, "", 109 30 - level * 3, c->name, 110 c->enable_count, c->prepare_count, clk_get_rate(c)); 111 seq_printf(s, "\n"); 112} 113 114static void clk_summary_show_subtree(struct seq_file *s, struct clk *c, 115 int level) 116{ 117 struct clk *child; 118 119 if (!c) 120 return; 121 122 clk_summary_show_one(s, c, level); 123 124 hlist_for_each_entry(child, &c->children, child_node) 125 clk_summary_show_subtree(s, child, level + 1); 126} 127 128static int clk_summary_show(struct seq_file *s, void *data) 129{ 130 struct clk *c; 131 132 seq_printf(s, " clock enable_cnt prepare_cnt rate\n"); 133 seq_printf(s, "---------------------------------------------------------------------\n"); 134 135 clk_prepare_lock(); 136 137 hlist_for_each_entry(c, &clk_root_list, child_node) 138 clk_summary_show_subtree(s, c, 0); 139 140 hlist_for_each_entry(c, &clk_orphan_list, child_node) 141 clk_summary_show_subtree(s, c, 0); 142 143 clk_prepare_unlock(); 144 145 return 0; 146} 147 148 149static int clk_summary_open(struct inode *inode, struct file *file) 150{ 151 return single_open(file, clk_summary_show, inode->i_private); 152} 153 154static const struct file_operations clk_summary_fops = { 155 .open = clk_summary_open, 156 .read = seq_read, 157 .llseek = seq_lseek, 158 .release = single_release, 159}; 160 161static void 
clk_dump_one(struct seq_file *s, struct clk *c, int level) 162{ 163 if (!c) 164 return; 165 166 seq_printf(s, "\"%s\": { ", c->name); 167 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 168 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 169 seq_printf(s, "\"rate\": %lu", clk_get_rate(c)); 170} 171 172static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level) 173{ 174 struct clk *child; 175 176 if (!c) 177 return; 178 179 clk_dump_one(s, c, level); 180 181 hlist_for_each_entry(child, &c->children, child_node) { 182 seq_printf(s, ","); 183 clk_dump_subtree(s, child, level + 1); 184 } 185 186 seq_printf(s, "}"); 187} 188 189static int clk_dump(struct seq_file *s, void *data) 190{ 191 struct clk *c; 192 bool first_node = true; 193 194 seq_printf(s, "{"); 195 196 clk_prepare_lock(); 197 198 hlist_for_each_entry(c, &clk_root_list, child_node) { 199 if (!first_node) 200 seq_printf(s, ","); 201 first_node = false; 202 clk_dump_subtree(s, c, 0); 203 } 204 205 hlist_for_each_entry(c, &clk_orphan_list, child_node) { 206 seq_printf(s, ","); 207 clk_dump_subtree(s, c, 0); 208 } 209 210 clk_prepare_unlock(); 211 212 seq_printf(s, "}"); 213 return 0; 214} 215 216 217static int clk_dump_open(struct inode *inode, struct file *file) 218{ 219 return single_open(file, clk_dump, inode->i_private); 220} 221 222static const struct file_operations clk_dump_fops = { 223 .open = clk_dump_open, 224 .read = seq_read, 225 .llseek = seq_lseek, 226 .release = single_release, 227}; 228 229/* caller must hold prepare_lock */ 230static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry) 231{ 232 struct dentry *d; 233 int ret = -ENOMEM; 234 235 if (!clk || !pdentry) { 236 ret = -EINVAL; 237 goto out; 238 } 239 240 d = debugfs_create_dir(clk->name, pdentry); 241 if (!d) 242 goto out; 243 244 clk->dentry = d; 245 246 d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry, 247 (u32 *)&clk->rate); 248 if (!d) 249 goto err_out; 250 251 d = 
debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry, 252 (u32 *)&clk->flags); 253 if (!d) 254 goto err_out; 255 256 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry, 257 (u32 *)&clk->prepare_count); 258 if (!d) 259 goto err_out; 260 261 d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry, 262 (u32 *)&clk->enable_count); 263 if (!d) 264 goto err_out; 265 266 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry, 267 (u32 *)&clk->notifier_count); 268 if (!d) 269 goto err_out; 270 271 ret = 0; 272 goto out; 273 274err_out: 275 debugfs_remove_recursive(clk->dentry); 276 clk->dentry = NULL; 277out: 278 return ret; 279} 280 281/* caller must hold prepare_lock */ 282static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry) 283{ 284 struct clk *child; 285 int ret = -EINVAL;; 286 287 if (!clk || !pdentry) 288 goto out; 289 290 ret = clk_debug_create_one(clk, pdentry); 291 292 if (ret) 293 goto out; 294 295 hlist_for_each_entry(child, &clk->children, child_node) 296 clk_debug_create_subtree(child, clk->dentry); 297 298 ret = 0; 299out: 300 return ret; 301} 302 303/** 304 * clk_debug_register - add a clk node to the debugfs clk tree 305 * @clk: the clk being added to the debugfs clk tree 306 * 307 * Dynamically adds a clk to the debugfs clk tree if debugfs has been 308 * initialized. Otherwise it bails out early since the debugfs clk tree 309 * will be created lazily by clk_debug_init as part of a late_initcall. 310 * 311 * Caller must hold prepare_lock. Only clk_init calls this function (so 312 * far) so this is taken care. 313 */ 314static int clk_debug_register(struct clk *clk) 315{ 316 struct clk *parent; 317 struct dentry *pdentry; 318 int ret = 0; 319 320 if (!inited) 321 goto out; 322 323 parent = clk->parent; 324 325 /* 326 * Check to see if a clk is a root clk. 
Also check that it is 327 * safe to add this clk to debugfs 328 */ 329 if (!parent) 330 if (clk->flags & CLK_IS_ROOT) 331 pdentry = rootdir; 332 else 333 pdentry = orphandir; 334 else 335 if (parent->dentry) 336 pdentry = parent->dentry; 337 else 338 goto out; 339 340 ret = clk_debug_create_subtree(clk, pdentry); 341 342out: 343 return ret; 344} 345 346/** 347 * clk_debug_reparent - reparent clk node in the debugfs clk tree 348 * @clk: the clk being reparented 349 * @new_parent: the new clk parent, may be NULL 350 * 351 * Rename clk entry in the debugfs clk tree if debugfs has been 352 * initialized. Otherwise it bails out early since the debugfs clk tree 353 * will be created lazily by clk_debug_init as part of a late_initcall. 354 * 355 * Caller must hold prepare_lock. 356 */ 357static void clk_debug_reparent(struct clk *clk, struct clk *new_parent) 358{ 359 struct dentry *d; 360 struct dentry *new_parent_d; 361 362 if (!inited) 363 return; 364 365 if (new_parent) 366 new_parent_d = new_parent->dentry; 367 else 368 new_parent_d = orphandir; 369 370 d = debugfs_rename(clk->dentry->d_parent, clk->dentry, 371 new_parent_d, clk->name); 372 if (d) 373 clk->dentry = d; 374 else 375 pr_debug("%s: failed to rename debugfs entry for %s\n", 376 __func__, clk->name); 377} 378 379/** 380 * clk_debug_init - lazily create the debugfs clk tree visualization 381 * 382 * clks are often initialized very early during boot before memory can 383 * be dynamically allocated and well before debugfs is setup. 384 * clk_debug_init walks the clk tree hierarchy while holding 385 * prepare_lock and creates the topology as part of a late_initcall, 386 * thus insuring that clks initialized very early will still be 387 * represented in the debugfs clk tree. This function should only be 388 * called once at boot-time, and all other clks added dynamically will 389 * be done so with clk_debug_register. 
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct dentry *d;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);

	if (!orphandir)
		return -ENOMEM;

	/* populate debugfs for every clk registered before this initcall */
	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	/* from here on clk_debug_register() adds new clks directly */
	inited = 1;

	clk_prepare_unlock();

	return 0;
}
late_initcall(clk_debug_init);
#else
/* no-op stubs when CONFIG_COMMON_CLK_DEBUG is disabled */
static inline int clk_debug_register(struct clk *clk) { return 0; }
static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
}
#endif

/*
 * Unprepare unused clks bottom-up: a clk with no prepare_count and
 * without CLK_IGNORE_UNUSED is unprepared in hardware if it reports
 * prepared.  Children are visited first so parents are still prepared
 * while their subtree is processed.
 *
 * caller must hold prepare_lock
 */
static void clk_unprepare_unused_subtree(struct clk *clk)
{
	struct clk *child;

	if (!clk)
		return;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (clk->prepare_count)
		return;

	if (clk->flags & CLK_IGNORE_UNUSED)
		return;

	if (__clk_is_prepared(clk)) {
		/* prefer the dedicated unprepare_unused hook when provided */
		if (clk->ops->unprepare_unused)
			clk->ops->unprepare_unused(clk->hw);
		else if (clk->ops->unprepare)
			clk->ops->unprepare(clk->hw);
	}
}

/*
 * Gate unused clks bottom-up, mirroring clk_unprepare_unused_subtree()
 * but for the enable state (under the enable spinlock).
 *
 * caller must hold prepare_lock
 */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	flags = clk_enable_lock();

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (__clk_is_enabled(clk)) {
		if (clk->ops->disable_unused)
			clk->ops->disable_unused(clk->hw);
		else if (clk->ops->disable)
			clk->ops->disable(clk->hw);
	}

unlock_out:
	clk_enable_unlock(flags);

out:
	return;
}

/* "clk_ignore_unused" on the kernel command line skips the sweep below */
static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

/*
 * Late-boot sweep: gate, then unprepare, every clk that nobody claimed,
 * in both the root and orphan trees.  Disable runs before unprepare so
 * no clk is ever enabled-but-unprepared.
 */
static int clk_disable_unused(void)
{
	struct clk *clk;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(clk);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

/***    helper functions   ***/

/* Return the clk's name, or NULL for a NULL clk. */
const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

/* Return the clk's hardware-specific handle, or NULL for a NULL clk. */
struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}

/* Return the number of possible parents, or 0 for a NULL clk. */
u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->num_parents;
}

/* Return the clk's current parent, or NULL for a NULL clk. */
struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}

/*
 * Look up the index'th possible parent, resolving it by name on first
 * use and caching the result in clk->parents[] when that array exists.
 */
struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
	if (!clk || index >= clk->num_parents)
		return NULL;
	else if (!clk->parents)
		return __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		return clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		return clk->parents[index];
}

/* Return the software enable refcount, or 0 for a NULL clk. */
unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->enable_count;
}

/* Return the software prepare refcount, or 0 for a NULL clk. */
unsigned int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? 0 : clk->prepare_count;
}

/*
 * Return the cached rate.  An orphan (non-root clk with no parent)
 * reports 0 since its rate cannot be meaningful yet.
 */
unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}

/* Return the clk's flags bitmask, or 0 for a NULL clk. */
unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

/* Query prepared state from hardware when possible, else from software. */
bool __clk_is_prepared(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!clk->ops->is_prepared) {
		ret = clk->prepare_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_prepared(clk->hw);
out:
	return !!ret;
}

/* Query enabled state from hardware when possible, else from software. */
bool __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return !!ret;
}

/* Depth-first search of one subtree for a clk with a matching name. */
static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

/*
 * Find a registered clk by name, searching the root tree first and the
 * orphan tree second.  Returns NULL if no clk matches.
 */
struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

/*
 * Helper for finding best parent to provide a given frequency.  This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 */
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *best_parent_rate,
			      struct clk **best_parent_p)
{
	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = clk->parent;
		if (clk->flags & CLK_SET_RATE_PARENT)
			best = __clk_round_rate(parent, rate);
		else if (parent)
			best = __clk_get_rate(parent);
		else
			best = __clk_get_rate(clk);
		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = clk->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_get_parent_by_index(clk, i);
		if (!parent)
			continue;
		if (clk->flags & CLK_SET_RATE_PARENT)
			parent_rate = __clk_round_rate(parent, rate);
		else
			parent_rate = __clk_get_rate(parent);
		if (parent_rate <= rate && parent_rate > best) {
			best_parent = parent;
			best = parent_rate;
		}
	}

out:
	/* best_parent_p is only written when a better parent was found */
	if (best_parent)
		*best_parent_p = best_parent;
	*best_parent_rate = best;

	return best;
}

/***        clk api        ***/

/*
 * Internal unprepare: drops one prepare reference, calling .unprepare
 * and recursing up to the parent when the count reaches zero.
 * Caller is expected to hold prepare_lock (all in-file callers do).
 */
void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	/* must not still be enabled when the last prepare ref goes away */
	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2c.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	clk_prepare_lock();
	__clk_unprepare(clk);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);

/*
 * Internal prepare: takes one prepare reference, preparing the parent
 * chain first when going from 0 to 1.  On .prepare failure the parent
 * reference is dropped again.  Caller is expected to hold prepare_lock.
 */
int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2c.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	clk_prepare_lock();
	ret = __clk_prepare(clk);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

/*
 * Internal disable: drops one enable reference, gating the hardware and
 * recursing up to the parent when the count reaches zero.  Caller is
 * expected to hold enable_lock.
 */
static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(IS_ERR(clk)))
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	flags = clk_enable_lock();
	__clk_disable(clk);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

/*
 * Internal enable: takes one enable reference, enabling the parent chain
 * first when going from 0 to 1; the clk must already be prepared.  On
 * .enable failure the parent reference is dropped again.  Caller is
 * expected to hold enable_lock.
 */
static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	/* enabling an unprepared clk violates the API contract */
	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = __clk_enable(clk);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.
Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;
	struct clk *parent;

	if (!clk)
		return 0;

	parent = clk->parent;
	if (parent)
		parent_rate = parent->rate;

	/* rounding preference: .determine_rate, .round_rate, then parent */
	if (clk->ops->determine_rate)
		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
						&parent);
	else if (clk->ops->round_rate)
		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
	else if (clk->flags & CLK_SET_RATE_PARENT)
		return __clk_round_rate(clk->parent, rate);
	else
		return clk->rate;
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned.  If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	clk_prepare_lock();
	ret = __clk_round_rate(clk, rate);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	/* at most one clk_notifier entry exists per clk, hence the break */
	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			break;
		}
	}

	return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.  Pass msg == 0 to skip notifications entirely.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	clk_prepare_lock();

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = __clk_get_rate(clk);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/*
 * Map a parent clk to its index in clk->parent_names[], allocating and
 * filling the clk->parents[] cache on the way.  Returns -EINVAL when
 * parent is not a possible parent of clk, -ENOMEM on allocation failure.
 */
static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
	int i;

	if (!clk->parents) {
		clk->parents = kcalloc(clk->num_parents,
					sizeof(struct clk *), GFP_KERNEL);
		if (!clk->parents)
			return -ENOMEM;
	}

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents[i] == parent)
			return i;

		if (clk->parents[i])
			continue;

		if (!strcmp(clk->parent_names[i], parent->name)) {
			clk->parents[i] = __clk_lookup(parent->name);
			return i;
		}
	}

	return -EINVAL;
}

/*
 * Move clk under new_parent in the tree topology (or onto the orphan
 * list when new_parent is NULL) and update clk->parent.  Callers hold
 * enable_lock around this to keep the topology consistent.
 */
static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == clk)
			new_parent->new_child = NULL;

		hlist_add_head(&clk->child_node, &new_parent->children);
	} else {
		hlist_add_head(&clk->child_node, &clk_orphan_list);
	}

	clk->parent = new_parent;
}

/*
 * Switch clk to the parent at index p_index, migrating prepare/enable
 * state and rolling the topology back if the hardware .set_parent fails.
 * The statement ordering here is deliberate — see the comment below.
 */
static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk *old_parent = clk->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (clk->prepare_count) {
		__clk_prepare(parent);
		clk_enable(parent);
		clk_enable(clk);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(clk, parent);
	clk_enable_unlock(flags);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	if (ret) {
		/* hardware refused: restore the old topology and drop the
		 * temporary references taken on the new parent above */
		flags = clk_enable_lock();
		clk_reparent(clk, old_parent);
		clk_enable_unlock(flags);

		if (clk->prepare_count) {
			clk_disable(clk);
			clk_disable(parent);
			__clk_unprepare(parent);
		}
		return ret;
	}

	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (clk->prepare_count) {
		clk_disable(clk);
		clk_disable(old_parent);
		__clk_unprepare(old_parent);
	}

	/* update debugfs with new clk tree topology */
	clk_debug_reparent(clk, parent);
	return 0;
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

/*
 * Record the pending new_rate/new_parent for clk and propagate the
 * resulting rates down through its subtree (children keep their current
 * parent, hence NULL/0 in the recursion).
 */
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
			     struct clk *new_parent, u8 p_index)
{
	struct clk *child;

	clk->new_rate = new_rate;
	clk->new_parent = new_parent;
	clk->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	clk->new_child = NULL;
	if (new_parent && new_parent != clk->parent)
		new_parent->new_child = clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.  Returns NULL when nothing can (or needs to) change.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	struct clk *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	int p_index = 0;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = clk->parent;
	if (parent)
		best_parent_rate = parent->rate;

	/* find the closest rate and parent clk/rate */
	if (clk->ops->determine_rate) {
		/* .determine_rate may pick a different parent */
		new_rate = clk->ops->determine_rate(clk->hw, rate,
						    &best_parent_rate,
						    &parent);
	} else if (clk->ops->round_rate) {
		new_rate = clk->ops->round_rate(clk->hw, rate,
						&best_parent_rate);
	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		clk->new_rate = clk->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, clk->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, clk->name);
			return NULL;
		}
	}

	/* if the parent's rate must change too, recurse upwards */
	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(clk, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree.
 Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
	struct clk *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	/* subtrees whose rate does not change need no notification */
	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child) {
		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* the deepest objecting clk wins; NULL means nobody objected */
	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;

	old_rate = clk->rate;

	/* set parent first so the rate below is computed against it */
	if (clk->new_parent && clk->new_parent != clk->parent)
		__clk_set_parent(clk, clk->new_parent, clk->new_parent_index);

	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	/* re-read the achieved rate; fall back to the parent's rate */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
	else
		clk->rate = best_parent_rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child)
		clk_change_rate(clk->new_child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
1433 */ 1434int clk_set_rate(struct clk *clk, unsigned long rate) 1435{ 1436 struct clk *top, *fail_clk; 1437 int ret = 0; 1438 1439 if (!clk) 1440 return 0; 1441 1442 /* prevent racing with updates to the clock topology */ 1443 clk_prepare_lock(); 1444 1445 /* bail early if nothing to do */ 1446 if (rate == clk_get_rate(clk)) 1447 goto out; 1448 1449 if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) { 1450 ret = -EBUSY; 1451 goto out; 1452 } 1453 1454 /* calculate new rates and get the topmost changed clock */ 1455 top = clk_calc_new_rates(clk, rate); 1456 if (!top) { 1457 ret = -EINVAL; 1458 goto out; 1459 } 1460 1461 /* notify that we are about to change rates */ 1462 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); 1463 if (fail_clk) { 1464 pr_warn("%s: failed to set %s rate\n", __func__, 1465 fail_clk->name); 1466 clk_propagate_rate_change(top, ABORT_RATE_CHANGE); 1467 ret = -EBUSY; 1468 goto out; 1469 } 1470 1471 /* change the rates */ 1472 clk_change_rate(top); 1473 1474out: 1475 clk_prepare_unlock(); 1476 1477 return ret; 1478} 1479EXPORT_SYMBOL_GPL(clk_set_rate); 1480 1481/** 1482 * clk_get_parent - return the parent of a clk 1483 * @clk: the clk whose parent gets returned 1484 * 1485 * Simply returns clk->parent. Returns NULL if clk is NULL. 1486 */ 1487struct clk *clk_get_parent(struct clk *clk) 1488{ 1489 struct clk *parent; 1490 1491 clk_prepare_lock(); 1492 parent = __clk_get_parent(clk); 1493 clk_prepare_unlock(); 1494 1495 return parent; 1496} 1497EXPORT_SYMBOL_GPL(clk_get_parent); 1498 1499/* 1500 * .get_parent is mandatory for clocks with multiple possible parents. It is 1501 * optional for single-parent clocks. Always call .get_parent if it is 1502 * available and WARN if it is missing for multi-parent clocks. 1503 * 1504 * For single-parent clocks without .get_parent, first check to see if the 1505 * .parents array exists, and if so use it to avoid an expensive tree 1506 * traversal. 
If .parents does not exist then walk the tree with __clk_lookup. 1507 */ 1508static struct clk *__clk_init_parent(struct clk *clk) 1509{ 1510 struct clk *ret = NULL; 1511 u8 index; 1512 1513 /* handle the trivial cases */ 1514 1515 if (!clk->num_parents) 1516 goto out; 1517 1518 if (clk->num_parents == 1) { 1519 if (IS_ERR_OR_NULL(clk->parent)) 1520 ret = clk->parent = __clk_lookup(clk->parent_names[0]); 1521 ret = clk->parent; 1522 goto out; 1523 } 1524 1525 if (!clk->ops->get_parent) { 1526 WARN(!clk->ops->get_parent, 1527 "%s: multi-parent clocks must implement .get_parent\n", 1528 __func__); 1529 goto out; 1530 }; 1531 1532 /* 1533 * Do our best to cache parent clocks in clk->parents. This prevents 1534 * unnecessary and expensive calls to __clk_lookup. We don't set 1535 * clk->parent here; that is done by the calling function 1536 */ 1537 1538 index = clk->ops->get_parent(clk->hw); 1539 1540 if (!clk->parents) 1541 clk->parents = 1542 kcalloc(clk->num_parents, sizeof(struct clk *), 1543 GFP_KERNEL); 1544 1545 ret = clk_get_parent_by_index(clk, index); 1546 1547out: 1548 return ret; 1549} 1550 1551void __clk_reparent(struct clk *clk, struct clk *new_parent) 1552{ 1553 clk_reparent(clk, new_parent); 1554 clk_debug_reparent(clk, new_parent); 1555 __clk_recalc_rates(clk, POST_RATE_CHANGE); 1556} 1557 1558/** 1559 * clk_set_parent - switch the parent of a mux clk 1560 * @clk: the mux clk whose input we are switching 1561 * @parent: the new input to clk 1562 * 1563 * Re-parent clk to use parent as its new input source. If clk is in 1564 * prepared state, the clk will get enabled for the duration of this call. If 1565 * that's not acceptable for a specific clk (Eg: the consumer can't handle 1566 * that, the reparenting is glitchy in hardware, etc), use the 1567 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. 
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!clk)
		return 0;

	if (!clk->ops)
		return -EINVAL;

	/* verify ops for multi-parent clks */
	if ((clk->num_parents > 1) && (!clk->ops->set_parent))
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/* nothing to do if the parent is already set */
	if (clk->parent == parent)
		goto out;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		p_rate = parent->rate;
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, clk->name);
			ret = p_index;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(clk, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(clk, parent, p_index);

	/* propagate rate recalculation accordingly */
	if (ret)
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
	else
		__clk_recalc_rates(clk, POST_RATE_CHANGE);

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp2;

	if (!clk)
		return -EINVAL;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
	    !((clk->ops->round_rate || clk->ops->determine_rate) &&
	      clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents > 1 && !clk->parents) {
		clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
				GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	clk_debug_register(clk);
	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		if (orphan->num_parents && orphan->ops->get_parent) {
			/*
			 * NOTE(review): the index returned by .get_parent is
			 * used to index parent_names[] without a bounds check
			 * against num_parents — presumably drivers guarantee
			 * a valid index; confirm.
			 */
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(clk->name, orphan->parent_names[i]))
				__clk_reparent(orphan, clk);
			continue;
		}

		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

out:
	clk_prepare_unlock();

	return ret;
}

/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk.  None of the
 * fields of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use with
 * very large numbers of clocks that need to be statically initialized.
 It is
 * a layering violation to include clk-private.h from any code which implements
 * a clock's .ops; as such any statically initialized clock data MUST be in a
 * separate C file from the logic that implements its operations.  Returns 0
 * on success, otherwise an error code.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	/* hw->clk is caller-preallocated; just wire up the init data */
	clk = hw->clk;
	clk->name = hw->init->name;
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->parent_names = hw->init->parent_names;
	clk->num_parents = hw->init->num_parents;

	ret = __clk_init(dev, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

/*
 * _clk_register - populate a caller-allocated struct clk from hw->init and
 * run __clk_init on it.  Copies name and parent_names so the originals may
 * live in __initdata.  Returns 0 on success; on failure all copies made so
 * far are freed and a negative error code is returned (clk itself is NOT
 * freed — ownership stays with the caller).
 */
static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
{
	int i, ret;

	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->clk = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
			GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}


	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	ret = __clk_init(dev, clk);
	if (!ret)
		return 0;

	/* unwind in reverse order; i indexes the first unallocated slot */
fail_parent_names_copy:
	while (--i >= 0)
		kfree(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree(clk->name);
fail_name:
	return ret;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	ret = _clk_register(dev, hw, clk);
	if (!ret)
		return clk;

	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);

/* devres destructor: res is the struct clk allocated by devm_clk_register */
static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register().  Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach.
See clk_register() for 1928 * more information. 1929 */ 1930struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 1931{ 1932 struct clk *clk; 1933 int ret; 1934 1935 clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL); 1936 if (!clk) 1937 return ERR_PTR(-ENOMEM); 1938 1939 ret = _clk_register(dev, hw, clk); 1940 if (!ret) { 1941 devres_add(dev, clk); 1942 } else { 1943 devres_free(clk); 1944 clk = ERR_PTR(ret); 1945 } 1946 1947 return clk; 1948} 1949EXPORT_SYMBOL_GPL(devm_clk_register); 1950 1951static int devm_clk_match(struct device *dev, void *res, void *data) 1952{ 1953 struct clk *c = res; 1954 if (WARN_ON(!c)) 1955 return 0; 1956 return c == data; 1957} 1958 1959/** 1960 * devm_clk_unregister - resource managed clk_unregister() 1961 * @clk: clock to unregister 1962 * 1963 * Deallocate a clock allocated with devm_clk_register(). Normally 1964 * this function will not need to be called and the resource management 1965 * code will ensure that the resource is freed. 1966 */ 1967void devm_clk_unregister(struct device *dev, struct clk *clk) 1968{ 1969 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk)); 1970} 1971EXPORT_SYMBOL_GPL(devm_clk_unregister); 1972 1973/*** clk rate change notifiers ***/ 1974 1975/** 1976 * clk_notifier_register - add a clk rate change notifier 1977 * @clk: struct clk * to watch 1978 * @nb: struct notifier_block * with callback info 1979 * 1980 * Request notification when clk's rate changes. This uses an SRCU 1981 * notifier because we want it to block and notifier unregistrations are 1982 * uncommon. The callbacks associated with the notifier must not 1983 * re-enter into the clk framework by calling any top-level clk APIs; 1984 * this will cause a nested prepare_lock mutex. 1985 * 1986 * Pre-change notifier callbacks will be passed the current, pre-change 1987 * rate of the clk via struct clk_notifier_data.old_rate. 
The new, 1988 * post-change rate of the clk is passed via struct 1989 * clk_notifier_data.new_rate. 1990 * 1991 * Post-change notifiers will pass the now-current, post-change rate of 1992 * the clk in both struct clk_notifier_data.old_rate and struct 1993 * clk_notifier_data.new_rate. 1994 * 1995 * Abort-change notifiers are effectively the opposite of pre-change 1996 * notifiers: the original pre-change clk rate is passed in via struct 1997 * clk_notifier_data.new_rate and the failed post-change rate is passed 1998 * in via struct clk_notifier_data.old_rate. 1999 * 2000 * clk_notifier_register() must be called from non-atomic context. 2001 * Returns -EINVAL if called with null arguments, -ENOMEM upon 2002 * allocation failure; otherwise, passes along the return value of 2003 * srcu_notifier_chain_register(). 2004 */ 2005int clk_notifier_register(struct clk *clk, struct notifier_block *nb) 2006{ 2007 struct clk_notifier *cn; 2008 int ret = -ENOMEM; 2009 2010 if (!clk || !nb) 2011 return -EINVAL; 2012 2013 clk_prepare_lock(); 2014 2015 /* search the list of notifiers for this clk */ 2016 list_for_each_entry(cn, &clk_notifier_list, node) 2017 if (cn->clk == clk) 2018 break; 2019 2020 /* if clk wasn't in the notifier list, allocate new clk_notifier */ 2021 if (cn->clk != clk) { 2022 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL); 2023 if (!cn) 2024 goto out; 2025 2026 cn->clk = clk; 2027 srcu_init_notifier_head(&cn->notifier_head); 2028 2029 list_add(&cn->node, &clk_notifier_list); 2030 } 2031 2032 ret = srcu_notifier_chain_register(&cn->notifier_head, nb); 2033 2034 clk->notifier_count++; 2035 2036out: 2037 clk_prepare_unlock(); 2038 2039 return ret; 2040} 2041EXPORT_SYMBOL_GPL(clk_notifier_register); 2042 2043/** 2044 * clk_notifier_unregister - remove a clk rate change notifier 2045 * @clk: struct clk * 2046 * @nb: struct notifier_block * with callback info 2047 * 2048 * Request no further notification for changes to 'clk' and frees memory 2049 * allocated 
in clk_notifier_register. 2050 * 2051 * Returns -EINVAL if called with null arguments; otherwise, passes 2052 * along the return value of srcu_notifier_chain_unregister(). 2053 */ 2054int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) 2055{ 2056 struct clk_notifier *cn = NULL; 2057 int ret = -EINVAL; 2058 2059 if (!clk || !nb) 2060 return -EINVAL; 2061 2062 clk_prepare_lock(); 2063 2064 list_for_each_entry(cn, &clk_notifier_list, node) 2065 if (cn->clk == clk) 2066 break; 2067 2068 if (cn->clk == clk) { 2069 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 2070 2071 clk->notifier_count--; 2072 2073 /* XXX the notifier code should handle this better */ 2074 if (!cn->notifier_head.head) { 2075 srcu_cleanup_notifier_head(&cn->notifier_head); 2076 list_del(&cn->node); 2077 kfree(cn); 2078 } 2079 2080 } else { 2081 ret = -ENOENT; 2082 } 2083 2084 clk_prepare_unlock(); 2085 2086 return ret; 2087} 2088EXPORT_SYMBOL_GPL(clk_notifier_unregister); 2089 2090#ifdef CONFIG_OF 2091/** 2092 * struct of_clk_provider - Clock provider registration structure 2093 * @link: Entry in global list of clock providers 2094 * @node: Pointer to device tree node of clock provider 2095 * @get: Get clock callback. 
 Returns NULL or a struct clk for the
 * given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

extern struct of_device_id __clk_of_table[];

/* sentinel terminating the linker-assembled __clk_of_table section */
static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);

/* trivial .get callback: the registered data pointer IS the struct clk */
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
					void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

/*
 * .get callback for providers exporting an array of clocks; the first
 * specifier cell selects the index into clk_onecell_data.clks[].
 */
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	/* NOTE(review): idx is unsigned but printed with %d — %u would match */
	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %d\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	/* hold a reference on the DT node for as long as we are registered */
	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_lock);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_lock);
	pr_debug("Added clock from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_lock);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_lock);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

/*
 * Look up the provider registered for clkspec->np and ask it to decode the
 * specifier.  Returns ERR_PTR(-ENOENT) when no provider matches or the
 * matching provider's .get callback returns an error.
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-ENOENT);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_lock);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		/* stop at the first provider that yields a valid clk */
		if (!IS_ERR(clk))
			break;
	}
	mutex_unlock(&of_clk_lock);

	return clk;
}

/* number of "clocks" phandle specifiers on np, or a negative error code */
int of_clk_get_parent_count(struct device_node *np)
{
	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

/*
 * Resolve the name of np's index'th clock parent: prefer the provider's
 * "clock-output-names" property, falling back to the provider node's name.
 * Returns NULL on a bad index or unresolvable phandle.
 */
const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	const char *clk_name;
	int rc;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  clkspec.args_count ? clkspec.args[0] : 0,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	/* drop the reference taken by of_parse_phandle_with_args */
	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions.  Passing a NULL @matches uses the
 * linker-built __clk_of_table of CLK_OF_DECLARE'd providers.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;

	if (!matches)
		matches = __clk_of_table;

	for_each_matching_node_and_match(np, matches, &match) {
		of_clk_init_cb_t clk_init_cb = match->data;
		clk_init_cb(np);
	}
}
#endif