1/* 2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> 3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 * 9 * Standard functionality for the common clock API. See Documentation/clk.txt 10 */ 11 12#include <linux/clk-private.h> 13#include <linux/clk/clk-conf.h> 14#include <linux/module.h> 15#include <linux/mutex.h> 16#include <linux/spinlock.h> 17#include <linux/err.h> 18#include <linux/list.h> 19#include <linux/slab.h> 20#include <linux/of.h> 21#include <linux/device.h> 22#include <linux/init.h> 23#include <linux/sched.h> 24 25#include "clk.h" 26 27static DEFINE_SPINLOCK(enable_lock); 28static DEFINE_MUTEX(prepare_lock); 29 30static struct task_struct *prepare_owner; 31static struct task_struct *enable_owner; 32 33static int prepare_refcnt; 34static int enable_refcnt; 35 36static HLIST_HEAD(clk_root_list); 37static HLIST_HEAD(clk_orphan_list); 38static LIST_HEAD(clk_notifier_list); 39 40/*** locking ***/ 41static void clk_prepare_lock(void) 42{ 43 if (!mutex_trylock(&prepare_lock)) { 44 if (prepare_owner == current) { 45 prepare_refcnt++; 46 return; 47 } 48 mutex_lock(&prepare_lock); 49 } 50 WARN_ON_ONCE(prepare_owner != NULL); 51 WARN_ON_ONCE(prepare_refcnt != 0); 52 prepare_owner = current; 53 prepare_refcnt = 1; 54} 55 56static void clk_prepare_unlock(void) 57{ 58 WARN_ON_ONCE(prepare_owner != current); 59 WARN_ON_ONCE(prepare_refcnt == 0); 60 61 if (--prepare_refcnt) 62 return; 63 prepare_owner = NULL; 64 mutex_unlock(&prepare_lock); 65} 66 67static unsigned long clk_enable_lock(void) 68{ 69 unsigned long flags; 70 71 if (!spin_trylock_irqsave(&enable_lock, flags)) { 72 if (enable_owner == current) { 73 enable_refcnt++; 74 return flags; 75 } 76 spin_lock_irqsave(&enable_lock, flags); 77 } 78 WARN_ON_ONCE(enable_owner != NULL); 79 
WARN_ON_ONCE(enable_refcnt != 0); 80 enable_owner = current; 81 enable_refcnt = 1; 82 return flags; 83} 84 85static void clk_enable_unlock(unsigned long flags) 86{ 87 WARN_ON_ONCE(enable_owner != current); 88 WARN_ON_ONCE(enable_refcnt == 0); 89 90 if (--enable_refcnt) 91 return; 92 enable_owner = NULL; 93 spin_unlock_irqrestore(&enable_lock, flags); 94} 95 96/*** debugfs support ***/ 97 98#ifdef CONFIG_DEBUG_FS 99#include <linux/debugfs.h> 100 101static struct dentry *rootdir; 102static int inited = 0; 103static DEFINE_MUTEX(clk_debug_lock); 104static HLIST_HEAD(clk_debug_list); 105 106static struct hlist_head *all_lists[] = { 107 &clk_root_list, 108 &clk_orphan_list, 109 NULL, 110}; 111 112static struct hlist_head *orphan_list[] = { 113 &clk_orphan_list, 114 NULL, 115}; 116 117#ifdef CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING 118 119#ifdef CONFIG_COMMON_CLK_BEGIN_ACCOUNTING_FROM_BOOT 120static bool freq_stats_on = true; 121#else 122static bool freq_stats_on; 123#endif /*CONFIG_COMMON_CLK_BEGIN_ACCOUNTING_FROM_BOOT*/ 124 125static void free_tree(struct rb_node *node) 126{ 127 struct freq_stats *this; 128 129 if (!node) 130 return; 131 132 free_tree(node->rb_left); 133 free_tree(node->rb_right); 134 135 this = rb_entry(node, struct freq_stats, node); 136 kfree(this); 137} 138 139static struct freq_stats *freq_stats_insert(struct rb_root *freq_stats_table, 140 unsigned long rate) 141{ 142 struct rb_node **new = &(freq_stats_table->rb_node), *parent = NULL; 143 struct freq_stats *this; 144 145 /* Figure out where to put new node */ 146 while (*new) { 147 this = rb_entry(*new, struct freq_stats, node); 148 parent = *new; 149 150 if (rate < this->rate) 151 new = &((*new)->rb_left); 152 else if (rate > this->rate) 153 new = &((*new)->rb_right); 154 else 155 return this; 156 } 157 158 this = kzalloc(sizeof(*this), GFP_ATOMIC); 159 this->rate = rate; 160 161 /* Add new node and rebalance tree. 
*/ 162 rb_link_node(&this->node, parent, new); 163 rb_insert_color(&this->node, freq_stats_table); 164 165 return this; 166} 167 168static void generic_print_freq_stats_table(struct seq_file *m, 169 struct clk *clk, 170 bool indent, int level) 171{ 172 struct rb_node *pos; 173 struct freq_stats *cur; 174 175 if (indent) 176 seq_printf(m, "%*s*%s%20s", level * 3 + 1, "", 177 !clk->current_freq_stats ? "[" : "", 178 "default_freq"); 179 else 180 seq_printf(m, "%2s%20s", !clk->current_freq_stats ? "[" : "", 181 "default_freq"); 182 183 if (!clk->current_freq_stats && !ktime_equal(clk->start_time, 184 ktime_set(0, 0))) 185 seq_printf(m, "%40llu", 186 ktime_to_ms(ktime_add(clk->default_freq_time, 187 ktime_sub(ktime_get(), clk->start_time)))); 188 else 189 seq_printf(m, "%40llu", ktime_to_ms(clk->default_freq_time)); 190 191 if (!clk->current_freq_stats) 192 seq_puts(m, "]"); 193 194 seq_puts(m, "\n"); 195 196 for (pos = rb_first(&clk->freq_stats_table); pos; pos = rb_next(pos)) { 197 cur = rb_entry(pos, typeof(*cur), node); 198 199 if (indent) 200 seq_printf(m, "%*s*%s%20lu", level * 3 + 1, "", 201 cur->rate == clk->rate ? "[" : "", cur->rate); 202 else 203 seq_printf(m, "%2s%20lu", cur->rate == clk->rate ? 
204 "[" : "", cur->rate); 205 206 if (cur->rate == clk->rate && !ktime_equal(clk->start_time, 207 ktime_set(0, 0))) 208 seq_printf(m, "%40llu", 209 ktime_to_ms(ktime_add(cur->time_spent, 210 ktime_sub(ktime_get(), clk->start_time)))); 211 else 212 seq_printf(m, "%40llu", ktime_to_ms(cur->time_spent)); 213 214 if (cur->rate == clk->rate) 215 seq_puts(m, "]"); 216 seq_puts(m, "\n"); 217 } 218} 219 220static int clock_print_freq_stats_table(struct seq_file *m, void *unused) 221{ 222 struct clk *clk = m->private; 223 224 if (!(clk->flags & CLK_GET_RATE_NOCACHE)) 225 generic_print_freq_stats_table(m, clk, false, 0); 226 227 return 0; 228} 229 230static int freq_stats_table_open(struct inode *inode, struct file *file) 231{ 232 return single_open(file, clock_print_freq_stats_table, 233 inode->i_private); 234} 235 236static const struct file_operations freq_stats_table_fops = { 237 .open = freq_stats_table_open, 238 .read = seq_read, 239 .llseek = seq_lseek, 240 .release = seq_release, 241}; 242#endif /*CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING*/ 243 244 245static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level) 246{ 247 if (!c) 248 return; 249 250 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", 251 level * 3 + 1, "", 252 30 - level * 3, c->name, 253 c->enable_count, c->prepare_count, clk_get_rate(c), 254 clk_get_accuracy(c), clk_get_phase(c)); 255 256#ifdef CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING 257 if (!(c->flags & CLK_GET_RATE_NOCACHE)) 258 generic_print_freq_stats_table(s, c, true, level); 259#endif /*CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING*/ 260 261} 262 263static void clk_summary_show_subtree(struct seq_file *s, struct clk *c, 264 int level) 265{ 266 struct clk *child; 267 268 if (!c) 269 return; 270 271 clk_summary_show_one(s, c, level); 272 273 hlist_for_each_entry(child, &c->children, child_node) 274 clk_summary_show_subtree(s, child, level + 1); 275} 276 277static int clk_summary_show(struct seq_file *s, void *data) 278{ 279 struct clk 
*c; 280 struct hlist_head **lists = (struct hlist_head **)s->private; 281 282 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); 283 seq_puts(s, "----------------------------------------------------------------------------------------\n"); 284 285 clk_prepare_lock(); 286 287 for (; *lists; lists++) 288 hlist_for_each_entry(c, *lists, child_node) 289 clk_summary_show_subtree(s, c, 0); 290 291 clk_prepare_unlock(); 292 293 return 0; 294} 295 296 297static int clk_summary_open(struct inode *inode, struct file *file) 298{ 299 return single_open(file, clk_summary_show, inode->i_private); 300} 301 302static const struct file_operations clk_summary_fops = { 303 .open = clk_summary_open, 304 .read = seq_read, 305 .llseek = seq_lseek, 306 .release = single_release, 307}; 308 309static void clk_dump_one(struct seq_file *s, struct clk *c, int level) 310{ 311 if (!c) 312 return; 313 314 seq_printf(s, "\"%s\": { ", c->name); 315 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 316 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 317 seq_printf(s, "\"rate\": %lu", clk_get_rate(c)); 318 seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c)); 319 seq_printf(s, "\"phase\": %d", clk_get_phase(c)); 320} 321 322static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level) 323{ 324 struct clk *child; 325 326 if (!c) 327 return; 328 329 clk_dump_one(s, c, level); 330 331 hlist_for_each_entry(child, &c->children, child_node) { 332 seq_printf(s, ","); 333 clk_dump_subtree(s, child, level + 1); 334 } 335 336 seq_printf(s, "}"); 337} 338 339static int clk_dump(struct seq_file *s, void *data) 340{ 341 struct clk *c; 342 bool first_node = true; 343 struct hlist_head **lists = (struct hlist_head **)s->private; 344 345 seq_printf(s, "{"); 346 347 clk_prepare_lock(); 348 349 for (; *lists; lists++) { 350 hlist_for_each_entry(c, *lists, child_node) { 351 if (!first_node) 352 seq_puts(s, ","); 353 first_node = false; 354 clk_dump_subtree(s, c, 0); 
355 } 356 } 357 358 clk_prepare_unlock(); 359 360 seq_printf(s, "}"); 361 return 0; 362} 363 364 365static int clk_dump_open(struct inode *inode, struct file *file) 366{ 367 return single_open(file, clk_dump, inode->i_private); 368} 369 370static const struct file_operations clk_dump_fops = { 371 .open = clk_dump_open, 372 .read = seq_read, 373 .llseek = seq_lseek, 374 .release = single_release, 375}; 376 377#ifdef CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING 378static int freq_stats_get(void *unused, u64 *val) 379{ 380 *val = freq_stats_on; 381 return 0; 382} 383 384static void clk_traverse_subtree(struct clk *clk, int freq_stats_on) 385{ 386 struct clk *child; 387 struct rb_node *node; 388 389 if (!clk) 390 return; 391 392 if (freq_stats_on) { 393 for (node = rb_first(&clk->freq_stats_table); 394 node; node = rb_next(node)) 395 rb_entry(node, struct freq_stats, node)->time_spent = 396 ktime_set(0, 0); 397 398 clk->current_freq_stats = freq_stats_insert( 399 &clk->freq_stats_table, 400 clk_get_rate(clk)); 401 402 if (clk->enable_count > 0) 403 clk->start_time = ktime_get(); 404 } else { 405 if (clk->enable_count > 0) { 406 if (!clk->current_freq_stats) 407 clk->default_freq_time = 408 ktime_add(clk->default_freq_time, 409 ktime_sub(ktime_get(), clk->start_time)); 410 else 411 clk->current_freq_stats->time_spent = 412 ktime_add(clk->current_freq_stats->time_spent, 413 ktime_sub(ktime_get(), clk->start_time)); 414 415 clk->start_time = ktime_set(0, 0); 416 } 417 } 418 hlist_for_each_entry(child, &clk->children, child_node) 419 clk_traverse_subtree(child, freq_stats_on); 420} 421 422static int freq_stats_set(void *data, u64 val) 423{ 424 struct clk *c; 425 unsigned long flags; 426 struct hlist_head **lists = (struct hlist_head **)data; 427 428 clk_prepare_lock(); 429 flags = clk_enable_lock(); 430 431 if (val == 0) 432 freq_stats_on = 0; 433 else 434 freq_stats_on = 1; 435 436 for (; *lists; lists++) 437 hlist_for_each_entry(c, *lists, child_node) 438 
clk_traverse_subtree(c, freq_stats_on); 439 440 clk_enable_unlock(flags); 441 clk_prepare_unlock(); 442 443 return 0; 444} 445DEFINE_SIMPLE_ATTRIBUTE(freq_stats_fops, freq_stats_get, 446 freq_stats_set, "%llu\n"); 447#endif /*CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING*/ 448 449/* caller must hold prepare_lock */ 450static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry) 451{ 452 struct dentry *d; 453 int ret = -ENOMEM; 454 455 if (!clk || !pdentry) { 456 ret = -EINVAL; 457 goto out; 458 } 459 460 d = debugfs_create_dir(clk->name, pdentry); 461 if (!d) 462 goto out; 463 464 clk->dentry = d; 465 466 d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry, 467 (u32 *)&clk->rate); 468 if (!d) 469 goto err_out; 470 471 d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry, 472 (u32 *)&clk->accuracy); 473 if (!d) 474 goto err_out; 475 476 d = debugfs_create_u32("clk_phase", S_IRUGO, clk->dentry, 477 (u32 *)&clk->phase); 478 if (!d) 479 goto err_out; 480 481 d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry, 482 (u32 *)&clk->flags); 483 if (!d) 484 goto err_out; 485 486 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry, 487 (u32 *)&clk->prepare_count); 488 if (!d) 489 goto err_out; 490 491 d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry, 492 (u32 *)&clk->enable_count); 493 if (!d) 494 goto err_out; 495 496 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry, 497 (u32 *)&clk->notifier_count); 498 if (!d) 499 goto err_out; 500 501#ifdef CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING 502 d = debugfs_create_file("frequency_stats_table", S_IRUGO, clk->dentry, 503 clk, &freq_stats_table_fops); 504 505 if (!d) 506 goto err_out; 507#endif /*CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING*/ 508 509 if (clk->ops->debug_init) { 510 ret = clk->ops->debug_init(clk->hw, clk->dentry); 511 if (ret) 512 goto err_out; 513 } 514 515 ret = 0; 516 goto out; 517 518err_out: 519 debugfs_remove_recursive(clk->dentry); 520 clk->dentry = 
NULL; 521out: 522 return ret; 523} 524 525/** 526 * clk_debug_register - add a clk node to the debugfs clk tree 527 * @clk: the clk being added to the debugfs clk tree 528 * 529 * Dynamically adds a clk to the debugfs clk tree if debugfs has been 530 * initialized. Otherwise it bails out early since the debugfs clk tree 531 * will be created lazily by clk_debug_init as part of a late_initcall. 532 */ 533static int clk_debug_register(struct clk *clk) 534{ 535 int ret = 0; 536 537 mutex_lock(&clk_debug_lock); 538 hlist_add_head(&clk->debug_node, &clk_debug_list); 539 540 if (!inited) 541 goto unlock; 542 543 ret = clk_debug_create_one(clk, rootdir); 544unlock: 545 mutex_unlock(&clk_debug_lock); 546 547 return ret; 548} 549 550 /** 551 * clk_debug_unregister - remove a clk node from the debugfs clk tree 552 * @clk: the clk being removed from the debugfs clk tree 553 * 554 * Dynamically removes a clk and all it's children clk nodes from the 555 * debugfs clk tree if clk->dentry points to debugfs created by 556 * clk_debug_register in __clk_init. 557 */ 558static void clk_debug_unregister(struct clk *clk) 559{ 560 mutex_lock(&clk_debug_lock); 561 if (!clk->dentry) 562 goto out; 563 564 hlist_del_init(&clk->debug_node); 565 debugfs_remove_recursive(clk->dentry); 566 clk->dentry = NULL; 567out: 568 mutex_unlock(&clk_debug_lock); 569} 570 571struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode, 572 void *data, const struct file_operations *fops) 573{ 574 struct dentry *d = NULL; 575 576 if (clk->dentry) 577 d = debugfs_create_file(name, mode, clk->dentry, data, fops); 578 579 return d; 580} 581EXPORT_SYMBOL_GPL(clk_debugfs_add_file); 582 583/** 584 * clk_debug_init - lazily create the debugfs clk tree visualization 585 * 586 * clks are often initialized very early during boot before memory can 587 * be dynamically allocated and well before debugfs is setup. 
588 * clk_debug_init walks the clk tree hierarchy while holding 589 * prepare_lock and creates the topology as part of a late_initcall, 590 * thus insuring that clks initialized very early will still be 591 * represented in the debugfs clk tree. This function should only be 592 * called once at boot-time, and all other clks added dynamically will 593 * be done so with clk_debug_register. 594 */ 595static int __init clk_debug_init(void) 596{ 597 struct clk *clk; 598 struct dentry *d; 599 600 rootdir = debugfs_create_dir("clk", NULL); 601 602 if (!rootdir) 603 return -ENOMEM; 604 605 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists, 606 &clk_summary_fops); 607 if (!d) 608 return -ENOMEM; 609 610 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists, 611 &clk_dump_fops); 612 if (!d) 613 return -ENOMEM; 614 615 d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir, 616 &orphan_list, &clk_summary_fops); 617 if (!d) 618 return -ENOMEM; 619 620 d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir, 621 &orphan_list, &clk_dump_fops); 622 if (!d) 623 return -ENOMEM; 624 625#ifdef CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING 626 d = debugfs_create_file("freq_stats_on", S_IRUGO|S_IWUSR, 627 rootdir, &all_lists, &freq_stats_fops); 628 if (!d) 629 return -ENOMEM; 630#endif /*CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING*/ 631 632 mutex_lock(&clk_debug_lock); 633 hlist_for_each_entry(clk, &clk_debug_list, debug_node) 634 clk_debug_create_one(clk, rootdir); 635 636 inited = 1; 637 mutex_unlock(&clk_debug_lock); 638 639 return 0; 640} 641late_initcall(clk_debug_init); 642#else 643static inline int clk_debug_register(struct clk *clk) { return 0; } 644static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent) 645{ 646} 647static inline void clk_debug_unregister(struct clk *clk) 648{ 649} 650#endif 651 652/* caller must hold prepare_lock */ 653static void clk_unprepare_unused_subtree(struct clk *clk) 654{ 655 struct clk *child; 
656 657 if (!clk) 658 return; 659 660 hlist_for_each_entry(child, &clk->children, child_node) 661 clk_unprepare_unused_subtree(child); 662 663 if (clk->prepare_count) 664 return; 665 666 if (clk->flags & CLK_IGNORE_UNUSED) 667 return; 668 669 if (__clk_is_prepared(clk)) { 670 if (clk->ops->unprepare_unused) 671 clk->ops->unprepare_unused(clk->hw); 672 else if (clk->ops->unprepare) 673 clk->ops->unprepare(clk->hw); 674 } 675} 676 677/* caller must hold prepare_lock */ 678static void clk_disable_unused_subtree(struct clk *clk) 679{ 680 struct clk *child; 681 unsigned long flags; 682 683 if (!clk) 684 goto out; 685 686 hlist_for_each_entry(child, &clk->children, child_node) 687 clk_disable_unused_subtree(child); 688 689 flags = clk_enable_lock(); 690 691 if (clk->enable_count) 692 goto unlock_out; 693 694 if (clk->flags & CLK_IGNORE_UNUSED) 695 goto unlock_out; 696 697 /* 698 * some gate clocks have special needs during the disable-unused 699 * sequence. call .disable_unused if available, otherwise fall 700 * back to .disable 701 */ 702 if (__clk_is_enabled(clk)) { 703 if (clk->ops->disable_unused) 704 clk->ops->disable_unused(clk->hw); 705 else if (clk->ops->disable) 706 clk->ops->disable(clk->hw); 707 } 708 709unlock_out: 710 clk_enable_unlock(flags); 711 712out: 713 return; 714} 715 716static bool clk_ignore_unused; 717static int __init clk_ignore_unused_setup(char *__unused) 718{ 719 clk_ignore_unused = true; 720 return 1; 721} 722__setup("clk_ignore_unused", clk_ignore_unused_setup); 723 724static int clk_disable_unused(void) 725{ 726 struct clk *clk; 727 728 if (clk_ignore_unused) { 729 pr_warn("clk: Not disabling unused clocks\n"); 730 return 0; 731 } 732 733 clk_prepare_lock(); 734 735 hlist_for_each_entry(clk, &clk_root_list, child_node) 736 clk_disable_unused_subtree(clk); 737 738 hlist_for_each_entry(clk, &clk_orphan_list, child_node) 739 clk_disable_unused_subtree(clk); 740 741 hlist_for_each_entry(clk, &clk_root_list, child_node) 742 
clk_unprepare_unused_subtree(clk); 743 744 hlist_for_each_entry(clk, &clk_orphan_list, child_node) 745 clk_unprepare_unused_subtree(clk); 746 747 clk_prepare_unlock(); 748 749 return 0; 750} 751late_initcall_sync(clk_disable_unused); 752 753/*** helper functions ***/ 754 755const char *__clk_get_name(struct clk *clk) 756{ 757 return !clk ? NULL : clk->name; 758} 759EXPORT_SYMBOL_GPL(__clk_get_name); 760 761struct clk_hw *__clk_get_hw(struct clk *clk) 762{ 763 return !clk ? NULL : clk->hw; 764} 765EXPORT_SYMBOL_GPL(__clk_get_hw); 766 767u8 __clk_get_num_parents(struct clk *clk) 768{ 769 return !clk ? 0 : clk->num_parents; 770} 771EXPORT_SYMBOL_GPL(__clk_get_num_parents); 772 773struct clk *__clk_get_parent(struct clk *clk) 774{ 775 return !clk ? NULL : clk->parent; 776} 777EXPORT_SYMBOL_GPL(__clk_get_parent); 778 779struct clk *clk_get_parent_by_index(struct clk *clk, u8 index) 780{ 781 if (!clk || index >= clk->num_parents) 782 return NULL; 783 else if (!clk->parents) 784 return __clk_lookup(clk->parent_names[index]); 785 else if (!clk->parents[index]) 786 return clk->parents[index] = 787 __clk_lookup(clk->parent_names[index]); 788 else 789 return clk->parents[index]; 790} 791EXPORT_SYMBOL_GPL(clk_get_parent_by_index); 792 793unsigned int __clk_get_enable_count(struct clk *clk) 794{ 795 return !clk ? 0 : clk->enable_count; 796} 797 798unsigned int __clk_get_prepare_count(struct clk *clk) 799{ 800 return !clk ? 0 : clk->prepare_count; 801} 802 803unsigned long __clk_get_rate(struct clk *clk) 804{ 805 unsigned long ret; 806 807 if (!clk) { 808 ret = 0; 809 goto out; 810 } 811 812 ret = clk->rate; 813 814 if (clk->flags & CLK_IS_ROOT) 815 goto out; 816 817 if (!clk->parent) 818 ret = 0; 819 820out: 821 return ret; 822} 823EXPORT_SYMBOL_GPL(__clk_get_rate); 824 825unsigned long __clk_get_accuracy(struct clk *clk) 826{ 827 if (!clk) 828 return 0; 829 830 return clk->accuracy; 831} 832 833unsigned long __clk_get_flags(struct clk *clk) 834{ 835 return !clk ? 
0 : clk->flags; 836} 837EXPORT_SYMBOL_GPL(__clk_get_flags); 838 839bool __clk_is_prepared(struct clk *clk) 840{ 841 int ret; 842 843 if (!clk) 844 return false; 845 846 /* 847 * .is_prepared is optional for clocks that can prepare 848 * fall back to software usage counter if it is missing 849 */ 850 if (!clk->ops->is_prepared) { 851 ret = clk->prepare_count ? 1 : 0; 852 goto out; 853 } 854 855 ret = clk->ops->is_prepared(clk->hw); 856out: 857 return !!ret; 858} 859 860bool __clk_is_enabled(struct clk *clk) 861{ 862 int ret; 863 864 if (!clk) 865 return false; 866 867 /* 868 * .is_enabled is only mandatory for clocks that gate 869 * fall back to software usage counter if .is_enabled is missing 870 */ 871 if (!clk->ops->is_enabled) { 872 ret = clk->enable_count ? 1 : 0; 873 goto out; 874 } 875 876 ret = clk->ops->is_enabled(clk->hw); 877out: 878 return !!ret; 879} 880EXPORT_SYMBOL_GPL(__clk_is_enabled); 881 882static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk) 883{ 884 struct clk *child; 885 struct clk *ret; 886 887 if (!strcmp(clk->name, name)) 888 return clk; 889 890 hlist_for_each_entry(child, &clk->children, child_node) { 891 ret = __clk_lookup_subtree(name, child); 892 if (ret) 893 return ret; 894 } 895 896 return NULL; 897} 898 899struct clk *__clk_lookup(const char *name) 900{ 901 struct clk *root_clk; 902 struct clk *ret; 903 904 if (!name) 905 return NULL; 906 907 /* search the 'proper' clk tree first */ 908 hlist_for_each_entry(root_clk, &clk_root_list, child_node) { 909 ret = __clk_lookup_subtree(name, root_clk); 910 if (ret) 911 return ret; 912 } 913 914 /* if not found, then search the orphan tree */ 915 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { 916 ret = __clk_lookup_subtree(name, root_clk); 917 if (ret) 918 return ret; 919 } 920 921 return NULL; 922} 923 924/* 925 * Helper for finding best parent to provide a given frequency. This can be used 926 * directly as a determine_rate callback (e.g. 
for a mux), or from a more 927 * complex clock that may combine a mux with other operations. 928 */ 929long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, 930 unsigned long *best_parent_rate, 931 struct clk **best_parent_p) 932{ 933 struct clk *clk = hw->clk, *parent, *best_parent = NULL; 934 int i, num_parents; 935 unsigned long parent_rate, best = 0; 936 937 /* if NO_REPARENT flag set, pass through to current parent */ 938 if (clk->flags & CLK_SET_RATE_NO_REPARENT) { 939 parent = clk->parent; 940 if (clk->flags & CLK_SET_RATE_PARENT) 941 best = __clk_round_rate(parent, rate); 942 else if (parent) 943 best = __clk_get_rate(parent); 944 else 945 best = __clk_get_rate(clk); 946 goto out; 947 } 948 949 /* find the parent that can provide the fastest rate <= rate */ 950 num_parents = clk->num_parents; 951 for (i = 0; i < num_parents; i++) { 952 parent = clk_get_parent_by_index(clk, i); 953 if (!parent) 954 continue; 955 if (clk->flags & CLK_SET_RATE_PARENT) 956 parent_rate = __clk_round_rate(parent, rate); 957 else 958 parent_rate = __clk_get_rate(parent); 959 if (parent_rate <= rate && parent_rate > best) { 960 best_parent = parent; 961 best = parent_rate; 962 } 963 } 964 965out: 966 if (best_parent) 967 *best_parent_p = best_parent; 968 *best_parent_rate = best; 969 970 return best; 971} 972EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); 973 974/*** clk api ***/ 975 976void __clk_unprepare(struct clk *clk) 977{ 978 if (!clk) 979 return; 980 981 if (WARN_ON(clk->prepare_count == 0)) 982 return; 983 984 if (--clk->prepare_count > 0) 985 return; 986 987 WARN_ON(clk->enable_count > 0); 988 989 if (clk->ops->unprepare) 990 clk->ops->unprepare(clk->hw); 991 992 __clk_unprepare(clk->parent); 993} 994 995/** 996 * clk_unprepare - undo preparation of a clock source 997 * @clk: the clk being unprepared 998 * 999 * clk_unprepare may sleep, which differentiates it from clk_disable. 
In a 1000 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk 1001 * if the operation may sleep. One example is a clk which is accessed over 1002 * I2c. In the complex case a clk gate operation may require a fast and a slow 1003 * part. It is this reason that clk_unprepare and clk_disable are not mutually 1004 * exclusive. In fact clk_disable must be called before clk_unprepare. 1005 */ 1006void clk_unprepare(struct clk *clk) 1007{ 1008 if (IS_ERR_OR_NULL(clk)) 1009 return; 1010 1011 clk_prepare_lock(); 1012 __clk_unprepare(clk); 1013 clk_prepare_unlock(); 1014} 1015EXPORT_SYMBOL_GPL(clk_unprepare); 1016 1017int __clk_prepare(struct clk *clk) 1018{ 1019 int ret = 0; 1020 1021 if (!clk) 1022 return 0; 1023 1024 if (clk->prepare_count == 0) { 1025 ret = __clk_prepare(clk->parent); 1026 if (ret) 1027 return ret; 1028 1029 if (clk->ops->prepare) { 1030 ret = clk->ops->prepare(clk->hw); 1031 if (ret) { 1032 __clk_unprepare(clk->parent); 1033 return ret; 1034 } 1035 } 1036 } 1037 1038 clk->prepare_count++; 1039 1040 return 0; 1041} 1042 1043/** 1044 * clk_prepare - prepare a clock source 1045 * @clk: the clk being prepared 1046 * 1047 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple 1048 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the 1049 * operation may sleep. One example is a clk which is accessed over I2c. In 1050 * the complex case a clk ungate operation may require a fast and a slow part. 1051 * It is this reason that clk_prepare and clk_enable are not mutually 1052 * exclusive. In fact clk_prepare must be called before clk_enable. 1053 * Returns 0 on success, -EERROR otherwise. 
1054 */ 1055int clk_prepare(struct clk *clk) 1056{ 1057 int ret; 1058 1059 clk_prepare_lock(); 1060 ret = __clk_prepare(clk); 1061 clk_prepare_unlock(); 1062 1063 return ret; 1064} 1065EXPORT_SYMBOL_GPL(clk_prepare); 1066 1067static void __clk_disable(struct clk *clk) 1068{ 1069 if (!clk) 1070 return; 1071 1072 if (WARN_ON(clk->enable_count == 0)) 1073 return; 1074 1075 if (--clk->enable_count > 0) 1076 return; 1077 1078 if (clk->ops->disable) 1079 clk->ops->disable(clk->hw); 1080 1081#ifdef CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING 1082 1083 if (freq_stats_on) { 1084 if (!clk->current_freq_stats) 1085 clk->default_freq_time = 1086 ktime_add(clk->default_freq_time, 1087 ktime_sub(ktime_get(), clk->start_time)); 1088 else 1089 clk->current_freq_stats->time_spent = 1090 ktime_add(clk->current_freq_stats->time_spent, 1091 ktime_sub(ktime_get(), clk->start_time)); 1092 1093 clk->start_time = ktime_set(0, 0); 1094 } 1095#endif /*CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING*/ 1096 1097 __clk_disable(clk->parent); 1098} 1099 1100/** 1101 * clk_disable - gate a clock 1102 * @clk: the clk being gated 1103 * 1104 * clk_disable must not sleep, which differentiates it from clk_unprepare. In 1105 * a simple case, clk_disable can be used instead of clk_unprepare to gate a 1106 * clk if the operation is fast and will never sleep. One example is a 1107 * SoC-internal clk which is controlled via simple register writes. In the 1108 * complex case a clk gate operation may require a fast and a slow part. It is 1109 * this reason that clk_unprepare and clk_disable are not mutually exclusive. 1110 * In fact clk_disable must be called before clk_unprepare. 
1111 */ 1112void clk_disable(struct clk *clk) 1113{ 1114 unsigned long flags; 1115 1116 if (IS_ERR_OR_NULL(clk)) 1117 return; 1118 1119 flags = clk_enable_lock(); 1120 __clk_disable(clk); 1121 clk_enable_unlock(flags); 1122} 1123EXPORT_SYMBOL_GPL(clk_disable); 1124 1125static int __clk_enable(struct clk *clk) 1126{ 1127 int ret = 0; 1128 1129 if (!clk) 1130 return 0; 1131 1132 if (WARN_ON(clk->prepare_count == 0)) 1133 return -ESHUTDOWN; 1134 1135 if (clk->enable_count == 0) { 1136 ret = __clk_enable(clk->parent); 1137 1138 if (ret) 1139 return ret; 1140 1141 if (clk->ops->enable) { 1142 ret = clk->ops->enable(clk->hw); 1143 if (ret) { 1144 __clk_disable(clk->parent); 1145 return ret; 1146 } 1147 } 1148 1149#ifdef CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING 1150 if (freq_stats_on) 1151 clk->start_time = ktime_get(); 1152#endif /*CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING*/ 1153 } 1154 1155 clk->enable_count++; 1156 return 0; 1157} 1158 1159/** 1160 * clk_enable - ungate a clock 1161 * @clk: the clk being ungated 1162 * 1163 * clk_enable must not sleep, which differentiates it from clk_prepare. In a 1164 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk 1165 * if the operation will never sleep. One example is a SoC-internal clk which 1166 * is controlled via simple register writes. In the complex case a clk ungate 1167 * operation may require a fast and a slow part. It is this reason that 1168 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare 1169 * must be called before clk_enable. Returns 0 on success, -EERROR 1170 * otherwise. 
1171 */ 1172int clk_enable(struct clk *clk) 1173{ 1174 unsigned long flags; 1175 int ret; 1176 1177 flags = clk_enable_lock(); 1178 ret = __clk_enable(clk); 1179 clk_enable_unlock(flags); 1180 1181 return ret; 1182} 1183EXPORT_SYMBOL_GPL(clk_enable); 1184 1185/** 1186 * __clk_round_rate - round the given rate for a clk 1187 * @clk: round the rate of this clock 1188 * @rate: the rate which is to be rounded 1189 * 1190 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate 1191 */ 1192unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) 1193{ 1194 unsigned long parent_rate = 0; 1195 struct clk *parent; 1196 1197 if (!clk) 1198 return 0; 1199 1200 parent = clk->parent; 1201 if (parent) 1202 parent_rate = parent->rate; 1203 1204 if (clk->ops->determine_rate) 1205 return clk->ops->determine_rate(clk->hw, rate, &parent_rate, 1206 &parent); 1207 else if (clk->ops->round_rate) 1208 return clk->ops->round_rate(clk->hw, rate, &parent_rate); 1209 else if (clk->flags & CLK_SET_RATE_PARENT) 1210 return __clk_round_rate(clk->parent, rate); 1211 else 1212 return clk->rate; 1213} 1214EXPORT_SYMBOL_GPL(__clk_round_rate); 1215 1216/** 1217 * clk_round_rate - round the given rate for a clk 1218 * @clk: the clk for which we are rounding a rate 1219 * @rate: the rate which is to be rounded 1220 * 1221 * Takes in a rate as input and rounds it to a rate that the clk can actually 1222 * use which is then returned. If clk doesn't support round_rate operation 1223 * then the parent rate is returned. 
1224 */ 1225long clk_round_rate(struct clk *clk, unsigned long rate) 1226{ 1227 unsigned long ret; 1228 1229 clk_prepare_lock(); 1230 ret = __clk_round_rate(clk, rate); 1231 clk_prepare_unlock(); 1232 1233 return ret; 1234} 1235EXPORT_SYMBOL_GPL(clk_round_rate); 1236 1237/** 1238 * __clk_notify - call clk notifier chain 1239 * @clk: struct clk * that is changing rate 1240 * @msg: clk notifier type (see include/linux/clk.h) 1241 * @old_rate: old clk rate 1242 * @new_rate: new clk rate 1243 * 1244 * Triggers a notifier call chain on the clk rate-change notification 1245 * for 'clk'. Passes a pointer to the struct clk and the previous 1246 * and current rates to the notifier callback. Intended to be called by 1247 * internal clock code only. Returns NOTIFY_DONE from the last driver 1248 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if 1249 * a driver returns that. 1250 */ 1251static int __clk_notify(struct clk *clk, unsigned long msg, 1252 unsigned long old_rate, unsigned long new_rate) 1253{ 1254 struct clk_notifier *cn; 1255 struct clk_notifier_data cnd; 1256 int ret = NOTIFY_DONE; 1257 1258 cnd.clk = clk; 1259 cnd.old_rate = old_rate; 1260 cnd.new_rate = new_rate; 1261 1262 list_for_each_entry(cn, &clk_notifier_list, node) { 1263 if (cn->clk == clk) { 1264 ret = srcu_notifier_call_chain(&cn->notifier_head, msg, 1265 &cnd); 1266 break; 1267 } 1268 } 1269 1270 return ret; 1271} 1272 1273/** 1274 * __clk_recalc_accuracies 1275 * @clk: first clk in the subtree 1276 * 1277 * Walks the subtree of clks starting with clk and recalculates accuracies as 1278 * it goes. Note that if a clk does not implement the .recalc_accuracy 1279 * callback then it is assumed that the clock will take on the accuracy of it's 1280 * parent. 1281 * 1282 * Caller must hold prepare_lock. 
1283 */ 1284static void __clk_recalc_accuracies(struct clk *clk) 1285{ 1286 unsigned long parent_accuracy = 0; 1287 struct clk *child; 1288 1289 if (clk->parent) 1290 parent_accuracy = clk->parent->accuracy; 1291 1292 if (clk->ops->recalc_accuracy) 1293 clk->accuracy = clk->ops->recalc_accuracy(clk->hw, 1294 parent_accuracy); 1295 else 1296 clk->accuracy = parent_accuracy; 1297 1298 hlist_for_each_entry(child, &clk->children, child_node) 1299 __clk_recalc_accuracies(child); 1300} 1301 1302/** 1303 * clk_get_accuracy - return the accuracy of clk 1304 * @clk: the clk whose accuracy is being returned 1305 * 1306 * Simply returns the cached accuracy of the clk, unless 1307 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be 1308 * issued. 1309 * If clk is NULL then returns 0. 1310 */ 1311long clk_get_accuracy(struct clk *clk) 1312{ 1313 unsigned long accuracy; 1314 1315 clk_prepare_lock(); 1316 if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE)) 1317 __clk_recalc_accuracies(clk); 1318 1319 accuracy = __clk_get_accuracy(clk); 1320 clk_prepare_unlock(); 1321 1322 return accuracy; 1323} 1324EXPORT_SYMBOL_GPL(clk_get_accuracy); 1325 1326static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate) 1327{ 1328 if (clk->ops->recalc_rate) 1329 return clk->ops->recalc_rate(clk->hw, parent_rate); 1330 return parent_rate; 1331} 1332 1333/** 1334 * __clk_recalc_rates 1335 * @clk: first clk in the subtree 1336 * @msg: notification type (see include/linux/clk.h) 1337 * 1338 * Walks the subtree of clks starting with clk and recalculates rates as it 1339 * goes. Note that if a clk does not implement the .recalc_rate callback then 1340 * it is assumed that the clock will take on the rate of its parent. 1341 * 1342 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, 1343 * if necessary. 1344 * 1345 * Caller must hold prepare_lock. 
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	clk->rate = clk_recalc(clk, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	clk_prepare_lock();

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = __clk_get_rate(clk);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/*
 * Return the index of @parent in @clk's parent table, lazily allocating and
 * filling the clk->parents cache as needed.  Returns -ENOMEM on allocation
 * failure or -EINVAL if @parent is not a possible parent of @clk.
 *
 * Caller must hold prepare_lock.
 */
static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
	int i;

	if (!clk->parents) {
		clk->parents = kcalloc(clk->num_parents,
					sizeof(struct clk *), GFP_KERNEL);
		if (!clk->parents)
			return -ENOMEM;
	}

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents[i] == parent)
			return i;

		if (clk->parents[i])
			continue;

		if (!strcmp(clk->parent_names[i], parent->name)) {
			clk->parents[i] = __clk_lookup(parent->name);
			return i;
		}
	}

	return -EINVAL;
}

/*
 * Move @clk under @new_parent in the topology, or onto the orphan list when
 * @new_parent is NULL.  Caller is responsible for the required locking.
 */
static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == clk)
			new_parent->new_child = NULL;

		hlist_add_head(&clk->child_node, &new_parent->children);
	} else {
		hlist_add_head(&clk->child_node, &clk_orphan_list);
	}

	clk->parent = new_parent;
}

/*
 * First half of a parent switch: migrate prepare/enable state onto the new
 * parent and update the tree topology.  Returns the old parent so that
 * __clk_set_parent_after() can drop the temporary references.
 */
static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	struct clk *old_parent = clk->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (clk->prepare_count) {
		__clk_prepare(parent);
		clk_enable(parent);
		clk_enable(clk);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(clk, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

/* Second half of a parent switch: drop the temporary enables taken above. */
static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
		struct clk *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (clk->prepare_count) {
		clk_disable(clk);
		clk_disable(old_parent);
		__clk_unprepare(old_parent);
	}
}

/*
 * Switch @clk to @parent (parent index @p_index), rolling the topology back
 * to the old parent if the hardware .set_parent callback fails.
 */
static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk *old_parent;

	old_parent = __clk_set_parent_before(clk, parent);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	if (ret) {
		/* hardware rejected the switch: restore the old topology */
		flags = clk_enable_lock();
		clk_reparent(clk, old_parent);
		clk_enable_unlock(flags);

		if (clk->prepare_count) {
			clk_disable(clk);
			clk_disable(parent);
			__clk_unprepare(parent);
		}
		return ret;
	}

	__clk_set_parent_after(clk, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.
 * Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	new_rate = clk_recalc(clk, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
				__func__, clk->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

/*
 * Record the pending new rate/parent for @clk and recompute the pending
 * rates of its whole subtree.  Caller must hold prepare_lock.
 */
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
			     struct clk *new_parent, u8 p_index)
{
	struct clk *child;

	clk->new_rate = new_rate;
	clk->new_parent = new_parent;
	clk->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	clk->new_child = NULL;
	if (new_parent && new_parent != clk->parent)
		new_parent->new_child = clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	struct clk *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	int p_index = 0;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = clk->parent;
	if (parent)
		best_parent_rate = parent->rate;

	/* find the closest rate and parent clk/rate */
	if (clk->ops->determine_rate) {
		new_rate = clk->ops->determine_rate(clk->hw, rate,
						    &best_parent_rate,
						    &parent);
	} else if (clk->ops->round_rate) {
		new_rate = clk->ops->round_rate(clk->hw, rate,
						&best_parent_rate);
	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		clk->new_rate = clk->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
				__func__, clk->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, clk->name);
			return NULL;
		}
	}

	/* recurse upward only if this clk may pass the request to its parent */
	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(clk, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree.
 * Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
	struct clk *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child) {
		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk *old_parent;

	old_rate = clk->rate;

	if (clk->new_parent)
		best_parent_rate = clk->new_parent->rate;
	else if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->new_parent && clk->new_parent != clk->parent) {
		old_parent = __clk_set_parent_before(clk, clk->new_parent);

		if (clk->ops->set_rate_and_parent) {
			/* one combined hw op; skip the separate .set_rate */
			skip_set_rate = true;
			clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
					best_parent_rate,
					clk->new_parent_index);
		} else if (clk->ops->set_parent) {
			clk->ops->set_parent(clk->hw, clk->new_parent_index);
		}

		__clk_set_parent_after(clk, clk->new_parent, old_parent);
	}

	if (!skip_set_rate && clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	clk->rate = clk_recalc(clk, best_parent_rate);

#ifdef CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING
	/*
	 * Close out the time spent at the old rate and open an accounting
	 * entry for the new one (start_time of 0 means "never started").
	 */
	if (freq_stats_on) {
		if (!ktime_equal(clk->start_time, ktime_set(0, 0))) {
			if (!clk->current_freq_stats)
				clk->default_freq_time =
					ktime_add(clk->default_freq_time,
						  ktime_sub(ktime_get(),
							    clk->start_time));
			else
				clk->current_freq_stats->time_spent =
					ktime_add(
					clk->current_freq_stats->time_spent,
						  ktime_sub(ktime_get(),
							    clk->start_time));
		}

		clk->current_freq_stats = freq_stats_insert(
						&clk->freq_stats_table,
						clk->rate);

		if (clk->enable_count > 0)
			clk->start_time = ktime_get();
	}
#endif /*CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING*/


	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child)
		clk_change_rate(clk->new_child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/* bail early if nothing to do */
	if (rate == clk_get_rate(clk))
		goto out;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

/**
 * clk_get_parent - return
the parent of a clk 1848 * @clk: the clk whose parent gets returned 1849 * 1850 * Simply returns clk->parent. Returns NULL if clk is NULL. 1851 */ 1852struct clk *clk_get_parent(struct clk *clk) 1853{ 1854 struct clk *parent; 1855 1856 clk_prepare_lock(); 1857 parent = __clk_get_parent(clk); 1858 clk_prepare_unlock(); 1859 1860 return parent; 1861} 1862EXPORT_SYMBOL_GPL(clk_get_parent); 1863 1864/* 1865 * .get_parent is mandatory for clocks with multiple possible parents. It is 1866 * optional for single-parent clocks. Always call .get_parent if it is 1867 * available and WARN if it is missing for multi-parent clocks. 1868 * 1869 * For single-parent clocks without .get_parent, first check to see if the 1870 * .parents array exists, and if so use it to avoid an expensive tree 1871 * traversal. If .parents does not exist then walk the tree with __clk_lookup. 1872 */ 1873static struct clk *__clk_init_parent(struct clk *clk) 1874{ 1875 struct clk *ret = NULL; 1876 u8 index; 1877 1878 /* handle the trivial cases */ 1879 1880 if (!clk->num_parents) 1881 goto out; 1882 1883 if (clk->num_parents == 1) { 1884 if (IS_ERR_OR_NULL(clk->parent)) 1885 ret = clk->parent = __clk_lookup(clk->parent_names[0]); 1886 ret = clk->parent; 1887 goto out; 1888 } 1889 1890 if (!clk->ops->get_parent) { 1891 WARN(!clk->ops->get_parent, 1892 "%s: multi-parent clocks must implement .get_parent\n", 1893 __func__); 1894 goto out; 1895 }; 1896 1897 /* 1898 * Do our best to cache parent clocks in clk->parents. This prevents 1899 * unnecessary and expensive calls to __clk_lookup. 
We don't set 1900 * clk->parent here; that is done by the calling function 1901 */ 1902 1903 index = clk->ops->get_parent(clk->hw); 1904 1905 if (!clk->parents) 1906 clk->parents = 1907 kcalloc(clk->num_parents, sizeof(struct clk *), 1908 GFP_KERNEL); 1909 1910 ret = clk_get_parent_by_index(clk, index); 1911 1912out: 1913 return ret; 1914} 1915 1916void __clk_reparent(struct clk *clk, struct clk *new_parent) 1917{ 1918 clk_reparent(clk, new_parent); 1919 __clk_recalc_accuracies(clk); 1920 __clk_recalc_rates(clk, POST_RATE_CHANGE); 1921} 1922 1923/** 1924 * clk_set_parent - switch the parent of a mux clk 1925 * @clk: the mux clk whose input we are switching 1926 * @parent: the new input to clk 1927 * 1928 * Re-parent clk to use parent as its new input source. If clk is in 1929 * prepared state, the clk will get enabled for the duration of this call. If 1930 * that's not acceptable for a specific clk (Eg: the consumer can't handle 1931 * that, the reparenting is glitchy in hardware, etc), use the 1932 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. 1933 * 1934 * After successfully changing clk's parent clk_set_parent will update the 1935 * clk topology, sysfs topology and propagate rate recalculation via 1936 * __clk_recalc_rates. 1937 * 1938 * Returns 0 on success, -EERROR otherwise. 
1939 */ 1940int clk_set_parent(struct clk *clk, struct clk *parent) 1941{ 1942 int ret = 0; 1943 int p_index = 0; 1944 unsigned long p_rate = 0; 1945 1946 if (!clk) 1947 return 0; 1948 1949 /* verify ops for for multi-parent clks */ 1950 if ((clk->num_parents > 1) && (!clk->ops->set_parent)) 1951 return -ENOSYS; 1952 1953 /* prevent racing with updates to the clock topology */ 1954 clk_prepare_lock(); 1955 1956 if (clk->parent == parent) 1957 goto out; 1958 1959 /* check that we are allowed to re-parent if the clock is in use */ 1960 if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) { 1961 ret = -EBUSY; 1962 goto out; 1963 } 1964 1965 /* try finding the new parent index */ 1966 if (parent) { 1967 p_index = clk_fetch_parent_index(clk, parent); 1968 p_rate = parent->rate; 1969 if (p_index < 0) { 1970 pr_debug("%s: clk %s can not be parent of clk %s\n", 1971 __func__, parent->name, clk->name); 1972 ret = p_index; 1973 goto out; 1974 } 1975 } 1976 1977 /* propagate PRE_RATE_CHANGE notifications */ 1978 ret = __clk_speculate_rates(clk, p_rate); 1979 1980 /* abort if a driver objects */ 1981 if (ret & NOTIFY_STOP_MASK) 1982 goto out; 1983 1984 /* do the re-parent */ 1985 ret = __clk_set_parent(clk, parent, p_index); 1986 1987 /* propagate rate an accuracy recalculation accordingly */ 1988 if (ret) { 1989 __clk_recalc_rates(clk, ABORT_RATE_CHANGE); 1990 } else { 1991 __clk_recalc_rates(clk, POST_RATE_CHANGE); 1992 __clk_recalc_accuracies(clk); 1993 } 1994 1995out: 1996 clk_prepare_unlock(); 1997 1998 return ret; 1999} 2000EXPORT_SYMBOL_GPL(clk_set_parent); 2001 2002/** 2003 * clk_set_phase - adjust the phase shift of a clock signal 2004 * @clk: clock signal source 2005 * @degrees: number of degrees the signal is shifted 2006 * 2007 * Shifts the phase of a clock signal by the specified 2008 * degrees. Returns 0 on success, -EERROR otherwise. 
2009 * 2010 * This function makes no distinction about the input or reference 2011 * signal that we adjust the clock signal phase against. For example 2012 * phase locked-loop clock signal generators we may shift phase with 2013 * respect to feedback clock signal input, but for other cases the 2014 * clock phase may be shifted with respect to some other, unspecified 2015 * signal. 2016 * 2017 * Additionally the concept of phase shift does not propagate through 2018 * the clock tree hierarchy, which sets it apart from clock rates and 2019 * clock accuracy. A parent clock phase attribute does not have an 2020 * impact on the phase attribute of a child clock. 2021 */ 2022int clk_set_phase(struct clk *clk, int degrees) 2023{ 2024 int ret = 0; 2025 2026 if (!clk) 2027 goto out; 2028 2029 /* sanity check degrees */ 2030 degrees %= 360; 2031 if (degrees < 0) 2032 degrees += 360; 2033 2034 clk_prepare_lock(); 2035 2036 if (!clk->ops->set_phase) 2037 goto out_unlock; 2038 2039 ret = clk->ops->set_phase(clk->hw, degrees); 2040 2041 if (!ret) 2042 clk->phase = degrees; 2043 2044out_unlock: 2045 clk_prepare_unlock(); 2046 2047out: 2048 return ret; 2049} 2050 2051/** 2052 * clk_get_phase - return the phase shift of a clock signal 2053 * @clk: clock signal source 2054 * 2055 * Returns the phase shift of a clock node in degrees, otherwise returns 2056 * -EERROR. 2057 */ 2058int clk_get_phase(struct clk *clk) 2059{ 2060 int ret = 0; 2061 2062 if (!clk) 2063 goto out; 2064 2065 clk_prepare_lock(); 2066 ret = clk->phase; 2067 clk_prepare_unlock(); 2068 2069out: 2070 return ret; 2071} 2072 2073/** 2074 * __clk_init - initialize the data structures in a struct clk 2075 * @dev: device initializing this clk, placeholder for now 2076 * @clk: clk being initialized 2077 * 2078 * Initializes the lists in struct clk, queries the hardware for the 2079 * parent and rate and sets them both. 
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp2;

	if (!clk)
		return -EINVAL;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
	    !((clk->ops->round_rate || clk->ops->determine_rate) &&
	      clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_rate_and_parent &&
			!(clk->ops->set_parent && clk->ops->set_rate)) {
		pr_warn("%s: %s must implement .set_parent & .set_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents > 1 && !clk->parents) {
		clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
					GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy.  For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (clk->ops->recalc_accuracy)
		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
				__clk_get_accuracy(clk->parent));
	else if (clk->parent)
		clk->accuracy = clk->parent->accuracy;
	else
		clk->accuracy = 0;

	/*
	 * Set clk's phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
	if (clk->ops->get_phase)
		clk->phase = clk->ops->get_phase(clk->hw);
	else
		clk->phase = 0;

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	clk_debug_register(clk);
	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		/* prefer the orphan's own .get_parent to name comparison */
		if (orphan->num_parents && orphan->ops->get_parent) {
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(clk->name, orphan->parent_names[i]))
				__clk_reparent(orphan, clk);
			continue;
		}

		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	kref_init(&clk->ref);
out:
	clk_prepare_unlock();

	return ret;
}

/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk.  None of the fields
 * of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use with
 * very large numbers of clocks that need to be statically initialized.  It is
 * a layering violation to include clk-private.h from any code which implements
 * a clock's .ops; as such any statically initialized clock data MUST be in a
 * separate C file from the logic that implements its operations.  Returns 0
 * on success, otherwise an error code.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	/* hw->clk points at preallocated storage; nothing is allocated here */
	clk = hw->clk;
	clk->name = hw->init->name;
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->parent_names = hw->init->parent_names;
	clk->num_parents = hw->init->num_parents;
	if (dev && dev->driver)
		clk->owner = dev->driver->owner;
	else
		clk->owner = NULL;

	ret = __clk_init(dev, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjuction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	/* deep-copy the name so callers may pass __initdata strings */
	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	if (dev && dev->driver)
		clk->owner = dev->driver->owner;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->clk = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
					GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}


	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	ret = __clk_init(dev, clk);
	if (!ret)
		return clk;

	/* unwind the partially-copied names, then the outer allocations */
fail_parent_names_copy:
	while (--i >= 0)
		kfree(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree(clk->name);
fail_name:
	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);

/*
 * Free memory allocated for a clock.
 * Caller must hold prepare_lock.
2374 */ 2375static void __clk_release(struct kref *ref) 2376{ 2377 struct clk *clk = container_of(ref, struct clk, ref); 2378 int i = clk->num_parents; 2379 2380 kfree(clk->parents); 2381 while (--i >= 0) 2382 kfree(clk->parent_names[i]); 2383 2384 kfree(clk->parent_names); 2385 kfree(clk->name); 2386 2387#ifdef CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING 2388 free_tree(clk->freq_stats_table.rb_node); 2389#endif/*CONFIG_COMMON_CLK_FREQ_STATS_ACCOUNTING*/ 2390 2391 kfree(clk); 2392} 2393 2394/* 2395 * Empty clk_ops for unregistered clocks. These are used temporarily 2396 * after clk_unregister() was called on a clock and until last clock 2397 * consumer calls clk_put() and the struct clk object is freed. 2398 */ 2399static int clk_nodrv_prepare_enable(struct clk_hw *hw) 2400{ 2401 return -ENXIO; 2402} 2403 2404static void clk_nodrv_disable_unprepare(struct clk_hw *hw) 2405{ 2406 WARN_ON_ONCE(1); 2407} 2408 2409static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate, 2410 unsigned long parent_rate) 2411{ 2412 return -ENXIO; 2413} 2414 2415static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index) 2416{ 2417 return -ENXIO; 2418} 2419 2420static const struct clk_ops clk_nodrv_ops = { 2421 .enable = clk_nodrv_prepare_enable, 2422 .disable = clk_nodrv_disable_unprepare, 2423 .prepare = clk_nodrv_prepare_enable, 2424 .unprepare = clk_nodrv_disable_unprepare, 2425 .set_rate = clk_nodrv_set_rate, 2426 .set_parent = clk_nodrv_set_parent, 2427}; 2428 2429/** 2430 * clk_unregister - unregister a currently registered clock 2431 * @clk: clock to unregister 2432 */ 2433void clk_unregister(struct clk *clk) 2434{ 2435 unsigned long flags; 2436 2437 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 2438 return; 2439 2440 clk_debug_unregister(clk); 2441 2442 clk_prepare_lock(); 2443 2444 if (clk->ops == &clk_nodrv_ops) { 2445 pr_err("%s: unregistered clock: %s\n", __func__, clk->name); 2446 return; 2447 } 2448 /* 2449 * Assign empty clock ops for consumers that might still hold 2450 * 
a reference to this clock. 2451 */ 2452 flags = clk_enable_lock(); 2453 clk->ops = &clk_nodrv_ops; 2454 clk_enable_unlock(flags); 2455 2456 if (!hlist_empty(&clk->children)) { 2457 struct clk *child; 2458 struct hlist_node *t; 2459 2460 /* Reparent all children to the orphan list. */ 2461 hlist_for_each_entry_safe(child, t, &clk->children, child_node) 2462 clk_set_parent(child, NULL); 2463 } 2464 2465 hlist_del_init(&clk->child_node); 2466 2467 if (clk->prepare_count) 2468 pr_warn("%s: unregistering prepared clock: %s\n", 2469 __func__, clk->name); 2470 kref_put(&clk->ref, __clk_release); 2471 2472 clk_prepare_unlock(); 2473} 2474EXPORT_SYMBOL_GPL(clk_unregister); 2475 2476static void devm_clk_release(struct device *dev, void *res) 2477{ 2478 clk_unregister(*(struct clk **)res); 2479} 2480 2481/** 2482 * devm_clk_register - resource managed clk_register() 2483 * @dev: device that is registering this clock 2484 * @hw: link to hardware-specific clock data 2485 * 2486 * Managed clk_register(). Clocks returned from this function are 2487 * automatically clk_unregister()ed on driver detach. See clk_register() for 2488 * more information. 2489 */ 2490struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 2491{ 2492 struct clk *clk; 2493 struct clk **clkp; 2494 2495 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); 2496 if (!clkp) 2497 return ERR_PTR(-ENOMEM); 2498 2499 clk = clk_register(dev, hw); 2500 if (!IS_ERR(clk)) { 2501 *clkp = clk; 2502 devres_add(dev, clkp); 2503 } else { 2504 devres_free(clkp); 2505 } 2506 2507 return clk; 2508} 2509EXPORT_SYMBOL_GPL(devm_clk_register); 2510 2511static int devm_clk_match(struct device *dev, void *res, void *data) 2512{ 2513 struct clk *c = res; 2514 if (WARN_ON(!c)) 2515 return 0; 2516 return c == data; 2517} 2518 2519/** 2520 * devm_clk_unregister - resource managed clk_unregister() 2521 * @clk: clock to unregister 2522 * 2523 * Deallocate a clock allocated with devm_clk_register(). 
Normally 2524 * this function will not need to be called and the resource management 2525 * code will ensure that the resource is freed. 2526 */ 2527void devm_clk_unregister(struct device *dev, struct clk *clk) 2528{ 2529 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk)); 2530} 2531EXPORT_SYMBOL_GPL(devm_clk_unregister); 2532 2533/* 2534 * clkdev helpers 2535 */ 2536int __clk_get(struct clk *clk) 2537{ 2538 if (clk) { 2539 if (!try_module_get(clk->owner)) 2540 return 0; 2541 2542 kref_get(&clk->ref); 2543 } 2544 return 1; 2545} 2546 2547void __clk_put(struct clk *clk) 2548{ 2549 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 2550 return; 2551 2552 clk_prepare_lock(); 2553 kref_put(&clk->ref, __clk_release); 2554 clk_prepare_unlock(); 2555 2556 module_put(clk->owner); 2557} 2558 2559/*** clk rate change notifiers ***/ 2560 2561/** 2562 * clk_notifier_register - add a clk rate change notifier 2563 * @clk: struct clk * to watch 2564 * @nb: struct notifier_block * with callback info 2565 * 2566 * Request notification when clk's rate changes. This uses an SRCU 2567 * notifier because we want it to block and notifier unregistrations are 2568 * uncommon. The callbacks associated with the notifier must not 2569 * re-enter into the clk framework by calling any top-level clk APIs; 2570 * this will cause a nested prepare_lock mutex. 2571 * 2572 * In all notification cases cases (pre, post and abort rate change) the 2573 * original clock rate is passed to the callback via struct 2574 * clk_notifier_data.old_rate and the new frequency is passed via struct 2575 * clk_notifier_data.new_rate. 2576 * 2577 * clk_notifier_register() must be called from non-atomic context. 2578 * Returns -EINVAL if called with null arguments, -ENOMEM upon 2579 * allocation failure; otherwise, passes along the return value of 2580 * srcu_notifier_chain_register(). 
2581 */ 2582int clk_notifier_register(struct clk *clk, struct notifier_block *nb) 2583{ 2584 struct clk_notifier *cn; 2585 int ret = -ENOMEM; 2586 2587 if (!clk || !nb) 2588 return -EINVAL; 2589 2590 clk_prepare_lock(); 2591 2592 /* search the list of notifiers for this clk */ 2593 list_for_each_entry(cn, &clk_notifier_list, node) 2594 if (cn->clk == clk) 2595 break; 2596 2597 /* if clk wasn't in the notifier list, allocate new clk_notifier */ 2598 if (cn->clk != clk) { 2599 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL); 2600 if (!cn) 2601 goto out; 2602 2603 cn->clk = clk; 2604 srcu_init_notifier_head(&cn->notifier_head); 2605 2606 list_add(&cn->node, &clk_notifier_list); 2607 } 2608 2609 ret = srcu_notifier_chain_register(&cn->notifier_head, nb); 2610 2611 clk->notifier_count++; 2612 2613out: 2614 clk_prepare_unlock(); 2615 2616 return ret; 2617} 2618EXPORT_SYMBOL_GPL(clk_notifier_register); 2619 2620/** 2621 * clk_notifier_unregister - remove a clk rate change notifier 2622 * @clk: struct clk * 2623 * @nb: struct notifier_block * with callback info 2624 * 2625 * Request no further notification for changes to 'clk' and frees memory 2626 * allocated in clk_notifier_register. 2627 * 2628 * Returns -EINVAL if called with null arguments; otherwise, passes 2629 * along the return value of srcu_notifier_chain_unregister(). 
2630 */ 2631int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) 2632{ 2633 struct clk_notifier *cn = NULL; 2634 int ret = -EINVAL; 2635 2636 if (!clk || !nb) 2637 return -EINVAL; 2638 2639 clk_prepare_lock(); 2640 2641 list_for_each_entry(cn, &clk_notifier_list, node) 2642 if (cn->clk == clk) 2643 break; 2644 2645 if (cn->clk == clk) { 2646 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); 2647 2648 clk->notifier_count--; 2649 2650 /* XXX the notifier code should handle this better */ 2651 if (!cn->notifier_head.head) { 2652 srcu_cleanup_notifier_head(&cn->notifier_head); 2653 list_del(&cn->node); 2654 kfree(cn); 2655 } 2656 2657 } else { 2658 ret = -ENOENT; 2659 } 2660 2661 clk_prepare_unlock(); 2662 2663 return ret; 2664} 2665EXPORT_SYMBOL_GPL(clk_notifier_unregister); 2666 2667#ifdef CONFIG_OF 2668/** 2669 * struct of_clk_provider - Clock provider registration structure 2670 * @link: Entry in global list of clock providers 2671 * @node: Pointer to device tree node of clock provider 2672 * @get: Get clock callback. 
Returns NULL or a struct clk for the 2673 * given clock specifier 2674 * @data: context pointer to be passed into @get callback 2675 */ 2676struct of_clk_provider { 2677 struct list_head link; 2678 2679 struct device_node *node; 2680 struct clk *(*get)(struct of_phandle_args *clkspec, void *data); 2681 void *data; 2682}; 2683 2684static const struct of_device_id __clk_of_table_sentinel 2685 __used __section(__clk_of_table_end); 2686 2687static LIST_HEAD(of_clk_providers); 2688static DEFINE_MUTEX(of_clk_mutex); 2689 2690/* of_clk_provider list locking helpers */ 2691void of_clk_lock(void) 2692{ 2693 mutex_lock(&of_clk_mutex); 2694} 2695 2696void of_clk_unlock(void) 2697{ 2698 mutex_unlock(&of_clk_mutex); 2699} 2700 2701struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, 2702 void *data) 2703{ 2704 return data; 2705} 2706EXPORT_SYMBOL_GPL(of_clk_src_simple_get); 2707 2708struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data) 2709{ 2710 struct clk_onecell_data *clk_data = data; 2711 unsigned int idx = clkspec->args[0]; 2712 2713 if (idx >= clk_data->clk_num) { 2714 pr_err("%s: invalid clock index %d\n", __func__, idx); 2715 return ERR_PTR(-EINVAL); 2716 } 2717 2718 return clk_data->clks[idx]; 2719} 2720EXPORT_SYMBOL_GPL(of_clk_src_onecell_get); 2721 2722/** 2723 * of_clk_add_provider() - Register a clock provider for a node 2724 * @np: Device node pointer associated with clock provider 2725 * @clk_src_get: callback for decoding clock 2726 * @data: context pointer for @clk_src_get callback. 
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	/* hold a node reference for as long as the provider is registered */
	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %s\n", np->full_name);

	/* apply any assigned-clock parent/rate defaults from the DT node */
	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			/* drop the reference taken in of_clk_add_provider() */
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

/* Resolve @clkspec to a clock.  Caller must hold of_clk_mutex. */
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		/*
		 * NOTE(review): this break test sits outside the node match
		 * above, so a provider whose ->get() fails does not end the
		 * search -- presumably to let a later provider registered
		 * for the same node satisfy the request.
		 */
		if (!IS_ERR(clk))
			break;
	}

	/* -EPROBE_DEFER if no registered provider matched */
	return clk;
}

/* Locked wrapper around __of_clk_get_from_provider(). */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct clk *clk;

	mutex_lock(&of_clk_mutex);
	clk = __of_clk_get_from_provider(clkspec);
	mutex_unlock(&of_clk_mutex);

	return clk;
}

/* Number of parent clocks in @np's "clocks" property (or errno). */
int of_clk_get_parent_count(struct device_node *np)
{
	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

/*
 * Return the name of @np's @index-th clock parent, from the parent's
 * "clock-output-names" property when present, else the parent node name.
 * Returns NULL when the "clocks" phandle cannot be parsed.
 */
const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/* if there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}

	/* fall back to the parent node's own name when no name entry */
	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/* One pending DT clock provider awaiting initialization in of_clk_init(). */
struct clock_provider {
	of_clk_init_cb_t clk_init_cb;
	struct device_node *np;
	struct list_head node;
};

static LIST_HEAD(clk_provider_list);

/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * this case the parent clock will be ready.
2865 */ 2866static int parent_ready(struct device_node *np) 2867{ 2868 int i = 0; 2869 2870 while (true) { 2871 struct clk *clk = of_clk_get(np, i); 2872 2873 /* this parent is ready we can check the next one */ 2874 if (!IS_ERR(clk)) { 2875 clk_put(clk); 2876 i++; 2877 continue; 2878 } 2879 2880 /* at least one parent is not ready, we exit now */ 2881 if (PTR_ERR(clk) == -EPROBE_DEFER) 2882 return 0; 2883 2884 /* 2885 * Here we make assumption that the device tree is 2886 * written correctly. So an error means that there is 2887 * no more parent. As we didn't exit yet, then the 2888 * previous parent are ready. If there is no clock 2889 * parent, no need to wait for them, then we can 2890 * consider their absence as being ready 2891 */ 2892 return 1; 2893 } 2894} 2895 2896/** 2897 * of_clk_init() - Scan and init clock providers from the DT 2898 * @matches: array of compatible values and init functions for providers. 2899 * 2900 * This function scans the device tree for matching clock providers 2901 * and calls their initialization functions. It also does it by trying 2902 * to follow the dependencies. 
2903 */ 2904void __init of_clk_init(const struct of_device_id *matches) 2905{ 2906 const struct of_device_id *match; 2907 struct device_node *np; 2908 struct clock_provider *clk_provider, *next; 2909 bool is_init_done; 2910 bool force = false; 2911 2912 if (!matches) 2913 matches = &__clk_of_table; 2914 2915 /* First prepare the list of the clocks providers */ 2916 for_each_matching_node_and_match(np, matches, &match) { 2917 struct clock_provider *parent = 2918 kzalloc(sizeof(struct clock_provider), GFP_KERNEL); 2919 2920 parent->clk_init_cb = match->data; 2921 parent->np = np; 2922 list_add_tail(&parent->node, &clk_provider_list); 2923 } 2924 2925 while (!list_empty(&clk_provider_list)) { 2926 is_init_done = false; 2927 list_for_each_entry_safe(clk_provider, next, 2928 &clk_provider_list, node) { 2929 if (force || parent_ready(clk_provider->np)) { 2930 2931 clk_provider->clk_init_cb(clk_provider->np); 2932 of_clk_set_defaults(clk_provider->np, true); 2933 2934 list_del(&clk_provider->node); 2935 kfree(clk_provider); 2936 is_init_done = true; 2937 } 2938 } 2939 2940 /* 2941 * We didn't manage to initialize any of the 2942 * remaining providers during the last loop, so now we 2943 * initialize all the remaining ones unconditionally 2944 * in case the clock parent was not mandatory 2945 */ 2946 if (!is_init_done) 2947 force = true; 2948 } 2949} 2950#endif 2951