1/*
2 *
3 * Copyright (C) 2010 Google, Inc.
4 *
5 * Author:
6 *	Colin Cross <ccross@google.com>
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/clk.h>
21#include <linux/clkdev.h>
22#include <linux/debugfs.h>
23#include <linux/delay.h>
24#include <linux/init.h>
25#include <linux/list.h>
26#include <linux/module.h>
27#include <linux/sched.h>
28#include <linux/seq_file.h>
29#include <linux/slab.h>
30
31#include <mach/clk.h>
32
33#include "board.h"
34#include "clock.h"
35
36/*
37 * Locking:
38 *
39 * Each struct clk has a spinlock.
40 *
41 * To avoid AB-BA locking problems, locks must always be traversed from child
 * clock to parent clock.  For example, when enabling a clock, the clock's lock
 * is taken, and then clk_enable is called on the parent, which takes the
 * parent clock's lock.  There is one exception to this ordering: When dumping
 * the clock tree through debugfs.  In this case, clk_lock_all is called,
 * which attempts to iterate through the entire list of clocks and take every
 * clock lock.  If any call to spin_trylock fails, all locked clocks are
 * unlocked, and the process is retried.  When all the locks are held,
49 * the only clock operation that can be called is clk_get_rate_all_locked.
50 *
51 * Within a single clock, no clock operation can call another clock operation
52 * on itself, except for clk_get_rate_locked and clk_set_rate_locked.  Any
 * clock operation can call any other clock operation on any of its possible
54 * parents.
55 *
56 * An additional mutex, clock_list_lock, is used to protect the list of all
57 * clocks.
58 *
59 * The clock operations must lock internally to protect against
60 * read-modify-write on registers that are shared by multiple clocks
61 */
/* Protects the global "clocks" list below (see clk_init()) */
static DEFINE_MUTEX(clock_list_lock);
/* All registered clocks, linked through clk->node */
static LIST_HEAD(clocks);
64
65struct clk *tegra_get_clock_by_name(const char *name)
66{
67	struct clk *c;
68	struct clk *ret = NULL;
69	mutex_lock(&clock_list_lock);
70	list_for_each_entry(c, &clocks, node) {
71		if (strcmp(c->name, name) == 0) {
72			ret = c;
73			break;
74		}
75	}
76	mutex_unlock(&clock_list_lock);
77	return ret;
78}
79
/* Must be called with c->spinlock held */
static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
{
	u64 rate;

	/* Takes p's spinlock: consistent with child-to-parent lock order */
	rate = clk_get_rate(p);

	/*
	 * Scale the parent rate by this clock's multiplier/divider pair.
	 * A zero mul or div means "no scaling configured", so the parent
	 * rate is passed through unchanged.
	 */
	if (c->mul != 0 && c->div != 0) {
		rate *= c->mul;
		rate += c->div - 1; /* round up */
		do_div(rate, c->div);
	}

	return rate;
}
95
96/* Must be called with c->spinlock held */
97unsigned long clk_get_rate_locked(struct clk *c)
98{
99	unsigned long rate;
100
101	if (c->parent)
102		rate = clk_predict_rate_from_parent(c, c->parent);
103	else
104		rate = c->rate;
105
106	return rate;
107}
108
109unsigned long clk_get_rate(struct clk *c)
110{
111	unsigned long flags;
112	unsigned long rate;
113
114	spin_lock_irqsave(&c->spinlock, flags);
115
116	rate = clk_get_rate_locked(c);
117
118	spin_unlock_irqrestore(&c->spinlock, flags);
119
120	return rate;
121}
122EXPORT_SYMBOL(clk_get_rate);
123
/*
 * Record @parent as the new parent of @c in the software clock tree.
 * This only updates the bookkeeping pointer; programming the hardware
 * mux is the caller's job (e.g. an ops->set_parent implementation).
 * Always returns 0.
 */
int clk_reparent(struct clk *c, struct clk *parent)
{
	c->parent = parent;
	return 0;
}
129
130void clk_init(struct clk *c)
131{
132	spin_lock_init(&c->spinlock);
133
134	if (c->ops && c->ops->init)
135		c->ops->init(c);
136
137	if (!c->ops || !c->ops->enable) {
138		c->refcnt++;
139		c->set = true;
140		if (c->parent)
141			c->state = c->parent->state;
142		else
143			c->state = ON;
144	}
145
146	mutex_lock(&clock_list_lock);
147	list_add(&c->node, &clocks);
148	mutex_unlock(&clock_list_lock);
149}
150
/*
 * Enable a clock, enabling its parent chain first on the 0->1 refcount
 * transition.  Lock ordering follows the child-to-parent rule described
 * at the top of this file: c->spinlock is held while clk_enable() is
 * called recursively on the parent.
 */
int clk_enable(struct clk *c)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&c->spinlock, flags);

	if (c->refcnt == 0) {
		/* First user: make sure the parent is running before us */
		if (c->parent) {
			ret = clk_enable(c->parent);
			if (ret)
				goto out;
		}

		if (c->ops && c->ops->enable) {
			ret = c->ops->enable(c);
			if (ret) {
				/* Undo the parent enable taken above */
				if (c->parent)
					clk_disable(c->parent);
				goto out;
			}
			c->state = ON;
			c->set = true;
		}
	}
	/* Only reached on success; error paths skip the refcount bump */
	c->refcnt++;
out:
	spin_unlock_irqrestore(&c->spinlock, flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);
182
/*
 * Drop one reference on a clock; on the 1->0 transition the hardware is
 * gated and the parent reference taken in clk_enable() is released.
 * Disabling an already-disabled clock triggers a WARN and is otherwise
 * a no-op.
 */
void clk_disable(struct clk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->spinlock, flags);

	/* Refcount underflow: complain and bail out without touching state */
	if (c->refcnt == 0) {
		WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
		spin_unlock_irqrestore(&c->spinlock, flags);
		return;
	}
	if (c->refcnt == 1) {
		/* Last user: gate the hardware before releasing the parent */
		if (c->ops && c->ops->disable)
			c->ops->disable(c);

		if (c->parent)
			clk_disable(c->parent);

		c->state = OFF;
	}
	c->refcnt--;

	spin_unlock_irqrestore(&c->spinlock, flags);
}
EXPORT_SYMBOL(clk_disable);
208
209int clk_set_parent(struct clk *c, struct clk *parent)
210{
211	int ret;
212	unsigned long flags;
213	unsigned long new_rate;
214	unsigned long old_rate;
215
216	spin_lock_irqsave(&c->spinlock, flags);
217
218	if (!c->ops || !c->ops->set_parent) {
219		ret = -ENOSYS;
220		goto out;
221	}
222
223	new_rate = clk_predict_rate_from_parent(c, parent);
224	old_rate = clk_get_rate_locked(c);
225
226	ret = c->ops->set_parent(c, parent);
227	if (ret)
228		goto out;
229
230out:
231	spin_unlock_irqrestore(&c->spinlock, flags);
232	return ret;
233}
234EXPORT_SYMBOL(clk_set_parent);
235
/*
 * Return the current parent of @c.  The pointer is read without taking
 * c->spinlock; callers needing a stable parent/rate pair should use the
 * locked helpers instead.
 */
struct clk *clk_get_parent(struct clk *c)
{
	return c->parent;
}
EXPORT_SYMBOL(clk_get_parent);
241
242int clk_set_rate_locked(struct clk *c, unsigned long rate)
243{
244	long new_rate;
245
246	if (!c->ops || !c->ops->set_rate)
247		return -ENOSYS;
248
249	if (rate > c->max_rate)
250		rate = c->max_rate;
251
252	if (c->ops && c->ops->round_rate) {
253		new_rate = c->ops->round_rate(c, rate);
254
255		if (new_rate < 0)
256			return new_rate;
257
258		rate = new_rate;
259	}
260
261	return c->ops->set_rate(c, rate);
262}
263
264int clk_set_rate(struct clk *c, unsigned long rate)
265{
266	int ret;
267	unsigned long flags;
268
269	spin_lock_irqsave(&c->spinlock, flags);
270
271	ret = clk_set_rate_locked(c, rate);
272
273	spin_unlock_irqrestore(&c->spinlock, flags);
274
275	return ret;
276}
277EXPORT_SYMBOL(clk_set_rate);
278
279
/*
 * Compute a clock's rate by walking up to the root of the tree and
 * accumulating every mul/div pair along the way, without taking any
 * spinlocks itself.  Must be called with the clocks list lock and all
 * individual clock locks held (see clk_lock_all()).
 *
 * NOTE(review): mul and div are plain ints; a deep chain of large
 * multipliers/dividers could overflow them -- confirm real clock trees
 * keep these products small.
 */
unsigned long clk_get_rate_all_locked(struct clk *c)
{
	u64 rate;
	int mul = 1;
	int div = 1;
	struct clk *p = c;

	/* Walk to the root clock, collecting scale factors on the way */
	while (p) {
		c = p;
		if (c->mul != 0 && c->div != 0) {
			mul *= c->mul;
			div *= c->div;
		}
		p = c->parent;
	}

	/* c now points at the root; scale its fixed rate */
	rate = c->rate;
	rate *= mul;
	do_div(rate, div);

	return rate;
}
303
304long clk_round_rate(struct clk *c, unsigned long rate)
305{
306	unsigned long flags;
307	long ret;
308
309	spin_lock_irqsave(&c->spinlock, flags);
310
311	if (!c->ops || !c->ops->round_rate) {
312		ret = -ENOSYS;
313		goto out;
314	}
315
316	if (rate > c->max_rate)
317		rate = c->max_rate;
318
319	ret = c->ops->round_rate(c, rate);
320
321out:
322	spin_unlock_irqrestore(&c->spinlock, flags);
323	return ret;
324}
325EXPORT_SYMBOL(clk_round_rate);
326
/*
 * Apply one init-table entry: optionally reparent the clock, set its
 * rate, and enable it.  Each step is skipped when the table does not
 * request it or the clock is already in the requested state.  Returns
 * 0 on success or a negative error after logging a warning.
 */
static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
{
	struct clk *c;
	struct clk *p;

	int ret = 0;

	c = tegra_get_clock_by_name(table->name);

	if (!c) {
		pr_warning("Unable to initialize clock %s\n",
			table->name);
		return -ENODEV;
	}

	if (table->parent) {
		/* Reparent only when the current parent differs */
		p = tegra_get_clock_by_name(table->parent);
		if (!p) {
			pr_warning("Unable to find parent %s of clock %s\n",
				table->parent, table->name);
			return -ENODEV;
		}

		if (c->parent != p) {
			ret = clk_set_parent(c, p);
			if (ret) {
				pr_warning("Unable to set parent %s of clock %s: %d\n",
					table->parent, table->name, ret);
				return -EINVAL;
			}
		}
	}

	/* A table rate of 0 means "leave the rate alone" */
	if (table->rate && table->rate != clk_get_rate(c)) {
		ret = clk_set_rate(c, table->rate);
		if (ret) {
			pr_warning("Unable to set clock %s to rate %lu: %d\n",
				table->name, table->rate, ret);
			return -EINVAL;
		}
	}

	if (table->enabled) {
		ret = clk_enable(c);
		if (ret) {
			pr_warning("Unable to enable clock %s: %d\n",
				table->name, ret);
			return -EINVAL;
		}
	}

	return 0;
}
380
381void tegra_clk_init_from_table(struct tegra_clk_init_table *table)
382{
383	for (; table->name; table++)
384		tegra_clk_init_one_from_table(table);
385}
386EXPORT_SYMBOL(tegra_clk_init_from_table);
387
/* Release the peripheral reset for @c; requires an ops->reset hook */
void tegra_periph_reset_deassert(struct clk *c)
{
	BUG_ON(!c->ops->reset);
	c->ops->reset(c, false);
}
EXPORT_SYMBOL(tegra_periph_reset_deassert);
394
/* Assert (hold) the peripheral reset for @c; requires an ops->reset hook */
void tegra_periph_reset_assert(struct clk *c)
{
	BUG_ON(!c->ops->reset);
	c->ops->reset(c, true);
}
EXPORT_SYMBOL(tegra_periph_reset_assert);
401
402/* Several extended clock configuration bits (e.g., clock routing, clock
403 * phase control) are included in PLL and peripheral clock source
404 * registers. */
405int tegra_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
406{
407	int ret = 0;
408	unsigned long flags;
409
410	spin_lock_irqsave(&c->spinlock, flags);
411
412	if (!c->ops || !c->ops->clk_cfg_ex) {
413		ret = -ENOSYS;
414		goto out;
415	}
416	ret = c->ops->clk_cfg_ex(c, p, setting);
417
418out:
419	spin_unlock_irqrestore(&c->spinlock, flags);
420
421	return ret;
422}
423
424#ifdef CONFIG_DEBUG_FS
425
/*
 * Try to take every clock's spinlock in list order.  On the first
 * contended lock, release (in reverse order) everything taken so far
 * and return -EAGAIN so the caller can back off and retry.
 */
static int __clk_lock_all_spinlocks(void)
{
	struct clk *c;

	list_for_each_entry(c, &clocks, node)
		if (!spin_trylock(&c->spinlock))
			goto unlock_spinlocks;

	return 0;

unlock_spinlocks:
	/* Walks back over the entries locked before the failing one */
	list_for_each_entry_continue_reverse(c, &clocks, node)
		spin_unlock(&c->spinlock);

	return -EAGAIN;
}
442
/* Release every clock spinlock taken by __clk_lock_all_spinlocks() */
static void __clk_unlock_all_spinlocks(void)
{
	struct clk *c;

	list_for_each_entry_reverse(c, &clocks, node)
		spin_unlock(&c->spinlock);
}
450
/*
 * This function retries until it can take all locks, and may take
 * an arbitrarily long time to complete.
 * Must be called with irqs enabled, returns with irqs disabled
 * Must be called with clock_list_lock held
 */
static void clk_lock_all(void)
{
	for (;;) {
		local_irq_disable();

		if (!__clk_lock_all_spinlocks())
			return;	/* every clock lock is now held */

		/* Contention: back off with irqs enabled, then retry */
		local_irq_enable();
		yield();
	}
}
475
/*
 * Unlocks all clocks after a clk_lock_all
 * Must be called with irqs disabled, returns with irqs enabled
 * Must be called with clock_list_lock held
 */
static void clk_unlock_all(void)
{
	__clk_unlock_all_spinlocks();

	/* clk_lock_all() disabled irqs; re-enable them on the way out */
	local_irq_enable();
}
487
/* Root of the "clock" debugfs hierarchy; set once in clk_debugfs_init() */
static struct dentry *clk_debugfs_root;
489
490
/*
 * Print one line of the debugfs clock tree for @c, then recurse over
 * every clock whose parent is @c.  Must be called with all clock locks
 * held (see clk_lock_all()), since it uses clk_get_rate_all_locked().
 */
static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
{
	struct clk *child;
	const char *state = "uninit";
	char div[8] = {0};

	if (c->state == ON)
		state = "on";
	else if (c->state == OFF)
		state = "off";

	/*
	 * Render the mul/div pair as a human-readable factor: "xN[.D]"
	 * when the clock multiplies, "N[.5]" when it divides.  A trailing
	 * ".." marks a fractional part truncated to one digit.
	 */
	if (c->mul != 0 && c->div != 0) {
		if (c->mul > c->div) {
			int mul = c->mul / c->div;
			int mul2 = (c->mul * 10 / c->div) % 10;
			int mul3 = (c->mul * 10) % c->div;
			if (mul2 == 0 && mul3 == 0)
				snprintf(div, sizeof(div), "x%d", mul);
			else if (mul3 == 0)
				snprintf(div, sizeof(div), "x%d.%d", mul, mul2);
			else
				snprintf(div, sizeof(div), "x%d.%d..", mul, mul2);
		} else {
			snprintf(div, sizeof(div), "%d%s", c->div / c->mul,
				(c->div % c->mul) ? ".5" : "");
		}
	}

	/* '!' flags a rate above max_rate; '*' a clock never explicitly set */
	seq_printf(s, "%*s%c%c%-*s %-6s %-3d %-8s %-10lu\n",
		level * 3 + 1, "",
		c->rate > c->max_rate ? '!' : ' ',
		!c->set ? '*' : ' ',
		30 - level * 3, c->name,
		state, c->refcnt, div, clk_get_rate_all_locked(c));

	/* Linear scan per level is fine for debugfs output */
	list_for_each_entry(child, &clocks, node) {
		if (child->parent != c)
			continue;

		clock_tree_show_one(s, child, level + 1);
	}
}
533
534static int clock_tree_show(struct seq_file *s, void *data)
535{
536	struct clk *c;
537	seq_printf(s, "   clock                          state  ref div      rate\n");
538	seq_printf(s, "--------------------------------------------------------------\n");
539
540	mutex_lock(&clock_list_lock);
541
542	clk_lock_all();
543
544	list_for_each_entry(c, &clocks, node)
545		if (c->parent == NULL)
546			clock_tree_show_one(s, c, 0);
547
548	clk_unlock_all();
549
550	mutex_unlock(&clock_list_lock);
551	return 0;
552}
553
/* debugfs open hook for the clock_tree file */
static int clock_tree_open(struct inode *inode, struct file *file)
{
	return single_open(file, clock_tree_show, inode->i_private);
}
558
/* File operations for the debugfs clock_tree file */
static const struct file_operations clock_tree_fops = {
	.open		= clock_tree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
565
566static int possible_parents_show(struct seq_file *s, void *data)
567{
568	struct clk *c = s->private;
569	int i;
570
571	for (i = 0; c->inputs[i].input; i++) {
572		char *first = (i == 0) ? "" : " ";
573		seq_printf(s, "%s%s", first, c->inputs[i].input->name);
574	}
575	seq_printf(s, "\n");
576	return 0;
577}
578
/* debugfs open hook for a clock's possible_parents file */
static int possible_parents_open(struct inode *inode, struct file *file)
{
	return single_open(file, possible_parents_show, inode->i_private);
}
583
/* File operations for a clock's possible_parents debugfs file */
static const struct file_operations possible_parents_fops = {
	.open		= possible_parents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
590
/*
 * Create the per-clock debugfs directory and its attribute files.
 * On any failure the partially-built directory is removed and -ENOMEM
 * returned (this era's debugfs API reports failure by returning NULL).
 */
static int clk_debugfs_register_one(struct clk *c)
{
	struct dentry *d;

	d = debugfs_create_dir(c->name, clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	/*
	 * NOTE(review): the casts below assume refcnt is readable through
	 * a u8 and rate/flags through a u32 (plausible on 32-bit
	 * little-endian ARM) -- confirm against struct clk's field types.
	 */
	d = debugfs_create_u8("refcnt", S_IRUGO, c->dent, (u8 *)&c->refcnt);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d)
		goto err_out;

	/* Clocks with selectable inputs also expose their parent list */
	if (c->inputs) {
		d = debugfs_create_file("possible_parents", S_IRUGO, c->dent,
			c, &possible_parents_fops);
		if (!d)
			goto err_out;
	}

	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return -ENOMEM;
}
625
626static int clk_debugfs_register(struct clk *c)
627{
628	int err;
629	struct clk *pa = c->parent;
630
631	if (pa && !pa->dent) {
632		err = clk_debugfs_register(pa);
633		if (err)
634			return err;
635	}
636
637	if (!c->dent) {
638		err = clk_debugfs_register_one(c);
639		if (err)
640			return err;
641	}
642	return 0;
643}
644
/*
 * Create the debugfs "clock" directory with a clock_tree summary file
 * and one subdirectory per registered clock.  Runs at late_initcall
 * time, after the clock list has been populated.
 */
static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err = -ENOMEM;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	d = debugfs_create_file("clock_tree", S_IRUGO, clk_debugfs_root, NULL,
		&clock_tree_fops);
	if (!d)
		goto err_out;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	/* Tear down everything created so far */
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}

late_initcall(clk_debugfs_init);
673#endif
674