exynos_mct.c revision 354599f460ba79c9fb00f220e42de5a7509ceeb4
/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>

#include <asm/arch_timer.h>
#include <asm/localtimer.h>

#include <plat/cpu.h>

#include <mach/map.h>
#include <mach/irqs.h>
#include <asm/mach/time.h>

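/*
 * Register layout (offsets from the MCT base):
 *  0x100..0x110	64-bit global free-running counter (FRC) and its
 *			write-status register
 *  0x200..0x24C	global comparator 0 (value, auto-increment, control,
 *			interrupt status/enable, write status)
 *  0x300 + 0x100 * n	local timer block for CPU n (tick count, interrupt
 *			count, control, interrupt status/enable, write status)
 */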
#define EXYNOS4_MCTREG(x)		(x)
#define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U		EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT		EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L		EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U		EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR	EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON		EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT		EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB		EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT		EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE		EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x)		(_EXYNOS4_MCT_L_BASE + (0x100 * x))
#define EXYNOS4_MCT_L_MASK		(0xffffff00)

#define MCT_L_TCNTB_OFFSET		(0x00)
#define MCT_L_ICNTB_OFFSET		(0x08)
#define MCT_L_TCON_OFFSET		(0x20)
#define MCT_L_INT_CSTAT_OFFSET		(0x30)
#define MCT_L_INT_ENB_OFFSET		(0x34)
#define MCT_L_WSTAT_OFFSET		(0x40)
#define MCT_G_TCON_START		(1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC	(1 << 1)
#define MCT_G_TCON_COMP0_ENABLE		(1 << 0)
#define MCT_L_TCON_INTERVAL_MODE	(1 << 2)
#define MCT_L_TCON_INT_START		(1 << 1)
#define MCT_L_TCON_TIMER_START		(1 << 0)

#define TICK_BASE_CNT	1

enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

enum {
	MCT_G0_IRQ,
	MCT_G1_IRQ,
	MCT_G2_IRQ,
	MCT_G3_IRQ,
	MCT_L0_IRQ,
	MCT_L1_IRQ,
	MCT_L2_IRQ,
	MCT_L3_IRQ,
	MCT_NR_IRQS,
};

static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];

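/*
 * Per-CPU state for the local tick timers: the clock_event_device served
 * by this timer, the offset of its register block and the name used when
 * registering the event device and its interrupt.
 */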
struct mct_clock_event_device {
	struct clock_event_device *evt;
	unsigned long base;
	char name[10];
};

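/*
 * Write an MCT register and, for registers that have one, poll the
 * corresponding bit in the matching write-status (WSTAT) register until
 * the hardware acknowledges the update, then clear that status bit.
 * The wait is bounded to roughly 1 ms; if the write is never acknowledged
 * the system panics.  Registers without a WSTAT bit return immediately
 * after the write.
 */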
static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
	unsigned long stat_addr;
	u32 mask;
	u32 i;

	__raw_writel(value, reg_base + offset);

	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
		switch (offset & ~EXYNOS4_MCT_L_MASK) {
		case MCT_L_TCON_OFFSET:
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case MCT_L_ICNTB_OFFSET:
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case MCT_L_TCNTB_OFFSET:
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			return;
		}
	} else {
		switch (offset) {
		case EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
		case EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (__raw_readl(reg_base + stat_addr) & mask) {
			__raw_writel(mask, reg_base + stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}

/* Clocksource handling */
static void exynos4_mct_frc_start(u32 hi, u32 lo)
{
	u32 reg;

	exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
	exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);

	reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
	reg |= MCT_G_TCON_START;
	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}

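/*
 * The 64-bit FRC is read as two 32-bit halves.  Re-read the upper word
 * until it is stable so that a carry from the lower half between the two
 * reads cannot produce a torn value.
 */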
static cycle_t exynos4_frc_read(struct clocksource *cs)
{
	unsigned int lo, hi;
	u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((cycle_t)hi << 32) | lo;
}

static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start(0, 0);
}

struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= 400,
	.read		= exynos4_frc_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= exynos4_frc_resume,
};

static void __init exynos4_clocksource_init(void)
{
	exynos4_mct_frc_start(0, 0);

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);
}

static void exynos4_mct_comp0_stop(void)
{
	unsigned int tcon;

	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

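/*
 * Arm global comparator 0 to fire 'cycles' FRC ticks from now.  The
 * comparator matches against an absolute 64-bit count, so the current FRC
 * value is added to the requested delta.  In periodic mode the increment
 * is also written to ADD_INCR so the comparator is advanced automatically
 * after each match.
 */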
static void exynos4_mct_comp0_start(enum clock_event_mode mode,
				    unsigned long cycles)
{
	unsigned int tcon;
	cycle_t comp_cycle;

	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);

	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(evt->mode, cycles);

	return 0;
}

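/*
 * For periodic mode, the tick length in timer cycles is derived from the
 * clockevent's mult/shift pair: NSEC_PER_SEC / HZ is the jiffy period in
 * nanoseconds, and (ns * mult) >> shift converts nanoseconds to cycles.
 */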
static void exynos4_comp_set_mode(enum clock_event_mode mode,
				  struct clock_event_device *evt)
{
	unsigned long cycles_per_jiffy;

	exynos4_mct_comp0_stop();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_comp0_start(mode, cycles_per_jiffy);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device mct_comp_device = {
	.name		= "mct-comp",
	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 250,
	.set_next_event	= exynos4_comp_set_next_event,
	.set_mode	= exynos4_comp_set_mode,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_comp_event_irq = {
	.name		= "mct_comp_irq",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= exynos4_mct_comp_isr,
	.dev_id		= &mct_comp_device,
};

static void exynos4_clockevent_init(void)
{
	mct_comp_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&mct_comp_device, clk_rate,
					0xf, 0xffffffff);
	setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
}

#ifdef CONFIG_LOCAL_TIMERS

static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

	tmp = __raw_readl(reg_base + offset);
	if (tmp & mask) {
		tmp &= ~mask;
		exynos4_mct_write(tmp, offset);
	}
}

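/*
 * (Re)arm a local timer: write the interrupt count buffer (bit 31 asks
 * the hardware to latch the new ICNTB value), enable the local tick
 * interrupt and start the timer in interval (auto-reload) mode.
 */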
static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);

	exynos4_mct_tick_start(cycles, mevt);

	return 0;
}

static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
					 struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
	unsigned long cycles_per_jiffy;

	exynos4_mct_tick_stop(mevt);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_tick_start(cycles_per_jiffy, mevt);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
	struct clock_event_device *evt = mevt->evt;

	/*
	 * This is needed to support oneshot mode: without an explicit
	 * stop, the MCT keeps generating interrupts periodically.
	 */
	if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
		exynos4_mct_tick_stop(mevt);

	/* Clear the MCT tick interrupt */
	if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
		return 1;
	} else {
		return 0;
	}
}

static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = mevt->evt;

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_tick0_event_irq = {
	.name		= "mct_tick0_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};

static struct irqaction mct_tick1_event_irq = {
	.name		= "mct_tick1_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};

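/*
 * Per-CPU setup called from the local timer framework.  TCNTB is loaded
 * with TICK_BASE_CNT and the clockevent is registered at
 * clk_rate / (TICK_BASE_CNT + 1) to account for that reload value.  On
 * SoCs using SPIs (exynos4210 type) a dedicated irqaction is installed
 * for CPU0 and CPU1, with the CPU1 interrupt pinned to CPU1; on PPI-based
 * SoCs the shared per-CPU interrupt is simply enabled on this CPU.
 */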
static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned int cpu = smp_processor_id();

	mevt = this_cpu_ptr(&percpu_mct_tick);
	mevt->evt = evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	sprintf(mevt->name, "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_mode = exynos4_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;
	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
					0xf, 0x7fffffff);

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0) {
			mct_tick0_event_irq.dev_id = mevt;
			evt->irq = mct_irqs[MCT_L0_IRQ];
			setup_irq(evt->irq, &mct_tick0_event_irq);
		} else {
			mct_tick1_event_irq.dev_id = mevt;
			evt->irq = mct_irqs[MCT_L1_IRQ];
			setup_irq(evt->irq, &mct_tick1_event_irq);
			irq_set_affinity(evt->irq, cpumask_of(1));
		}
	} else {
		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
	}

	return 0;
}

static void exynos4_local_timer_stop(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();

	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0)
			remove_irq(evt->irq, &mct_tick0_event_irq);
		else
			remove_irq(evt->irq, &mct_tick1_event_irq);
	} else {
		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
	}
}

static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = {
	.setup	= exynos4_local_timer_setup,
	.stop	= exynos4_local_timer_stop,
};
#endif /* CONFIG_LOCAL_TIMERS */

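/*
 * Common resource setup: take the MCT input clock rate from the "xtal"
 * clock, map the register block (from the device tree when available,
 * otherwise the static S5P_VA_SYSTIMER mapping) and, when local timers
 * are enabled, request the per-CPU interrupt for the PPI case and
 * register the local timer ops.
 */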
static void __init exynos4_timer_resources(struct device_node *np)
{
	struct clk *mct_clk;

	mct_clk = clk_get(NULL, "xtal");

	clk_rate = clk_get_rate(mct_clk);

	reg_base = np ? of_iomap(np, 0) : S5P_VA_SYSTIMER;
	if (!reg_base)
		panic("%s: unable to ioremap mct address space\n", __func__);

#ifdef CONFIG_LOCAL_TIMERS
	if (mct_int_type == MCT_INT_PPI) {
		int err;

		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     mct_irqs[MCT_L0_IRQ], err);
	}

	local_timer_register(&exynos4_mct_tick_ops);
#endif /* CONFIG_LOCAL_TIMERS */
}

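/*
 * The match data records how the local timer interrupts are wired:
 * exynos4210-style controllers provide one SPI per local timer, while
 * exynos4412-style controllers deliver them through a single PPI.
 */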
static const struct of_device_id exynos_mct_ids[] = {
	{ .compatible = "samsung,exynos4210-mct", .data = (void *)MCT_INT_SPI },
	{ .compatible = "samsung,exynos4412-mct", .data = (void *)MCT_INT_PPI },
	{ }
};

void __init mct_init(void)
{
	struct device_node *np = NULL;
	const struct of_device_id *match;
	u32 nr_irqs, i;

#ifdef CONFIG_OF
	np = of_find_matching_node_and_match(NULL, exynos_mct_ids, &match);
#endif
	if (np) {
		mct_int_type = (u32)(match->data);

		/* This driver uses only one global timer interrupt */
		mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

		/*
		 * Find out the number of local irqs specified. The local
		 * timer irqs are specified after the four global timer
		 * irqs are specified.
		 */
#ifdef CONFIG_OF
		nr_irqs = of_irq_count(np);
#endif
		for (i = MCT_L0_IRQ; i < nr_irqs; i++)
			mct_irqs[i] = irq_of_parse_and_map(np, i);
	} else if (soc_is_exynos4210()) {
		mct_irqs[MCT_G0_IRQ] = EXYNOS4_IRQ_MCT_G0;
		mct_irqs[MCT_L0_IRQ] = EXYNOS4_IRQ_MCT_L0;
		mct_irqs[MCT_L1_IRQ] = EXYNOS4_IRQ_MCT_L1;
		mct_int_type = MCT_INT_SPI;
	} else {
		panic("unable to determine mct controller type\n");
	}

	exynos4_timer_resources(np);
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}
CLOCKSOURCE_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init);
CLOCKSOURCE_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init);