/*
 * OMAP4+ CPU idle Routines
 *
 * Copyright (C) 2011-2013 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/clockchips.h>
#include <linux/delay.h>

#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
#include "common.h"
#include "pm.h"
#include "prm.h"
#include "clockdomain.h"

#define MAX_CPUS	2

/* Machine specific information */
struct idle_statedata {
	u32 cpu_state;
	u32 mpu_logic_state;
	u32 mpu_state;
};

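/*
 * Per-state power domain targets, indexed to match the driver state
 * table below: C1 (CPUx ON, MPUSS ON), C2 (CPUx OFF, MPUSS CSWR) and
 * C3 (CPUx OFF, MPUSS OSWR).
 */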
static struct idle_statedata omap4_idle_data[] = {
	{
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};

static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
static struct clockdomain *cpu_clkdm[MAX_CPUS];

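/*
 * Coupled idle bookkeeping: cpu_done[] flags a CPU that has finished
 * its low power attempt, and abort_barrier synchronizes both CPUs on
 * the abort/exit path.
 */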
static atomic_t abort_barrier;
static bool cpu_done[MAX_CPUS];
static struct idle_statedata *state_ptr = &omap4_idle_data[0];

/* Private functions */

/**
 * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the entered low power state.
 */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	omap_do_wfi();
	return index;
}

static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;
	u32 mpuss_can_lose_context = 0;
	int cpu_id = smp_processor_id();

	/*
	 * CPU0 has to wait and stay ON until CPU1 is in OFF state.
	 * This is necessary to honour the hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode.  Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;
		}
	}

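	/* MPUSS context is lost only in OSWR: MPU in RET with logic OFF */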
	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
				 (cx->mpu_logic_state == PWRDM_POWER_OFF);

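	/* Local timers stop in the deeper states; switch to the broadcast timer */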
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	cpu_pm_enter();

	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if (mpuss_can_lose_context)
			cpu_cluster_pm_enter();
	}

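	/* Enter the targeted low power state on this CPU */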
	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;

	/* Wake up CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context)
			gic_dist_disable();

		clkdm_wakeup(cpu_clkdm[1]);
		omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
		clkdm_allow_idle(cpu_clkdm[1]);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context) {
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
		}
	}

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (dev->cpu == 0 && mpuss_can_lose_context)
		cpu_cluster_pm_exit();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);

fail:
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	return index;
}

/*
 * For each CPU, set up the broadcast timer because the local timer
 * stops for the states deeper than C1.
 */
static void omap_setup_broadcast_timer(void *arg)
{
	int cpu = smp_processor_id();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}

static struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.flags = CPUIDLE_FLAG_TIME_VALID,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON",
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
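	/* C1 is the safe state a CPU uses while waiting to enter a coupled state */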
	.safe_state_index = 0,
};

/* Public functions */

/**
 * omap4_idle_init - Init routine for OMAP4+ idle
 *
 * Registers the OMAP4+ specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 */
int __init omap4_idle_init(void)
{
	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
	if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
		return -ENODEV;

	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
	if (!cpu_clkdm[0] || !cpu_clkdm[1])
		return -ENODEV;

	/* Configure the broadcast timer on each cpu */
	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);

	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
}