/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <string.h>
#include <utils.h>
#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
 */
const spd_pm_ops_t *psci_spd_pm;

/*
 * PSCI requested local power state map. This array is used to store the local
 * power states requested by a CPU for power levels from level 1 to
 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
 * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power states for a
 * CPU are the same.
 *
 * During state coordination, the platform is passed an array containing the
 * local states requested for a particular non cpu power domain by each cpu
 * within the domain.
 *
 * TODO: Dense packing of the requested states will cause cache thrashing
 * when multiple power domains write to it. If we allocate the requested
 * states at each power level in a cache-line aligned per-domain memory,
 * the cache thrashing can be avoided.
 */
static plat_local_state_t
	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];


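/*
 * Illustrative sketch (not part of this file): during coordination each slice
 * of the array above, psci_req_local_pwr_states[lvl - 1][...], is handed to
 * the platform through plat_get_target_pwr_state(). A minimal policy, similar
 * in spirit to the generic default, simply picks the shallowest state that any
 * CPU in the domain has requested:
 *
 *	plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
 *					const plat_local_state_t *states,
 *					unsigned int ncpu)
 *	{
 *		plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
 *
 *		assert(ncpu);
 *
 *		do {
 *			temp = *states++;
 *			if (temp < target)
 *				target = temp;
 *		} while (--ncpu);
 *
 *		return target;
 *	}
 */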
/*******************************************************************************
 * Arrays that hold the platform's power domain tree information for state
 * management of power domains.
 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
 * which is an ancestor of a CPU power domain.
 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
 ******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
#if USE_COHERENT_MEM
__section("tzfw_coherent_mem")
#endif
;

/* Lock for PSCI state coordination */
DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_psci_ops_t *psci_plat_pm_ops;

/******************************************************************************
 * Check that the maximum power level supported by the platform makes sense
 *****************************************************************************/
CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
		PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
		assert_platform_max_pwrlvl_check);

/*
 * The plat_local_state used by the platform is one of these types: RUN,
 * RETENTION and OFF. The platform can define further sub-states for each type
 * apart from RUN. This categorization is done to verify the sanity of the
 * psci_power_state passed by the platform and to print debug information. The
 * categorization is done on the basis of the following conditions:
 *
 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
 *
 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_RETN.
 *
 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_OFF.
 */
typedef enum plat_local_state_type {
	STATE_TYPE_RUN = 0,
	STATE_TYPE_RETN,
	STATE_TYPE_OFF
} plat_local_state_type_t;

/* The macro used to categorize plat_local_state. */
#define find_local_state_type(plat_local_state)				\
		((plat_local_state) ? (((plat_local_state) > PLAT_MAX_RET_STATE) \
		? STATE_TYPE_OFF : STATE_TYPE_RETN)				\
		: STATE_TYPE_RUN)

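/*
 * Worked example (values are hypothetical): on a platform with
 * PLAT_MAX_RET_STATE = 1 and PLAT_MAX_OFF_STATE = 2, find_local_state_type()
 * maps a plat_local_state of 0 to STATE_TYPE_RUN, 1 to STATE_TYPE_RETN and
 * 2 to STATE_TYPE_OFF.
 */
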
/******************************************************************************
 * Check that the maximum retention level supported by the platform is less
 * than the maximum off level.
 *****************************************************************************/
CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
		assert_platform_max_off_and_retn_state_check);

/******************************************************************************
 * This function validates the power_state parameter of a CPU_SUSPEND request.
 * If it is valid, the platform converts it into the requested local power
 * states for each power level and returns them through 'state_info'.
 *****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info)
{
	/* Check that the SBZ bits in the power state are zero */
	if (psci_check_power_state(power_state))
		return PSCI_E_INVALID_PARAMS;

	assert(psci_plat_pm_ops->validate_power_state);

	/* Validate the power_state using platform pm_ops */
	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}

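/*
 * Illustrative sketch (hypothetical platform hook, not part of this file): a
 * platform using the original power_state format might implement its
 * validate_power_state hook roughly as below, mapping a standby request to a
 * CPU-level retention state and a power-down request to OFF states up to the
 * requested power level.
 *
 *	static int plat_validate_power_state(unsigned int power_state,
 *					     psci_power_state_t *req_state)
 *	{
 *		unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
 *		unsigned int type = psci_get_pstate_type(power_state);
 *		unsigned int i;
 *
 *		if (pwr_lvl > PLAT_MAX_PWR_LVL)
 *			return PSCI_E_INVALID_PARAMS;
 *
 *		if (type == PSTATE_TYPE_STANDBY) {
 *			// Retention only supported at the CPU level here.
 *			if (pwr_lvl != PSCI_CPU_PWR_LVL)
 *				return PSCI_E_INVALID_PARAMS;
 *			req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] =
 *							PLAT_MAX_RET_STATE;
 *		} else {
 *			for (i = PSCI_CPU_PWR_LVL; i <= pwr_lvl; i++)
 *				req_state->pwr_domain_state[i] =
 *							PLAT_MAX_OFF_STATE;
 *		}
 *
 *		return PSCI_E_SUCCESS;
 *	}
 */
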
/******************************************************************************
 * This function retrieves the `psci_power_state_t` for system suspend from
 * the platform.
 *****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
	/*
	 * Assert that the required pm_ops hook is implemented to ensure that
	 * the capability detected during psci_setup() is valid.
	 */
	assert(psci_plat_pm_ops->get_sys_suspend_power_state);

	/*
	 * Query the platform for the power_state required for system suspend
	 */
	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
}

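/*
 * Illustrative sketch (hypothetical platform hook, not part of this file):
 * for SYSTEM_SUSPEND, platforms commonly report the deepest state at every
 * power level, for example:
 *
 *	static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
 *	{
 *		unsigned int i;
 *
 *		for (i = PSCI_CPU_PWR_LVL; i <= PLAT_MAX_PWR_LVL; i++)
 *			req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
 *	}
 */
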
/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
	unsigned int cpu_idx, my_idx = plat_my_core_pos();

	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
		if (cpu_idx == my_idx) {
			assert(psci_get_aff_info_state() == AFF_STATE_ON);
			continue;
		}

		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
			return 0;
	}

	return 1;
}

/*******************************************************************************
 * Routine to return the maximum power level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
static unsigned int get_power_on_target_pwrlvl(void)
{
	unsigned int pwrlvl;

	/*
	 * Assume that this cpu was suspended and retrieve its target power
	 * level. If it is invalid then it could only have been turned off
	 * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
	 * cpu can be turned off to.
	 */
	pwrlvl = psci_get_suspend_pwrlvl();
	if (pwrlvl == PSCI_INVALID_PWR_LVL)
		pwrlvl = PLAT_MAX_PWR_LVL;
	return pwrlvl;
}

/******************************************************************************
 * Helper function to update the requested local power state array. This array
 * does not store the requested state for the CPU power level. Hence an
 * assertion is added to prevent us from accessing the wrong index.
 *****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
					 unsigned int cpu_idx,
					 plat_local_state_t req_pwr_state)
{
	/*
	 * This should never happen, we have this here to avoid
	 * "array subscript is above array bounds" errors in GCC.
	 */
	assert(pwrlvl > PSCI_CPU_PWR_LVL);
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
	psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
#pragma GCC diagnostic pop
}

/******************************************************************************
 * This function initializes the psci_req_local_pwr_states.
 *****************************************************************************/
void psci_init_req_local_pwr_states(void)
{
	/* Initialize the requested state of all non CPU power domains as OFF */
	memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
			sizeof(psci_req_local_pwr_states));
}

/******************************************************************************
 * Helper function to return a reference to an array containing the local power
 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
 * array will be the number of cpu power domains of which this power domain is
 * an ancestor. These requested states will be used to determine a suitable
 * target state for this power domain during psci state coordination. An
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
							 unsigned int cpu_idx)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);

	return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
}

/*
 * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
 * memory.
 *
 * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory and
 * is accessed by both cached and non-cached participants. To serve the common
 * minimum, perform a cache flush before read and after write so that non-cached
 * participants operate on the latest data in main memory.
 *
 * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
 * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
 * In both cases, no cache operations are required.
 */

/*
 * Retrieve the local state of a non-CPU power domain node from a non-cached
 * CPU, after any required cache maintenance operation.
 */
static plat_local_state_t get_non_cpu_pd_node_local_state(
		unsigned int parent_idx)
{
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
	return psci_non_cpu_pd_nodes[parent_idx].local_state;
}

/*
 * Update the local state of a non-CPU power domain node from a cached CPU;
 * perform any required cache maintenance operation afterwards.
 */
static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
		plat_local_state_t state)
{
	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
}

/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	plat_local_state_t *pd_state = target_state->pwr_domain_state;

	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local power state from node to state_info */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the higher levels to RUN */
	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}

/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
					const psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	const plat_local_state_t *pd_state = target_state->pwr_domain_state;

	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

	/*
	 * Need to flush as local_state might be accessed with Data Cache
	 * disabled during power on
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.local_state);

	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local_state from state_info */
	for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}


/*******************************************************************************
 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
 ******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int node_index[])
{
	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
	unsigned int i;

	for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
		*node_index++ = parent_node;
		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
	}
}

/******************************************************************************
 * This function is invoked post CPU power up and initialization. It sets the
 * affinity info state, target power state and requested power state for the
 * current CPU and all its ancestor power domains to RUN.
 *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
	unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Reset the local_state to RUN for the non cpu power domains. */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx,
				PSCI_LOCAL_STATE_RUN);
		psci_set_req_local_pwr_state(lvl,
					     cpu_idx,
					     PSCI_LOCAL_STATE_RUN);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the affinity info state to ON */
	psci_set_aff_info_state(AFF_STATE_ON);

	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
	psci_flush_cpu_data(psci_svc_cpu_data);
}

/******************************************************************************
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl). It updates the array of requested power
 * states with this information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the target state for a level
 * is RUN then subsequent levels are not considered. At the CPU level, state
 * coordination is not required. Hence, the requested and the target states are
 * the same.
 *
 * The 'state_info' is updated with the target state for each level between the
 * CPU and the 'end_pwrlvl' and returned to the caller.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info)
{
	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
	unsigned int start_idx, ncpus;
	plat_local_state_t target_state, *req_states;

	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* For level 0, the requested state will be equivalent
	   to target state */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {

		/* First update the requested power state */
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);

		/* Get the requested power states for this power level */
		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
		req_states = psci_get_req_local_pwr_states(lvl, start_idx);

		/*
		 * Let the platform coordinate amongst the requested states at
		 * this power level and return the target local power state.
		 */
		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
		target_state = plat_get_target_pwr_state(lvl,
							 req_states,
							 ncpus);

		state_info->pwr_domain_state[lvl] = target_state;

		/* Break early if the negotiated target power state is RUN */
		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
			break;

		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/*
	 * This is for cases when we break out of the above loop early because
	 * the target power state is RUN at a power level < end_pwrlvl.
	 * We update the requested power state from state_info and then
	 * set the target state as RUN.
	 */
	for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);
		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
	}

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
}

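/*
 * Worked example (hypothetical topology): consider a two-CPU cluster where
 * CPU0 calls CPU_SUSPEND requesting cluster OFF while CPU1 is still running.
 * CPU1's requested state for the cluster level was reset to RUN when it was
 * last powered on, so a "shallowest state wins" coordination policy yields
 * RUN for the cluster level and only CPU0's own level 0 state is honoured.
 * The cluster is powered off only once the last CPU in it requests OFF.
 */
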
/******************************************************************************
 * This function validates a suspend request by making sure that if a standby
 * state is requested then no power level is turned off and the highest power
 * level is placed in a standby/retention state.
 *
 * It also ensures that the state that level X will enter is not shallower than
 * the state that level X + 1 will enter.
 *
 * This validation will be enabled only for DEBUG builds as the platform is
 * expected to perform these validations as well.
 *****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state)
{
	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
	plat_local_state_t state;
	plat_local_state_type_t req_state_type, deepest_state_type;
	int i;

	/* Find the target suspend power level */
	target_lvl = psci_find_target_suspend_lvl(state_info);
	if (target_lvl == PSCI_INVALID_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* All power domain levels are in a RUN state to begin with */
	deepest_state_type = STATE_TYPE_RUN;

	for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
		state = state_info->pwr_domain_state[i];
		req_state_type = find_local_state_type(state);

		/*
		 * While traversing from the highest power level to the lowest,
		 * the state requested for lower levels has to be the same or
		 * deeper i.e. equal to or greater than the state at the higher
		 * levels. If this condition is true, then the requested state
		 * becomes the deepest state encountered so far.
		 */
		if (req_state_type < deepest_state_type)
			return PSCI_E_INVALID_PARAMS;
		deepest_state_type = req_state_type;
	}

	/* Find the highest off power level */
	max_off_lvl = psci_find_max_off_lvl(state_info);

	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
	max_retn_lvl = PSCI_INVALID_PWR_LVL;
	if (target_lvl != max_off_lvl)
		max_retn_lvl = target_lvl;

	/*
	 * If this is not a request for a power down state then max off level
	 * has to be invalid and max retention level has to be a valid power
	 * level.
	 */
	if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
				    max_retn_lvl == PSCI_INVALID_PWR_LVL))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

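/*
 * Worked example (hypothetical two-level topology, PLAT_MAX_PWR_LVL = 1):
 * a power-down request with pwr_domain_state = {OFF, RETENTION} is valid,
 * since level 0 is at least as deep as level 1, whereas {RETENTION, OFF} is
 * rejected because level 0 would be shallower than level 1.
 */
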
/******************************************************************************
 * This function finds the highest power level which will be powered down
 * amongst all the power levels specified in the 'state_info' structure
 *****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_off(state_info->pwr_domain_state[i]))
			return i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/******************************************************************************
 * This function finds the level of the highest power domain which will be
 * placed in a low power state during a suspend operation.
 *****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
		if (!is_local_state_run(state_info->pwr_domain_state[i]))
			return i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It picks up locks in order of
 * increasing power domain level in the range specified.
 ******************************************************************************/
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx)
{
	unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
	unsigned int level;

	/* No locking required for level 0. Hence start locking from level 1 */
	for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It releases the locks in order
 * of decreasing power domain level in the range specified.
 ******************************************************************************/
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx)
{
	unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	int level;

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

	/* Unlock top down. No unlocking required for level 0. */
	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
		parent_idx = parent_nodes[level - 1];
		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
	}
}

/*******************************************************************************
 * Simple routine to determine whether an mpidr is valid or not.
 ******************************************************************************/
int psci_validate_mpidr(u_register_t mpidr)
{
	if (plat_core_pos_by_mpidr(mpidr) < 0)
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
#ifdef AARCH32
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr;
	unsigned int aif, ee, mode;
	u_register_t scr = read_scr();
	u_register_t ns_sctlr, sctlr;

	/* Switch to non secure state */
	write_scr(scr | SCR_NS_BIT);
	isb();
	ns_sctlr = read_sctlr();

	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

	/* Return to original state */
	write_scr(scr);
	isb();
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Choose async. exception bits if HYP mode is not
	 * implemented according to the values of SCR.{AW, FW} bits
	 */
	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

	return PSCI_E_SUCCESS;
}

#else
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr, sctlr;
	unsigned int daif, ee, mode;
	u_register_t ns_scr_el3 = read_scr_el3();
	u_register_t ns_sctlr_el1 = read_sctlr_el1();

	sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	if (ns_scr_el3 & SCR_RW_BIT) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if (entrypoint & 0x1)
			return PSCI_E_INVALID_ADDRESS;

		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;

		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	} else {

		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
	}

	return PSCI_E_SUCCESS;
}
#endif

/*******************************************************************************
 * This function validates the entrypoint with the platform layer if the
 * appropriate pm_ops hook is exported by the platform and returns the
 * 'entry_point_info'.
 ******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
			      uintptr_t entrypoint,
			      u_register_t context_id)
{
	int rc;

	/* Validate the entrypoint using platform psci_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS)
			return PSCI_E_INVALID_ADDRESS;
	}

	/*
	 * Verify and derive the re-entry information for
	 * the non-secure world from the non-secure state from
	 * where this call originated.
	 */
	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
	return rc;
}

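/*
 * Illustrative sketch (hypothetical platform hook and memory map macros, not
 * part of this file): a platform's validate_ns_entrypoint hook typically just
 * checks that the address lies within memory the non-secure world may execute
 * from, e.g.:
 *
 *	static int plat_validate_ns_entrypoint(uintptr_t entrypoint)
 *	{
 *		if ((entrypoint >= PLAT_NS_DRAM_BASE) &&
 *		    (entrypoint < (PLAT_NS_DRAM_BASE + PLAT_NS_DRAM_SIZE)))
 *			return PSCI_E_SUCCESS;
 *
 *		return PSCI_E_INVALID_ADDRESS;
 *	}
 */
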
/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information and finds the highest power level powered
 * off and performs generic, architectural, platform setup and state management
 * to power on that power level and power levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to gic cpu interface.
 ******************************************************************************/
void psci_warmboot_entrypoint(void)
{
	unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, the system topology has been
	 * snapshotted and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
#endif

	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handlers and perform the generic, architecture
	 * and platform specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

#if ENABLE_PSCI_STAT
	/*
	 * Update PSCI stats.
	 * Caches are off when writing stats data on the power down path.
	 * Since caches are now enabled, it's necessary to do cache
	 * maintenance before reading that same data.
	 */
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * This loop releases the lock corresponding to each power level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);
}

/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of power
 * management operation. The power management hooks are expected to be provided
 * by the SPD, after it finishes all its initialization.
 ******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
	assert(pm);
	psci_spd_pm = pm;

	if (pm->svc_migrate)
		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

	if (pm->svc_migrate_info)
		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
				| define_psci_cap(PSCI_MIG_INFO_TYPE);
}

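/*
 * Illustrative sketch (hypothetical SPD handler names, not part of this file):
 * an SPD typically fills an spd_pm_ops_t with its handlers and registers it
 * once its Secure Payload has initialized, e.g.:
 *
 *	static const spd_pm_ops_t spd_pm = {
 *		.svc_on = spd_cpu_on_handler,
 *		.svc_off = spd_cpu_off_handler,
 *		.svc_suspend = spd_cpu_suspend_handler,
 *		.svc_on_finish = spd_cpu_on_finish_handler,
 *		.svc_suspend_finish = spd_cpu_suspend_finish_handler,
 *		.svc_migrate_info = spd_cpu_migrate_info,
 *	};
 *
 *	psci_register_spd_pm_hook(&spd_pm);
 */
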
/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(u_register_t *mpidr)
{
	int rc;

	if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
		return PSCI_E_NOT_SUPPORTED;

	rc = psci_spd_pm->svc_migrate_info(mpidr);

	assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP
		|| rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);

	return rc;
}


/*******************************************************************************
 * This function prints the state of all power domains present in the
 * system
 ******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
	unsigned int idx;
	plat_local_state_t state;
	plat_local_state_type_t state_type;

	/* This array maps to the PSCI_STATE_X definitions in psci.h */
	static const char * const psci_state_type_str[] = {
		"ON",
		"RETENTION",
		"OFF",
	};

	INFO("PSCI Power Domain Map:\n");
	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
							idx++) {
		state_type = find_local_state_type(
				psci_non_cpu_pd_nodes[idx].local_state);
		INFO("  Domain Node : Level %u, parent_node %d,"
				" State %s (0x%x)\n",
				psci_non_cpu_pd_nodes[idx].level,
				psci_non_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_non_cpu_pd_nodes[idx].local_state);
	}

	for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
		state = psci_get_cpu_local_state_by_idx(idx);
		state_type = find_local_state_type(state);
		INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
				" State %s (0x%x)\n",
				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
				psci_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_get_cpu_local_state_by_idx(idx));
	}
#endif
}

/******************************************************************************
 * Return whether any secondaries were powered up with CPU_ON call. A CPU that
 * has ever been powered up would have set its MPIDR value to something other
 * than PSCI_INVALID_MPIDR. Note that the MPIDR isn't reset back to
 * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
 * meaningful only when called on the primary CPU during early boot.
 *****************************************************************************/
int psci_secondaries_brought_up(void)
{
	unsigned int idx, n_valid = 0;

	for (idx = 0; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
		if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
			n_valid++;
	}

	assert(n_valid);

	return (n_valid > 1);
}

#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * PSCI Compatibility helper function to return the 'power_state' parameter of
 * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA
 * if not invoked within CPU_SUSPEND for the current CPU.
 ******************************************************************************/
int psci_get_suspend_powerstate(void)
{
	/* Sanity check to verify that CPU is within CPU_SUSPEND */
	if (psci_get_aff_info_state() == AFF_STATE_ON &&
		!is_local_state_run(psci_get_cpu_local_state()))
		return psci_power_state_compat[plat_my_core_pos()];

	return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the state id of the current
 * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA
 * if not invoked within CPU_SUSPEND for the current CPU.
 ******************************************************************************/
int psci_get_suspend_stateid(void)
{
	unsigned int power_state;
	power_state = psci_get_suspend_powerstate();
	if (power_state != PSCI_INVALID_DATA)
		return psci_get_pstate_id(power_state);

	return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the state id encoded in the
 * 'power_state' parameter of the CPU specified by 'mpidr'. Returns
 * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
 ******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
	int cpu_idx = plat_core_pos_by_mpidr(mpidr);

	if (cpu_idx == -1)
		return PSCI_INVALID_DATA;

	/* Sanity check to verify that the CPU is in CPU_SUSPEND */
	if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
		!is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
		return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);

	return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * This function returns the highest affinity level which is in OFF
 * state. The affinity instance with which the level is associated is
 * determined by the caller.
 ******************************************************************************/
unsigned int psci_get_max_phys_off_afflvl(void)
{
	psci_power_state_t state_info;

	zeromem(&state_info, sizeof(state_info));
	psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info);

	return psci_find_target_suspend_lvl(&state_info);
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the target affinity level
 * requested for the CPU_SUSPEND. This function assumes affinity levels
 * correspond to power domain levels on the platform.
 ******************************************************************************/
int psci_get_suspend_afflvl(void)
{
	return psci_get_suspend_pwrlvl();
}

#endif

/*******************************************************************************
 * Initiate the power down sequence by calling the power down operations
 * registered for this CPU.
 ******************************************************************************/
void psci_do_pwrdown_sequence(unsigned int power_level)
{
#if HW_ASSISTED_COHERENCY
	/*
	 * With hardware-assisted coherency, the CPU drivers only initiate the
	 * power down sequence, without performing cache-maintenance operations
	 * in software. Data caches and MMU remain enabled both before and after
	 * this call.
	 */
	prepare_cpu_pwr_dwn(power_level);
#else
	/*
	 * Without hardware-assisted coherency, the CPU drivers disable data
	 * caches and MMU, then perform cache-maintenance operations in
	 * software.
	 *
	 * We ought to call prepare_cpu_pwr_dwn() to initiate power down
	 * sequence. We currently have data caches and MMU enabled, but the
	 * function will return with data caches and MMU disabled. We must
	 * ensure that the stack memory is flushed out to memory before we start
	 * popping from it again.
	 */
	psci_do_pwrdown_cache_maintenance(power_level);
#endif
}
