/*
 * Low-level PXA250/210 sleep/wakeup support
 *
 * Initial SA1110 code:
 * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
 *
 * Adapted for PXA by Nicolas Pitre:
 * Copyright (c) 2002 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License.
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/hardware.h>

#include <asm/arch/pxa-regs.h>

		.text

/*
 * pxa_cpu_suspend()
 *
 * Forces CPU into sleep state
 */

ENTRY(pxa_cpu_suspend)

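	@ acc0 is the 40-bit XScale DSP accumulator; it cannot be stored
	@ with stmfd directly, so move it into r2/r3 first (it is restored
	@ with mar in resume_after_mmu below)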
	mra	r2, r3, acc0
	stmfd	sp!, {r2 - r12, lr}		@ save registers on stack

	@ get coprocessor registers
	mrc	p14, 0, r3, c6, c0, 0		@ clock configuration, for turbo mode
	mrc	p15, 0, r4, c15, c1, 0		@ CP access reg
	mrc	p15, 0, r5, c13, c0, 0		@ PID
	mrc	p15, 0, r6, c3, c0, 0		@ domain ID
	mrc	p15, 0, r7, c2, c0, 0		@ translation table base addr
	mrc	p15, 0, r8, c1, c1, 0		@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0		@ control reg

	bic	r3, r3, #2			@ clear frequency change bit

	@ store them plus current virtual stack ptr on stack
	mov	r10, sp
	stmfd	sp!, {r3 - r10}
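	@ stack layout from this point (ascending addresses):
	@   {r3=CLKCFG, r4=CP access, r5=PID, r6=domain ID, r7=TTB,
	@    r8=aux ctrl, r9=ctrl, r10=virtual sp}
	@ with the earlier {r2/r3=acc0, r4-r12, lr} frame just above it;
	@ pxa_cpu_resume reloads the first block via ldmfd r0, {r3 - r9, sp}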

	@ preserve phys address of stack
	mov	r0, sp
	bl	sleep_phys_sp
	ldr	r1, =sleep_save_sp
	str	r0, [r1]
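	@ sleep_phys_sp translates the virtual sp in r0 to its physical
	@ address; resume starts with the MMU off, so it can only reach the
	@ saved state through this physical pointer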

	@ clean data cache
	bl	xscale_flush_kern_cache_all
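	@ cache contents do not survive sleep and the SDRAM is about to be
	@ put into self-refresh, so any dirty lines must reach memory now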

	@ Put the processor to sleep
	@ (also workaround for sighting 28071)

	@ prepare value for sleep mode
	mov	r1, #3				@ sleep mode

	@ prepare to put SDRAM into self-refresh manually
	ldr	r4, =MDREFR
	ldr	r5, [r4]
	orr	r5, r5, #MDREFR_SLFRSH
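	@ self-refresh lets the SDRAM retain its contents on its own while
	@ the memory controller is shut down during sleep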

	@ prepare pointer to physical address 0 (virtual mapping in generic.c)
	mov	r2, #UNCACHED_PHYS_0

	@ The Intel PXA255 Specification Update notes problems with
	@ suspending while the PXbus is running above 133MHz
	@ (see Erratum 31: GPIO output signals, ... unpredictable in sleep).
	@
	@ We keep the change-down as close to the actual suspend of SDRAM
	@ as possible, to avoid messing about with the refresh clock, and
	@ the system will resume with the original speed settings anyway.
	@
	@ Ben Dooks, 13-Sep-2004

	ldr	r6, =CCCR
	ldr	r8, [r6]		@ keep original value for resume

	@ ensure x1 for run and turbo mode with memory clock
	bic	r7, r8, #CCCR_M_MASK | CCCR_N_MASK
	orr	r7, r7, #(1<<5) | (2<<7)
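	@ on the PXA25x, CCCR.L (bits 4:0) is the crystal-to-memory-clock
	@ multiplier, CCCR.M (bits 6:5) the memory-to-run multiplier and
	@ CCCR.N (bits 9:7) the run-to-turbo multiplier (the field holds
	@ twice the multiplier); M=1, N=2 make run and turbo equal to the
	@ memory clock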

	@ check that the memory frequency is within limits
	and	r14, r7, #CCCR_L_MASK
	teq	r14, #1
	bicne	r7, r7, #CCCR_L_MASK
	orrne	r7, r7, #1			@ 99.53MHz

	@ get ready for the change

	@ Note: the turbo bit is not preserved over sleep, so there is no
	@ point in programming it here; the CLKCFG value saved on the stack
	@ is restored with the other CP registers at resume instead.
	mov	r0, #0
	mcr	p14, 0, r0, c6, c0, 0
	orr	r0, r0, #2			@ initiate change bit
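	@ CLKCFG (cp14 register 6): bit 0 is the turbo bit, bit 1 the
	@ frequency-change (F) bit; r0 now holds a value that will trigger
	@ a frequency change sequence when written below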

	@ align execution to a cache line
	b	1f

	.ltorg
	.align	5
1:

	@ All needed values are now in registers.
	@ These last instructions should be in cache.
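	@ (once SDRAM has been put into self-refresh, and while the
	@ frequency change is in progress, no further instruction fetches
	@ from it can complete, so everything from here through the sleep
	@ entry must already sit in the I-cache)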

	@ initiate the frequency change...
	str	r7, [r6]
	mcr	p14, 0, r0, c6, c0, 0

	@ restore the original cpu speed value for resume
	str	r8, [r6]

	@ put SDRAM into self-refresh
	str	r5, [r4]

	@ force address lines low by reading at physical address 0
	ldr	r3, [r2]
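	@ the UNCACHED_PHYS_0 mapping is uncached, so this load really goes
	@ out on the external bus instead of being satisfied from the cache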

	@ enter sleep mode
	mcr	p14, 0, r1, c7, c0, 0

20:	b	20b				@ loop waiting for sleep
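	@ the write to the cp14 PWRMODE register takes effect
	@ asynchronously, hence the loop above; wakeup comes back through
	@ the reset vector, and the boot code is expected to branch to
	@ pxa_cpu_resume (the PM code normally stashes its physical address
	@ in the PSPR scratchpad register for that purpose)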

/*
 * pxa_cpu_resume()
 *
 * entry point from bootloader into kernel during resume
 *
 * Note: Yes, part of the following code is located in the .data section.
 *       This is to allow sleep_save_sp to be accessed with a relative load
 *       while we can't rely on any MMU translation.  We could have put
 *       sleep_save_sp in the .text section as well, but some setups might
 *       insist on .text being truly read-only.
 */

	.data
	.align 5
ENTRY(pxa_cpu_resume)
	mov	r0, #PSR_I_BIT | PSR_F_BIT | MODE_SVC	@ set SVC mode, IRQs off
	msr	cpsr_c, r0

	ldr	r0, sleep_save_sp		@ stack phys addr
	ldr	r2, =resume_after_mmu		@ its absolute virtual address
	ldmfd	r0, {r3 - r9, sp}		@ CP regs + virt stack ptr
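	@ this mirrors the stmfd {r3 - r10} in pxa_cpu_suspend: r3=CLKCFG,
	@ r4=CP access, r5=PID, r6=domain ID, r7=TTB, r8=aux ctrl, r9=ctrl,
	@ and sp receives the saved virtual stack pointer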

	mov	r1, #0
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
	mcr	p15, 0, r1, c7, c7, 0		@ invalidate I & D caches, BTB
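	@ the MMU is still off here, and cache/TLB contents did not survive
	@ the power-down, so both must be invalidated before the control
	@ register is restored below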

#ifdef CONFIG_XSCALE_CACHE_ERRATA
	bic	r9, r9, #0x0004			@ see cpu_xscale_proc_init
#endif

	mcr	p14, 0, r3, c6, c0, 0		@ clock configuration, turbo mode
	mcr	p15, 0, r4, c15, c1, 0		@ CP access reg
	mcr	p15, 0, r5, c13, c0, 0		@ PID
	mcr	p15, 0, r6, c3, c0, 0		@ domain ID
	mcr	p15, 0, r7, c2, c0, 0		@ translation table base addr
	mcr	p15, 0, r8, c1, c1, 0		@ auxiliary control reg
	b	resume_turn_on_mmu		@ cache align execution

	.align 5
resume_turn_on_mmu:
	mcr	p15, 0, r9, c1, c0, 0		@ turn on MMU, caches, etc.

	@ Make sure we jump to resume_after_mmu only once the mcr above has
	@ actually taken effect.  This is the so-called "cpwait" operation.
	mrc	p15, 0, r1, c2, c0, 0		@ queue a dependency on CP15
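	@ "lsr #32" always yields zero, so the sub below is simply a branch
	@ to r2 (resume_after_mmu); using r1 creates a data dependency that
	@ stalls execution until the CP15 operations have completed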
	sub	pc, r2, r1, lsr #32		@ jump to virtual addr
	nop
	nop
	nop

sleep_save_sp:
	.word	0				@ preserve stack phys ptr here

	.text
resume_after_mmu:
#ifdef CONFIG_XSCALE_CACHE_ERRATA
	bl	cpu_xscale_proc_init
#endif
	ldmfd	sp!, {r2, r3}
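	@ restore the DSP accumulator saved with mra at suspend time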
	mar	acc0, r2, r3
	ldmfd	sp!, {r4 - r12, pc}		@ return to caller