/*
 *  linux/arch/arm/vfp/vfphw.S
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This code is called from the kernel's undefined instruction trap.
 * r9 holds the return address for successful handling.
 * lr holds the return address for unrecognised instructions.
 * r10 points at the start of the private FP workspace in the thread structure
 * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
 */
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
#include "../kernel/entry-header.S"

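@ The DBGSTR macros below print through printk without needing a data
@ section: each format string is assembled inline, immediately after
@ the "bl printk".  In ARM state the pc reads as the current
@ instruction's address plus 8, so "add r0, pc, #4" leaves r0 pointing
@ at the .asciz string 12 bytes beyond the add, and the "b 1f" skips
@ the string data at run time.  The "<7>" prefix is the KERN_DEBUG
@ loglevel.
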
	.macro	DBGSTR, str
#ifdef DEBUG
	stmfd	sp!, {r0-r3, ip, lr}
	add	r0, pc, #4
	bl	printk
	b	1f
	.asciz  "<7>VFP: \str\n"
	.balign 4
1:	ldmfd	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro  DBGSTR1, str, arg
#ifdef DEBUG
	stmfd	sp!, {r0-r3, ip, lr}
	mov	r1, \arg
	add	r0, pc, #4
	bl	printk
	b	1f
	.asciz  "<7>VFP: \str\n"
	.balign 4
1:	ldmfd	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro  DBGSTR3, str, arg1, arg2, arg3
#ifdef DEBUG
	stmfd	sp!, {r0-r3, ip, lr}
	mov	r3, \arg3
	mov	r2, \arg2
	mov	r1, \arg1
	add	r0, pc, #4
	bl	printk
	b	1f
	.asciz  "<7>VFP: \str\n"
	.balign 4
1:	ldmfd	sp!, {r0-r3, ip, lr}
#endif
	.endm

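@ Note that the DBGSTR macros save and restore only the caller-saved
@ registers (r0-r3, ip, lr) around the printk call; printk preserves
@ the rest, so the macros may be dropped in anywhere in this file
@ without disturbing the register state the handlers rely on.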

@ VFP hardware support entry point.
@
@  r0  = faulted instruction
@  r2  = faulted PC+4
@  r9  = successful return
@  r10 = vfp_state union
@  r11 = CPU number
@  lr  = failure return

ENTRY(vfp_support_entry)
	DBGSTR3	"instr %08x pc %08x state %p", r0, r2, r10

	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
	DBGSTR1	"fpexc %08x", r1
	tst	r1, #FPEXC_EN
	bne	look_for_VFP_exceptions	@ VFP is already enabled

	DBGSTR1 "enable %x", r10
	ldr	r3, vfp_current_hw_state_address
	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
	ldr	r4, [r3, r11, lsl #2]	@ vfp_current_hw_state pointer
	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
	cmp	r4, r10			@ this thread owns the hw context?
#ifndef CONFIG_SMP
	@ For UP, checking that this thread owns the hw context is
	@ sufficient to determine that the hardware state is valid.
	beq	vfp_hw_state_valid

	@ On UP, we lazily save the VFP context.  As a different
	@ thread wants ownership of the VFP hardware, save the old
	@ state if there was a previous (valid) owner.

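	@ While FPEXC.EN is clear, accesses to the other VFP registers
	@ (FPSCR, FPINST, the register bank) are themselves undefined,
	@ so FPEXC must be written first - with FPEXC.EX cleared in r5
	@ so that no exceptional state is signalled while we work.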
	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
					@ exceptions, so we can get at the
					@ rest of it

	DBGSTR1	"save old state %p", r4
	cmp	r4, #0			@ if the vfp_current_hw_state is NULL
	beq	vfp_reload_hw		@ then the hw state needs reloading
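	@ VFPFSTMIA/VFPFLDMIA (asm/vfpmacros.h) transfer the whole
	@ register bank; judging by the two-operand form, the second
	@ operand (r5 here) is a scratch register the macro may use,
	@ e.g. when probing for the d16-d31 bank on VFPv3 cores.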
	VFPFSTMIA r4, r5		@ save the working registers
	VFPFMRX	r5, FPSCR		@ current status
#ifndef CONFIG_CPU_FEROCEON
	tst	r1, #FPEXC_EX		@ is there additional state to save?
	beq	1f
	VFPFMRX	r6, FPINST		@ FPINST (only if FPEXC.EX is set)
	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
	beq	1f
	VFPFMRX	r8, FPINST2		@ FPINST2 if needed (and present)
1:
#endif
	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
vfp_reload_hw:

#else
	@ For SMP, if this thread does not own the hw context, then we
	@ need to reload it.  No need to save the old state as on SMP,
	@ we always save the state when we switch away from a thread.
	bne	vfp_reload_hw

	@ This thread has ownership of the current hardware context.
	@ However, it may have been migrated to another CPU, in which
	@ case the saved state is newer than the hardware context.
	@ Check this by looking at the CPU number which the state was
	@ last loaded onto.
	ldr	ip, [r10, #VFP_CPU]
	teq	ip, r11
	beq	vfp_hw_state_valid

vfp_reload_hw:
	@ We're loading this thread's state into the VFP hardware. Record
	@ the CPU number so we know which CPU holds the most up-to-date
	@ VFP context.
	str	r11, [r10, #VFP_CPU]

	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
					@ exceptions, so we can get at the
					@ rest of it
#endif

	DBGSTR1	"load state %p", r10
	str	r10, [r3, r11, lsl #2]	@ update the vfp_current_hw_state pointer
					@ Load the saved state back into the VFP
	VFPFLDMIA r10, r5		@ reload the working registers while
					@ FPEXC is in a safe state
	ldmia	r10, {r1, r5, r6, r8}	@ load FPEXC, FPSCR, FPINST, FPINST2
#ifndef CONFIG_CPU_FEROCEON
	tst	r1, #FPEXC_EX		@ is there additional state to restore?
	beq	1f
	VFPFMXR	FPINST, r6		@ restore FPINST (only if FPEXC.EX is set)
	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to write?
	beq	1f
	VFPFMXR	FPINST2, r8		@ FPINST2 if needed (and present)
1:
#endif
	VFPFMXR	FPSCR, r5		@ restore status

@ The context stored in the VFP hardware is up to date with this thread
vfp_hw_state_valid:
	tst	r1, #FPEXC_EX
	bne	process_exception	@ handle the pending exception
					@ before retrying the instruction;
					@ branch out before setting an
					@ FPEXC that stops us reading the
					@ other VFP registers
	VFPFMXR	FPEXC, r1		@ restore FPEXC last
	sub	r2, r2, #4
	str	r2, [sp, #S_PC]		@ retry the instruction
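	@ The preempt count was incremented in do_vfp (entry.S in this
	@ directory) before we were called; every exit path from this
	@ handler must drop it again by hand.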
#ifdef CONFIG_PREEMPT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it
	str	r11, [r10, #TI_PREEMPT]
#endif
	mov	pc, r9			@ we think we have handled things


look_for_VFP_exceptions:
	@ Check for synchronous or asynchronous exception
	tst	r1, #FPEXC_EX | FPEXC_DEX
	bne	process_exception
	@ On some implementations of the VFP subarch 1, setting FPSCR.IXE
	@ causes all the CDP instructions to be bounced synchronously without
	@ setting the FPEXC.EX bit
	VFPFMRX	r5, FPSCR
	tst	r5, #FPSCR_IXE
	bne	process_exception

	@ Fall through: hand on to the next handler - the coprocessor
	@ instruction was not recognised by the VFP

	DBGSTR	"not VFP"
#ifdef CONFIG_PREEMPT
	get_thread_info	r10
	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
	sub	r11, r4, #1		@ decrement it
	str	r11, [r10, #TI_PREEMPT]
#endif
	mov	pc, lr

process_exception:
	DBGSTR	"bounce"
	mov	r2, sp			@ nothing stacked - regdump is at TOS
	mov	lr, r9			@ set up for a return to the user code.

	@ Now call the C code to package up the bounce to the support code
	@   r0 holds the trigger instruction
	@   r1 holds the FPEXC value
	@   r2 pointer to register dump
	b	VFP_bounce		@ we have handled this - the support
					@ code will raise an exception if
					@ required. If not, the user code will
					@ retry the faulted instruction
ENDPROC(vfp_support_entry)

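@ vfp_save_state is called from C (for instance the context-switch
@ and suspend paths in vfpmodule.c) to save the hardware state
@ eagerly; the caller is expected to have already enabled VFP access,
@ since the register bank is read directly.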
ENTRY(vfp_save_state)
	@ Save the current VFP state
	@ r0 - save location
	@ r1 - FPEXC
	DBGSTR1	"save VFP state %p", r0
	VFPFSTMIA r0, r2		@ save the working registers
	VFPFMRX	r2, FPSCR		@ current status
	tst	r1, #FPEXC_EX		@ is there additional state to save?
	beq	1f
	VFPFMRX	r3, FPINST		@ FPINST (only if FPEXC.EX is set)
	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
	beq	1f
	VFPFMRX	r12, FPINST2		@ FPINST2 if needed (and present)
1:
	stmia	r0, {r1, r2, r3, r12}	@ save FPEXC, FPSCR, FPINST, FPINST2
	mov	pc, lr
ENDPROC(vfp_save_state)

	.align
vfp_current_hw_state_address:
	.word	vfp_current_hw_state

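@ tbl_branch implements a computed jump into a table of 1 << \shift
@ byte entries.  In ARM state the pc in "add pc, pc, \base, lsl \shift"
@ reads as the add's own address + 8, which is exactly where label 1
@ sits (the "mov r0, r0" nop fills the intervening slot), so index 0
@ lands on the first entry.  Thumb-2 has different pc semantics, so
@ there the target address is computed explicitly.  The ".org 1b + 8"
@ directives in the tables below pad each entry to the 8 bytes implied
@ by a shift of #3.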
	.macro	tbl_branch, base, tmp, shift
#ifdef CONFIG_THUMB2_KERNEL
	adr	\tmp, 1f
	add	\tmp, \tmp, \base, lsl \shift
	mov	pc, \tmp
#else
	add	pc, pc, \base, lsl \shift
	mov	r0, r0
#endif
1:
	.endm

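@ vfp_get_float and vfp_put_float index the branch table by single-
@ precision register number (0..31), two 8-byte entries per .irp
@ iteration.  The raw mrc/mcr p10 encodings are the fmrs/fmsr
@ instructions for s(2n) and s(2n+1): opcode2 selects the odd
@ register of each pair, which a symbolic s-register operand could
@ not express in terms of the .irp counter.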
ENTRY(vfp_get_float)
	tbl_branch r0, r3, #3
	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	mrc	p10, 0, r0, c\dr, c0, 0	@ fmrs	r0, s0
	mov	pc, lr
	.org	1b + 8
1:	mrc	p10, 0, r0, c\dr, c0, 4	@ fmrs	r0, s1
	mov	pc, lr
	.org	1b + 8
	.endr
ENDPROC(vfp_get_float)

ENTRY(vfp_put_float)
	tbl_branch r1, r3, #3
	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	mcr	p10, 0, r0, c\dr, c0, 0	@ fmsr	s0, r0
	mov	pc, lr
	.org	1b + 8
1:	mcr	p10, 0, r0, c\dr, c0, 4	@ fmsr	s1, r0
	mov	pc, lr
	.org	1b + 8
	.endr
ENDPROC(vfp_put_float)


ENTRY(vfp_get_double)
	tbl_branch r0, r3, #3
	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	fmrrd	r0, r1, d\dr
	mov	pc, lr
	.org	1b + 8
	.endr
#ifdef CONFIG_VFPv3
	@ d16 - d31 registers
	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	mrrc	p11, 3, r0, r1, c\dr	@ fmrrd	r0, r1, d(16+\dr)
	mov	pc, lr
	.org	1b + 8
	.endr
#endif

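	@ An out-of-range index falls straight through the table to here: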
	@ virtual register 16 (or 32 if VFPv3) for compare with zero
	mov	r0, #0
	mov	r1, #0
	mov	pc, lr
ENDPROC(vfp_get_double)

ENTRY(vfp_put_double)
	tbl_branch r2, r3, #3
	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	fmdrr	d\dr, r0, r1
	mov	pc, lr
	.org	1b + 8
	.endr
#ifdef CONFIG_VFPv3
	@ d16 - d31 registers
	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:	mcrr	p11, 3, r0, r1, c\dr	@ fmdrr	d(16+\dr), r0, r1
	mov	pc, lr
	.org	1b + 8
	.endr
#endif
ENDPROC(vfp_put_double)
