/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
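
/*
 * The marker is written into the "regshere" slot just below each
 * exception frame (see system_call_common below); the stack trace code
 * looks for it to recognise exception and syscall frames when dumping
 * a backtrace.
 */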

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
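	/*
	 * At this point r1 points at a freshly built exception frame: the
	 * volatile GPRs, NIP, MSR, LR, CCR and the trap word are saved and
	 * r9 points at the embedded pt_regs.  The non-volatile GPRs are
	 * only saved later, on demand, by save_nvgprs.
	 */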
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled,
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that the
	 * condition is correct.
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r11,PACAKMSR(r13)
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne	syscall_dotrace
.Lsyscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr   r12
	bctrl			/* Call handler */
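
	/*
	 * Roughly, the dispatch above in C-like pseudocode (an illustrative
	 * sketch only, not the kernel's actual types):
	 *
	 *	entry = sys_call_table + 16 * nr + (is_32bit_task ? 8 : 0);
	 *	r3 = (*entry)(r3, r4, r5, r6, r7, r8);
	 *
	 * i.e. the table interleaves the native and compat entry points,
	 * eight bytes each, hence the slwi by 4 and the addi of 8 above.
	 */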

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13)
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r9,MSR_RI
	andc	r11,r10,r9
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	HMT_MEDIUM_LOW_HAS_PPR
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)
	b	.Lsyscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	SET_DEFAULT_THREAD_PPR(r3, r10)		/* Set thread.ppr = 3 */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ld	r10,PACAKMSR(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

/* Save non-volatile GPRs, if not already saved. */
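/*
 * Convention: the low bit of the trap word saved in _TRAP(r1) is set while
 * the non-volatile GPRs are *not* in the frame (e.g. 0xc01 for a syscall).
 * save_nvgprs stores r14-r31 and clears that bit, so callers (and
 * ret_from_except) can tell whether the full register set is on the stack.
 */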
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

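/*
 * For kernel threads, copy_thread() places the thread function in the
 * child's r14 and its argument in r15; both are non-volatile, so they
 * survive _switch.  On the ELFv2 ABI the callee also expects its own
 * entry address in r12, hence the extra mr below.
 */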
_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtlr	r14
	mr	r3,r15
#if defined(_CALL_ELF) && _CALL_ELF == 2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
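
/*
 * Illustrative C-level sketch of how _switch is used (following
 * __switch_to() in process.c; details elided):
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct task_struct *new)
 *	{
 *		... save/flush per-thread state ...
 *		last = _switch(&prev->thread, &new->thread);
 *		return last;
 *	}
 *
 * _switch only returns once the old task is eventually scheduled back
 * in; the value returned in r3 is the task_struct of whatever task was
 * running just before that happened.
 */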
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Event based branch registers */
	mfspr	r0, SPRN_BESCR
	std	r0, THREAD_BESCR(r3)
	mfspr	r0, SPRN_EBBHR
	std	r0, THREAD_EBBHR(r3)
	mfspr	r0, SPRN_EBBRR
	std	r0, THREAD_EBBRR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
	DCBT_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S */

	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
	/* Event based branch registers */
	ld	r0, THREAD_BESCR(r4)
	mtspr	SPRN_BESCR, r0
	ld	r0, THREAD_EBBHR(r4)
	mtspr	SPRN_EBBHR, r0
	ld	r0, THREAD_EBBRR(r4)
	mtspr	SPRN_EBBRR, r0

	ld	r0,THREAD_TAR(r4)
	mtspr	SPRN_TAR,r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	lwz	r6,THREAD_DSCR_INHERIT(r4)
	ld	r0,THREAD_DSCR(r4)
	cmpwi	r6,0
	bne	1f
	ld	r0,PACA_DSCR(r13)
1:
BEGIN_FTR_SECTION_NESTED(70)
	mfspr	r8, SPRN_FSCR
	rldimi	r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
	mtspr	SPRN_FSCR, r8
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
	cmpd	r0,r25
	beq	2f
	mtspr	SPRN_DSCR,r0
2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
#ifdef CONFIG_PPC_BOOK3E
	bne	1f
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	beq	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non-volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

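/*
 * Kernel return path.  _TIF_EMULATE_STACK_STORE is set when an
 * instruction that updates r1 (typically stdu r1,-X(r1) under a kprobe)
 * had to be emulated and the store to the stack could not be done at
 * that point; the code below finishes the job by sliding the exception
 * frame down and performing the deferred store before returning.
 */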
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupt but we really should disable interrupts
	 * when we return from the interrupt, and so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
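	/*
	 * In rough pseudocode, the soft-irq bookkeeping below amounts to:
	 *
	 *	if (!regs->softe)		goto restore_irq_off;
	 *	if (paca->soft_enabled)		goto do_restore;
	 *	if (paca->irq_happened)		goto restore_check_irq_replay;
	 *	goto restore_no_replay;
	 *
	 * (field names are only illustrative: SOFTE lives in the exception
	 * frame, the other two values in the PACA.)
	 */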
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	restore_irq_off

	/* We are enabling, were we already enabled? Yes, just return */
	cmpwi	cr0,r6,1
	beq	cr0,do_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of rfi
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);

	/*
	 * Final return path. BookE is handled in a different file
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code paths, such as load_up_fpu or the altivec handlers,
	 * return directly here. They run entirely hard-disabled and do not
	 * alter the interrupt state. They also don't use lwarx/stwcx. and
	 * thus are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	ld	r4,PACAKMSR(r13) /* Get kernel MSR without EE */
	andc	r4,r4,r0	 /* r0 contains MSR_RI here */
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per-cpu area; only restore it if we are returning to
	 * userspace, since the value stored in the stack frame may belong
	 * to another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR,r2	/* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft-disabled.
	 *
	 * However, we may also be about to hard-enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS,
	 * or that bit can get out of sync and bad things will happen.
	 */
restore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);
	TRACE_DISABLE_INTS
	b	do_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	restore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
	BEGIN_FTR_SECTION
		cmpwi	cr0,r3,0xe80
	FTR_SECTION_ELSE
		cmpwi	cr0,r3,0xa00
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
	b	ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here? */

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
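	/*
	 * The rldicl/rotldi pair below rotates MSR_EE up to the top bit,
	 * clears it, and rotates the value back into place (48 + 16 = 64),
	 * so the net effect is an MSR image with only EE cleared, which
	 * mtmsrd then installs.
	 */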
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
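	/* r6 now holds the MSR RTAS will be entered with (via SRR1/rfid
	 * below): 32-bit (SF clear), MMU off (IR/DR clear), big-endian,
	 * FP and interrupts disabled.
	 */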
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	rfid
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that the top 32 bits of r1 didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

_GLOBAL_TOC(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
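	/*
	 * On entry, per the usual -pg/mcount convention: LR holds the
	 * address in the traced function just after the mcount call site,
	 * and the caller's return address sits in the previous frame's LR
	 * save slot at 16(r11).  r3 and r4 below therefore end up as the
	 * traced function's ip and its parent's ip.
	 */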
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL_TOC(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl
	nop


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	ld	r11, 112(r1)
	addi	r3, r11, 16

	bl	prepare_ftrace_return
	nop

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -24(r1)
	std	r3,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -24(r1)
	ld	r3,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr

_GLOBAL(mod_return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We are in a module using the module's TOC.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */