signal_32.c revision c1cb299ead405f0ac065c4430729549b187e5b32
1/*
2 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3 *
4 *  PowerPC version
5 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Copyright (C) 2001 IBM
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9 *
10 *  Derived from "arch/i386/kernel/signal.c"
11 *    Copyright (C) 1991, 1992 Linus Torvalds
12 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
13 *
14 *  This program is free software; you can redistribute it and/or
15 *  modify it under the terms of the GNU General Public License
16 *  as published by the Free Software Foundation; either version
17 *  2 of the License, or (at your option) any later version.
18 */
19
20#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/kernel.h>
24#include <linux/signal.h>
25#include <linux/errno.h>
26#include <linux/elf.h>
27#include <linux/ptrace.h>
28#ifdef CONFIG_PPC64
29#include <linux/syscalls.h>
30#include <linux/compat.h>
31#else
32#include <linux/wait.h>
33#include <linux/unistd.h>
34#include <linux/stddef.h>
35#include <linux/tty.h>
36#include <linux/binfmts.h>
37#include <linux/freezer.h>
38#endif
39
40#include <asm/uaccess.h>
41#include <asm/cacheflush.h>
42#include <asm/syscalls.h>
43#include <asm/sigcontext.h>
44#include <asm/vdso.h>
45#ifdef CONFIG_PPC64
46#include "ppc32.h"
47#include <asm/unistd.h>
48#else
49#include <asm/ucontext.h>
50#include <asm/pgtable.h>
51#endif
52
53#include "signal.h"
54
55#undef DEBUG_SIG
56
57#ifdef CONFIG_PPC64
58#define sys_sigsuspend	compat_sys_sigsuspend
59#define sys_rt_sigsuspend	compat_sys_rt_sigsuspend
60#define sys_rt_sigreturn	compat_sys_rt_sigreturn
61#define sys_sigaction	compat_sys_sigaction
62#define sys_swapcontext	compat_sys_swapcontext
63#define sys_sigreturn	compat_sys_sigreturn
64
65#define old_sigaction	old_sigaction32
66#define sigcontext	sigcontext32
67#define mcontext	mcontext32
68#define ucontext	ucontext32
69
70/*
71 * Userspace code may pass a ucontext which doesn't include VSX added
72 * at the end.  We need to check for this case.
73 */
74#define UCONTEXTSIZEWITHOUTVSX \
75		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
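/*
 * A ucontext laid out by pre-VSX userspace is therefore expected to be
 * sizeof(struct ucontext) minus the trailing VSX half-register area;
 * sys_swapcontext() below rejects any ctx_size smaller than this value.
 */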
76
77/*
78 * Returning 0 means we return to userspace via
79 * ret_from_except and thus restore all user
80 * registers from *regs.  This is what we need
81 * to do when a signal has been delivered.
82 */
83
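/*
 * Copy at most the smaller of the user-visible 32-bit register set and
 * the kernel's pt_regs32, so a general-register copy in either direction
 * cannot overrun whichever of the two is shorter.
 */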
84#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
85#undef __SIGNAL_FRAMESIZE
86#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
87#undef ELF_NVRREG
88#define ELF_NVRREG	ELF_NVRREG32
89
90/*
91 * Functions for flipping sigsets (thanks to brain dead generic
92 * implementation that makes things simple for little endian only)
93 */
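/*
 * A quick sketch of the flip for the ppc64 case (_NSIG_WORDS == 1): a
 * 64-bit set->sig[0] of 0x0000000200004001 is split so that
 * cset.sig[0] = 0x00004001 (signals 1-32) and cset.sig[1] = 0x00000002
 * (signals 33-64); get_sigset_t() below recombines the two halves.
 */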
94static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
95{
96	compat_sigset_t	cset;
97
98	switch (_NSIG_WORDS) {
99	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
100		cset.sig[7] = set->sig[3] >> 32;
101	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
102		cset.sig[5] = set->sig[2] >> 32;
103	case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
104		cset.sig[3] = set->sig[1] >> 32;
105	case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
106		cset.sig[1] = set->sig[0] >> 32;
107	}
108	return copy_to_user(uset, &cset, sizeof(*uset));
109}
110
111static inline int get_sigset_t(sigset_t *set,
112			       const compat_sigset_t __user *uset)
113{
114	compat_sigset_t s32;
115
116	if (copy_from_user(&s32, uset, sizeof(*uset)))
117		return -EFAULT;
118
119	/*
120	 * Swap the 2 words of the 64-bit sigset_t (they are stored
121	 * in the "wrong" endian in 32-bit user storage).
122	 */
123	switch (_NSIG_WORDS) {
124	case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
125	case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
126	case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
127	case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
128	}
129	return 0;
130}
131
132static inline int get_old_sigaction(struct k_sigaction *new_ka,
133		struct old_sigaction __user *act)
134{
135	compat_old_sigset_t mask;
136	compat_uptr_t handler, restorer;
137
138	if (get_user(handler, &act->sa_handler) ||
139	    __get_user(restorer, &act->sa_restorer) ||
140	    __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
141	    __get_user(mask, &act->sa_mask))
142		return -EFAULT;
143	new_ka->sa.sa_handler = compat_ptr(handler);
144	new_ka->sa.sa_restorer = compat_ptr(restorer);
145	siginitset(&new_ka->sa.sa_mask, mask);
146	return 0;
147}
148
149#define to_user_ptr(p)		ptr_to_compat(p)
150#define from_user_ptr(p)	compat_ptr(p)
151
152static inline int save_general_regs(struct pt_regs *regs,
153		struct mcontext __user *frame)
154{
155	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
156	int i;
157
158	WARN_ON(!FULL_REGS(regs));
159
160	for (i = 0; i <= PT_RESULT; i ++) {
161		if (i == 14 && !FULL_REGS(regs))
162			i = 32;
163		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
164			return -EFAULT;
165	}
166	return 0;
167}
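/*
 * Note on the loop above: each 64-bit pt_regs entry is truncated to its
 * low 32 bits for the 32-bit frame, and (as a belt-and-braces check on
 * top of the WARN_ON) GPRs 14-31 are skipped when the frame does not
 * contain them, since the system-call entry path does not save the
 * non-volatile registers.
 */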
168
169static inline int restore_general_regs(struct pt_regs *regs,
170		struct mcontext __user *sr)
171{
172	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
173	int i;
174
175	for (i = 0; i <= PT_RESULT; i++) {
176		if ((i == PT_MSR) || (i == PT_SOFTE))
177			continue;
178		if (__get_user(gregs[i], &sr->mc_gregs[i]))
179			return -EFAULT;
180	}
181	return 0;
182}
183
184#else /* CONFIG_PPC64 */
185
186#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
187
188static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
189{
190	return copy_to_user(uset, set, sizeof(*uset));
191}
192
193static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
194{
195	return copy_from_user(set, uset, sizeof(*uset));
196}
197
198static inline int get_old_sigaction(struct k_sigaction *new_ka,
199		struct old_sigaction __user *act)
200{
201	old_sigset_t mask;
202
203	if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
204			__get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
205			__get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
206		return -EFAULT;
207	__get_user(new_ka->sa.sa_flags, &act->sa_flags);
208	__get_user(mask, &act->sa_mask);
209	siginitset(&new_ka->sa.sa_mask, mask);
210	return 0;
211}
212
213#define to_user_ptr(p)		((unsigned long)(p))
214#define from_user_ptr(p)	((void __user *)(p))
215
216static inline int save_general_regs(struct pt_regs *regs,
217		struct mcontext __user *frame)
218{
219	WARN_ON(!FULL_REGS(regs));
220	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
221}
222
223static inline int restore_general_regs(struct pt_regs *regs,
224		struct mcontext __user *sr)
225{
226	/* copy up to but not including MSR */
227	if (__copy_from_user(regs, &sr->mc_gregs,
228				PT_MSR * sizeof(elf_greg_t)))
229		return -EFAULT;
230	/* copy from orig_r3 (the word after the MSR) up to the end */
231	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
232				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
233		return -EFAULT;
234	return 0;
235}
236
237#endif /* CONFIG_PPC64 */
238
239/*
240 * Atomically swap in the new signal mask, and wait for a signal.
241 */
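/*
 * The previous mask is parked in current->saved_sigmask and
 * set_restore_sigmask() marks it for restoration on the way back to
 * userspace; returning -ERESTARTNOHAND means the call is only restarted
 * if no handler ran, and returns -EINTR otherwise, matching sigsuspend()
 * semantics.
 */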
242long sys_sigsuspend(old_sigset_t mask)
243{
244	mask &= _BLOCKABLE;
245	spin_lock_irq(&current->sighand->siglock);
246	current->saved_sigmask = current->blocked;
247	siginitset(&current->blocked, mask);
248	recalc_sigpending();
249	spin_unlock_irq(&current->sighand->siglock);
250
251 	current->state = TASK_INTERRUPTIBLE;
252 	schedule();
253	set_restore_sigmask();
254 	return -ERESTARTNOHAND;
255}
256
257long sys_sigaction(int sig, struct old_sigaction __user *act,
258		struct old_sigaction __user *oact)
259{
260	struct k_sigaction new_ka, old_ka;
261	int ret;
262
263#ifdef CONFIG_PPC64
264	if (sig < 0)
265		sig = -sig;
266#endif
267
268	if (act) {
269		if (get_old_sigaction(&new_ka, act))
270			return -EFAULT;
271	}
272
273	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
274	if (!ret && oact) {
275		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
276		    __put_user(to_user_ptr(old_ka.sa.sa_handler),
277			    &oact->sa_handler) ||
278		    __put_user(to_user_ptr(old_ka.sa.sa_restorer),
279			    &oact->sa_restorer) ||
280		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
281		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
282			return -EFAULT;
283	}
284
285	return ret;
286}
287
288/*
289 * When we have signals to deliver, we set up on the
290 * user stack, going down from the original stack pointer:
291 *	an ABI gap of 56 words
292 *	an mcontext struct
293 *	a sigcontext struct
294 *	a gap of __SIGNAL_FRAMESIZE bytes
295 *
296 * Each of these things must be a multiple of 16 bytes in size. The following
297 * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
298 *
299 */
300struct sigframe {
301	struct sigcontext sctx;		/* the sigcontext */
302	struct mcontext	mctx;		/* all the register values */
303	/*
304	 * Programs using the rs6000/xcoff abi can save up to 19 gp
305	 * regs and 18 fp regs below sp before decrementing it.
306	 */
307	int			abigap[56];
308};
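/*
 * Rough picture of the resulting user stack (a sketch, higher addresses
 * first): the caller's frame, then the 56-word abigap, then mctx, then
 * sctx (struct sigframe itself starts at sctx), and finally the
 * __SIGNAL_FRAMESIZE gap whose lowest word receives the back-chain
 * pointer and becomes the handler's stack pointer (newsp in
 * handle_signal32()).
 */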
309
310/* We use the mc_pad field for the signal return trampoline. */
311#define tramp	mc_pad
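/*
 * (mc_pad is a two-word pad in struct mcontext; reusing it keeps the
 * "li r0,NR; sc" trampoline written by save_user_regs() inside the
 * frame without growing the structure.)
 */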
312
313/*
314 *  When we have rt signals to deliver, we set up on the
315 *  user stack, going down from the original stack pointer:
316 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
317 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
318 *  (the +16 is to get the siginfo and ucontext in the same
319 *  positions as in older kernels).
320 *
321 *  Each of these things must be a multiple of 16 bytes in size.
322 *
323 */
324struct rt_sigframe {
325#ifdef CONFIG_PPC64
326	compat_siginfo_t info;
327#else
328	struct siginfo info;
329#endif
330	struct ucontext	uc;
331	/*
332	 * Programs using the rs6000/xcoff abi can save up to 19 gp
333	 * regs and 18 fp regs below sp before decrementing it.
334	 */
335	int			abigap[56];
336};
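/*
 * Rough layout for an RT signal (a sketch only): the rt_sigframe above
 * (siginfo, then ucontext, then the abigap) sits below the old stack
 * pointer, and the handler's new stack pointer is placed a further
 * __SIGNAL_FRAMESIZE + 16 bytes below it, as set up in
 * handle_rt_signal32().
 */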
337
338#ifdef CONFIG_VSX
339unsigned long copy_fpr_to_user(void __user *to,
340			       struct task_struct *task)
341{
342	double buf[ELF_NFPREG];
343	int i;
344
345	/* copy the FPRs and FPSCR into a local buffer, then write it out to userspace */
346	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
347		buf[i] = task->thread.TS_FPR(i);
348	memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
349	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
350}
351
352unsigned long copy_fpr_from_user(struct task_struct *task,
353				 void __user *from)
354{
355	double buf[ELF_NFPREG];
356	int i;
357
358	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
359		return 1;
360	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
361		task->thread.TS_FPR(i) = buf[i];
362	memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
363
364	return 0;
365}
366
367unsigned long copy_vsx_to_user(void __user *to,
368			       struct task_struct *task)
369{
370	double buf[ELF_NVSRHALFREG];
371	int i;
372
373	/* copy the upper halves of VSR0-31 into a local buffer, then write it out to userspace */
374	for (i = 0; i < ELF_NVSRHALFREG; i++)
375		buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
376	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
377}
378
379unsigned long copy_vsx_from_user(struct task_struct *task,
380				 void __user *from)
381{
382	double buf[ELF_NVSRHALFREG];
383	int i;
384
385	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
386		return 1;
387	for (i = 0; i < ELF_NVSRHALFREG ; i++)
388		task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
389	return 0;
390}
391#else
392inline unsigned long copy_fpr_to_user(void __user *to,
393				      struct task_struct *task)
394{
395	return __copy_to_user(to, task->thread.fpr,
396			      ELF_NFPREG * sizeof(double));
397}
398
399inline unsigned long copy_fpr_from_user(struct task_struct *task,
400					void __user *from)
401{
402	return __copy_from_user(task->thread.fpr, from,
403			      ELF_NFPREG * sizeof(double));
404}
405#endif
406
407/*
408 * Save the current user registers on the user stack.
409 * We only save the altivec/spe registers if the process has used
410 * altivec/spe instructions at some point.
411 */
412static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
413		int sigret)
414{
415	unsigned long msr = regs->msr;
416
417	/* Make sure floating point registers are stored in regs */
418	flush_fp_to_thread(current);
419
420	/* save general registers */
421	if (save_general_regs(regs, frame))
422		return 1;
423
424#ifdef CONFIG_ALTIVEC
425	/* save altivec registers */
426	if (current->thread.used_vr) {
427		flush_altivec_to_thread(current);
428		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
429				   ELF_NVRREG * sizeof(vector128)))
430			return 1;
431		/* set MSR_VEC in the saved MSR value to indicate that
432		   frame->mc_vregs contains valid data */
433		msr |= MSR_VEC;
434	}
435	/* else assert((regs->msr & MSR_VEC) == 0) */
436
437	/* We always copy to/from vrsave, it's 0 if we don't have or don't
438	 * use altivec. Since VSCR only contains 32 bits saved in the least
439	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
440	 * most significant bits of that same vector. --BenH
441	 */
442	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
443		return 1;
444#endif /* CONFIG_ALTIVEC */
445	if (copy_fpr_to_user(&frame->mc_fregs, current))
446		return 1;
447#ifdef CONFIG_VSX
448	/*
449	 * Copy VSR 0-31 upper half from thread_struct to local
450	 * buffer, then write that to userspace.  Also set MSR_VSX in
451	 * the saved MSR value to indicate that frame->mc_vsregs
452	 * contains valid data
453	 */
454	if (current->thread.used_vsr) {
455		flush_vsx_to_thread(current);
456		if (copy_vsx_to_user(&frame->mc_vsregs, current))
457			return 1;
458		msr |= MSR_VSX;
459	}
460#endif /* CONFIG_VSX */
461#ifdef CONFIG_SPE
462	/* save spe registers */
463	if (current->thread.used_spe) {
464		flush_spe_to_thread(current);
465		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
466				   ELF_NEVRREG * sizeof(u32)))
467			return 1;
468		/* set MSR_SPE in the saved MSR value to indicate that
469		   frame->mc_vregs contains valid data */
470		msr |= MSR_SPE;
471	}
472	/* else assert((regs->msr & MSR_SPE) == 0) */
473
474	/* We always copy to/from spefscr */
475	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
476		return 1;
477#endif /* CONFIG_SPE */
478
479	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
480		return 1;
481	if (sigret) {
482		/* Set up the sigreturn trampoline: li r0,sigret; sc */
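		/*
		 * 0x38000000 is the base encoding of "addi r0,0,0" (li r0,0),
		 * so adding sigret gives "li r0,sigret"; 0x44000002 is "sc".
		 */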
483		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
484		    || __put_user(0x44000002UL, &frame->tramp[1]))
485			return 1;
486		flush_icache_range((unsigned long) &frame->tramp[0],
487				   (unsigned long) &frame->tramp[2]);
488	}
489
490	return 0;
491}
492
493/*
494 * Restore the current user register values from the user stack,
495 * (except for MSR).
496 */
497static long restore_user_regs(struct pt_regs *regs,
498			      struct mcontext __user *sr, int sig)
499{
500	long err;
501	unsigned int save_r2 = 0;
502	unsigned long msr;
503#ifdef CONFIG_VSX
504	int i;
505#endif
506
507	/*
508	 * restore general registers but not including MSR or SOFTE. Also
509	 * take care of keeping r2 (TLS) intact if not a signal
510	 */
511	if (!sig)
512		save_r2 = (unsigned int)regs->gpr[2];
513	err = restore_general_regs(regs, sr);
514	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
515	if (!sig)
516		regs->gpr[2] = (unsigned long) save_r2;
517	if (err)
518		return 1;
519
520	/* if doing signal return, restore the previous little-endian mode */
521	if (sig)
522		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
523
524	/*
525	 * Do this before updating the thread state in
526	 * current->thread.fpr/vr/evr.  That way, if we get preempted
527	 * and another task grabs the FPU/Altivec/SPE, it won't be
528	 * tempted to save the current CPU state into the thread_struct
529	 * and corrupt what we are writing there.
530	 */
531	discard_lazy_cpu_state();
532
533#ifdef CONFIG_ALTIVEC
534	/*
535	 * Force the process to reload the altivec registers from
536	 * current->thread when it next does altivec instructions
537	 */
538	regs->msr &= ~MSR_VEC;
539	if (msr & MSR_VEC) {
540		/* restore altivec registers from the stack */
541		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
542				     sizeof(sr->mc_vregs)))
543			return 1;
544	} else if (current->thread.used_vr)
545		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
546
547	/* Always get VRSAVE back */
548	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
549		return 1;
550#endif /* CONFIG_ALTIVEC */
551	if (copy_fpr_from_user(current, &sr->mc_fregs))
552		return 1;
553
554#ifdef CONFIG_VSX
555	/*
556	 * Force the process to reload the VSX registers from
557	 * current->thread when it next does VSX instruction.
558	 */
559	regs->msr &= ~MSR_VSX;
560	if (msr & MSR_VSX) {
561		/*
562		 * Restore the VSX registers from the stack to a local
563		 * buffer, then write this out to the thread_struct
564		 */
565		if (copy_vsx_from_user(current, &sr->mc_vsregs))
566			return 1;
567	} else if (current->thread.used_vsr)
568		for (i = 0; i < 32 ; i++)
569			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
570#endif /* CONFIG_VSX */
571	/*
572	 * force the process to reload the FP registers from
573	 * current->thread when it next does FP instructions
574	 */
575	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
576
577#ifdef CONFIG_SPE
578	/* force the process to reload the spe registers from
579	   current->thread when it next does spe instructions */
580	regs->msr &= ~MSR_SPE;
581	if (msr & MSR_SPE) {
582		/* restore spe registers from the stack */
583		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
584				     ELF_NEVRREG * sizeof(u32)))
585			return 1;
586	} else if (current->thread.used_spe)
587		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
588
589	/* Always get SPEFSCR back */
590	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
591		return 1;
592#endif /* CONFIG_SPE */
593
594	return 0;
595}
596
597#ifdef CONFIG_PPC64
598long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
599		struct sigaction32 __user *oact, size_t sigsetsize)
600{
601	struct k_sigaction new_ka, old_ka;
602	int ret;
603
604	/* XXX: Don't preclude handling different sized sigset_t's.  */
605	if (sigsetsize != sizeof(compat_sigset_t))
606		return -EINVAL;
607
608	if (act) {
609		compat_uptr_t handler;
610
611		ret = get_user(handler, &act->sa_handler);
612		new_ka.sa.sa_handler = compat_ptr(handler);
613		ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
614		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
615		if (ret)
616			return -EFAULT;
617	}
618
619	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
620	if (!ret && oact) {
621		ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
622		ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
623		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
624	}
625	return ret;
626}
627
628/*
629 * Note: it is necessary to treat how as an unsigned int, with the
630 * corresponding cast to a signed int to ensure that the proper
631 * conversion (sign extension) between the register representation
632 * of a signed int (msr in 32-bit mode) and the register representation
633 * of a signed int (msr in 64-bit mode) is performed.
634 */
635long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
636		compat_sigset_t __user *oset, size_t sigsetsize)
637{
638	sigset_t s;
639	sigset_t __user *up;
640	int ret;
641	mm_segment_t old_fs = get_fs();
642
643	if (set) {
644		if (get_sigset_t(&s, set))
645			return -EFAULT;
646	}
647
648	set_fs(KERNEL_DS);
649	/* This is valid because of the set_fs() */
650	up = (sigset_t __user *) &s;
651	ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
652				 sigsetsize);
653	set_fs(old_fs);
654	if (ret)
655		return ret;
656	if (oset) {
657		if (put_sigset_t(oset, &s))
658			return -EFAULT;
659	}
660	return 0;
661}
662
663long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
664{
665	sigset_t s;
666	int ret;
667	mm_segment_t old_fs = get_fs();
668
669	set_fs(KERNEL_DS);
670	/* The __user pointer cast is valid because of the set_fs() */
671	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
672	set_fs(old_fs);
673	if (!ret) {
674		if (put_sigset_t(set, &s))
675			return -EFAULT;
676	}
677	return ret;
678}
679
680
681int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
682{
683	int err;
684
685	if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
686		return -EFAULT;
687
688	/* If you change siginfo_t structure, please be sure
689	 * this code is fixed accordingly.
690	 * It should never copy any pad contained in the structure
691	 * to avoid security leaks, but must copy the generic
692	 * 3 ints plus the relevant union member.
693	 * This routine must convert siginfo from 64bit to 32bit as well
694	 * at the same time.
695	 */
696	err = __put_user(s->si_signo, &d->si_signo);
697	err |= __put_user(s->si_errno, &d->si_errno);
698	err |= __put_user((short)s->si_code, &d->si_code);
699	if (s->si_code < 0)
700		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
701				      SI_PAD_SIZE32);
702	else switch(s->si_code >> 16) {
703	case __SI_CHLD >> 16:
704		err |= __put_user(s->si_pid, &d->si_pid);
705		err |= __put_user(s->si_uid, &d->si_uid);
706		err |= __put_user(s->si_utime, &d->si_utime);
707		err |= __put_user(s->si_stime, &d->si_stime);
708		err |= __put_user(s->si_status, &d->si_status);
709		break;
710	case __SI_FAULT >> 16:
711		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
712				  &d->si_addr);
713		break;
714	case __SI_POLL >> 16:
715		err |= __put_user(s->si_band, &d->si_band);
716		err |= __put_user(s->si_fd, &d->si_fd);
717		break;
718	case __SI_TIMER >> 16:
719		err |= __put_user(s->si_tid, &d->si_tid);
720		err |= __put_user(s->si_overrun, &d->si_overrun);
721		err |= __put_user(s->si_int, &d->si_int);
722		break;
723	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
724	case __SI_MESGQ >> 16:
725		err |= __put_user(s->si_int, &d->si_int);
726		/* fallthrough */
727	case __SI_KILL >> 16:
728	default:
729		err |= __put_user(s->si_pid, &d->si_pid);
730		err |= __put_user(s->si_uid, &d->si_uid);
731		break;
732	}
733	return err;
734}
735
736#define copy_siginfo_to_user	copy_siginfo_to_user32
737
738int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
739{
740	memset(to, 0, sizeof *to);
741
742	if (copy_from_user(to, from, 3*sizeof(int)) ||
743	    copy_from_user(to->_sifields._pad,
744			   from->_sifields._pad, SI_PAD_SIZE32))
745		return -EFAULT;
746
747	return 0;
748}
749
750/*
751 * Note: it is necessary to treat pid and sig as unsigned ints, with the
752 * corresponding cast to a signed int to ensure that the proper conversion
753 * (sign extension) between the register representation of a signed int
754 * (msr in 32-bit mode) and the register representation of a signed int
755 * (msr in 64-bit mode) is performed.
756 */
757long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
758{
759	siginfo_t info;
760	int ret;
761	mm_segment_t old_fs = get_fs();
762
763	ret = copy_siginfo_from_user32(&info, uinfo);
764	if (unlikely(ret))
765		return ret;
766
767	set_fs (KERNEL_DS);
768	/* The __user pointer cast is valid because of the set_fs() */
769	ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
770	set_fs (old_fs);
771	return ret;
772}
773/*
774 *  Start Alternate signal stack support
775 *
776 *  System Calls
777 *       sigaltstack              compat_sys_sigaltstack
778 */
779
780int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
781		      int r6, int r7, int r8, struct pt_regs *regs)
782{
783	stack_32_t __user * newstack = compat_ptr(__new);
784	stack_32_t __user * oldstack = compat_ptr(__old);
785	stack_t uss, uoss;
786	int ret;
787	mm_segment_t old_fs;
788	unsigned long sp;
789	compat_uptr_t ss_sp;
790
791	/*
792	 * set sp to the user stack on entry to the system call
793	 * the system call router sets R9 to the saved registers
794	 */
795	sp = regs->gpr[1];
796
797	/* Put new stack info in local 64 bit stack struct */
798	if (newstack) {
799		if (get_user(ss_sp, &newstack->ss_sp) ||
800		    __get_user(uss.ss_flags, &newstack->ss_flags) ||
801		    __get_user(uss.ss_size, &newstack->ss_size))
802			return -EFAULT;
803		uss.ss_sp = compat_ptr(ss_sp);
804	}
805
806	old_fs = get_fs();
807	set_fs(KERNEL_DS);
808	/* The __user pointer casts are valid because of the set_fs() */
809	ret = do_sigaltstack(
810		newstack ? (stack_t __user *) &uss : NULL,
811		oldstack ? (stack_t __user *) &uoss : NULL,
812		sp);
813	set_fs(old_fs);
814	/* Copy the stack information to the user output buffer */
815	if (!ret && oldstack  &&
816		(put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
817		 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
818		 __put_user(uoss.ss_size, &oldstack->ss_size)))
819		return -EFAULT;
820	return ret;
821}
822#endif /* CONFIG_PPC64 */
823
824/*
825 * Set up a signal frame for a "real-time" signal handler
826 * (one which gets siginfo).
827 */
828int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
829		siginfo_t *info, sigset_t *oldset,
830		struct pt_regs *regs)
831{
832	struct rt_sigframe __user *rt_sf;
833	struct mcontext __user *frame;
834	void __user *addr;
835	unsigned long newsp = 0;
836
837	/* Set up Signal Frame */
838	/* Put a Real Time Context onto stack */
839	rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf));
840	addr = rt_sf;
841	if (unlikely(rt_sf == NULL))
842		goto badframe;
843
844	/* Put the siginfo & fill in most of the ucontext */
845	if (copy_siginfo_to_user(&rt_sf->info, info)
846	    || __put_user(0, &rt_sf->uc.uc_flags)
847	    || __put_user(0, &rt_sf->uc.uc_link)
848	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
849	    || __put_user(sas_ss_flags(regs->gpr[1]),
850			  &rt_sf->uc.uc_stack.ss_flags)
851	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
852	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
853		    &rt_sf->uc.uc_regs)
854	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
855		goto badframe;
856
857	/* Save user registers on the stack */
858	frame = &rt_sf->uc.uc_mcontext;
859	addr = frame;
860	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
861		if (save_user_regs(regs, frame, 0))
862			goto badframe;
863		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
864	} else {
865		if (save_user_regs(regs, frame, __NR_rt_sigreturn))
866			goto badframe;
867		regs->link = (unsigned long) frame->tramp;
868	}
869
870	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
871
872	/* create a stack frame for the caller of the handler */
873	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
874	addr = (void __user *)regs->gpr[1];
875	if (put_user(regs->gpr[1], (u32 __user *)newsp))
876		goto badframe;
877
878	/* Fill registers for signal handler */
879	regs->gpr[1] = newsp;
880	regs->gpr[3] = sig;
881	regs->gpr[4] = (unsigned long) &rt_sf->info;
882	regs->gpr[5] = (unsigned long) &rt_sf->uc;
883	regs->gpr[6] = (unsigned long) rt_sf;
884	regs->nip = (unsigned long) ka->sa.sa_handler;
885	/* enter the signal handler in big-endian mode */
886	regs->msr &= ~MSR_LE;
887	regs->trap = 0;
888	return 1;
889
890badframe:
891#ifdef DEBUG_SIG
892	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
893	       regs, frame, newsp);
894#endif
895	if (show_unhandled_signals && printk_ratelimit())
896		printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: "
897			"%p nip %08lx lr %08lx\n",
898			current->comm, current->pid,
899			addr, regs->nip, regs->link);
900
901	force_sigsegv(sig, current);
902	return 0;
903}
904
905static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
906{
907	sigset_t set;
908	struct mcontext __user *mcp;
909
910	if (get_sigset_t(&set, &ucp->uc_sigmask))
911		return -EFAULT;
912#ifdef CONFIG_PPC64
913	{
914		u32 cmcp;
915
916		if (__get_user(cmcp, &ucp->uc_regs))
917			return -EFAULT;
918		mcp = (struct mcontext __user *)(u64)cmcp;
919		/* no need to check access_ok(mcp), since mcp < 4GB */
920	}
921#else
922	if (__get_user(mcp, &ucp->uc_regs))
923		return -EFAULT;
924	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
925		return -EFAULT;
926#endif
927	restore_sigmask(&set);
928	if (restore_user_regs(regs, mcp, sig))
929		return -EFAULT;
930
931	return 0;
932}
933
934long sys_swapcontext(struct ucontext __user *old_ctx,
935		     struct ucontext __user *new_ctx,
936		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
937{
938	unsigned char tmp;
939
940#ifdef CONFIG_PPC64
941	unsigned long new_msr = 0;
942
943	if (new_ctx &&
944	    __get_user(new_msr, &new_ctx->uc_mcontext.mc_gregs[PT_MSR]))
945		return -EFAULT;
946	/*
947	 * Check that the context is not smaller than the original
948	 * size (with VMX but without VSX)
949	 */
950	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
951		return -EINVAL;
952	/*
953	 * If the new context state sets the MSR VSX bits but
954	 * it doesn't provide VSX state, fail the call.
955	 */
956	if ((ctx_size < sizeof(struct ucontext)) &&
957	    (new_msr & MSR_VSX))
958		return -EINVAL;
959#ifdef CONFIG_VSX
960	/*
961	 * If userspace doesn't provide enough room for VSX data,
962	 * but current thread has used VSX, we don't have anywhere
963	 * to store the full context back into.
964	 */
965	if ((ctx_size < sizeof(struct ucontext)) &&
966	    (current->thread.used_vsr && old_ctx))
967		return -EINVAL;
968#endif
969#else
970	/* Context size is for future use. Right now, we only make sure
971	 * we are passed something we understand
972	 */
973	if (ctx_size < sizeof(struct ucontext))
974		return -EINVAL;
975#endif
976	if (old_ctx != NULL) {
977		struct mcontext __user *mctx;
978
979		/*
980		 * old_ctx might not be 16-byte aligned, in which
981		 * case old_ctx->uc_mcontext won't be either.
982		 * Because we have the old_ctx->uc_pad2 field
983		 * before old_ctx->uc_mcontext, we need to round down
984		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
985		 */
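		/*
		 * (Rounding down is safe here: any bytes stepped over lie
		 * within old_ctx->uc_pad2, which carries no state.)
		 */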
986		mctx = (struct mcontext __user *)
987			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
988		if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
989		    || save_user_regs(regs, mctx, 0)
990		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
991		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
992			return -EFAULT;
993	}
994	if (new_ctx == NULL)
995		return 0;
996	if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
997	    || __get_user(tmp, (u8 __user *) new_ctx)
998	    || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
999		return -EFAULT;
1000
1001	/*
1002	 * If we get a fault copying the context into the kernel's
1003	 * image of the user's registers, we can't just return -EFAULT
1004	 * because the user's registers will be corrupted.  For instance
1005	 * the NIP value may have been updated but not some of the
1006	 * other registers.  Given that we have done the access_ok
1007	 * and successfully read the first and last bytes of the region
1008	 * above, this should only happen in an out-of-memory situation
1009	 * or if another thread unmaps the region containing the context.
1010	 * We kill the task with a SIGSEGV in this situation.
1011	 */
1012	if (do_setcontext(new_ctx, regs, 0))
1013		do_exit(SIGSEGV);
1014
1015	set_thread_flag(TIF_RESTOREALL);
1016	return 0;
1017}
1018
1019long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1020		     struct pt_regs *regs)
1021{
1022	struct rt_sigframe __user *rt_sf;
1023
1024	/* Always make any pending restarted system calls return -EINTR */
1025	current_thread_info()->restart_block.fn = do_no_restart_syscall;
1026
1027	rt_sf = (struct rt_sigframe __user *)
1028		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1029	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1030		goto bad;
1031	if (do_setcontext(&rt_sf->uc, regs, 1))
1032		goto bad;
1033
1034	/*
1035	 * It's not clear whether or why it is desirable to save the
1036	 * sigaltstack setting on signal delivery and restore it on
1037	 * signal return.  But other architectures do this and we have
1038	 * always done it up until now so it is probably better not to
1039	 * change it.  -- paulus
1040	 */
1041#ifdef CONFIG_PPC64
1042	/*
1043	 * We use the compat_sys_ version that does the 32/64 bits conversion
1044	 * and takes userland pointer directly. What about error checking ?
1045	 * nobody does any...
1046	 */
1047	compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
1048#else
1049	do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
1050#endif
1051	set_thread_flag(TIF_RESTOREALL);
1052	return 0;
1053
1054 bad:
1055	if (show_unhandled_signals && printk_ratelimit())
1056		printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: "
1057			"%p nip %08lx lr %08lx\n",
1058			current->comm, current->pid,
1059			rt_sf, regs->nip, regs->link);
1060
1061	force_sig(SIGSEGV, current);
1062	return 0;
1063}
1064
1065#ifdef CONFIG_PPC32
1066int sys_debug_setcontext(struct ucontext __user *ctx,
1067			 int ndbg, struct sig_dbg_op __user *dbg,
1068			 int r6, int r7, int r8,
1069			 struct pt_regs *regs)
1070{
1071	struct sig_dbg_op op;
1072	int i;
1073	unsigned char tmp;
1074	unsigned long new_msr = regs->msr;
1075#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1076	unsigned long new_dbcr0 = current->thread.dbcr0;
1077#endif
1078
1079	for (i=0; i<ndbg; i++) {
1080		if (copy_from_user(&op, dbg + i, sizeof(op)))
1081			return -EFAULT;
1082		switch (op.dbg_type) {
1083		case SIG_DBG_SINGLE_STEPPING:
1084#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1085			if (op.dbg_value) {
1086				new_msr |= MSR_DE;
1087				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1088			} else {
1089				new_msr &= ~MSR_DE;
1090				new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
1091			}
1092#else
1093			if (op.dbg_value)
1094				new_msr |= MSR_SE;
1095			else
1096				new_msr &= ~MSR_SE;
1097#endif
1098			break;
1099		case SIG_DBG_BRANCH_TRACING:
1100#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1101			return -EINVAL;
1102#else
1103			if (op.dbg_value)
1104				new_msr |= MSR_BE;
1105			else
1106				new_msr &= ~MSR_BE;
1107#endif
1108			break;
1109
1110		default:
1111			return -EINVAL;
1112		}
1113	}
1114
1115	/* We wait until here to actually install the values in the
1116	   registers so if we fail in the above loop, it will not
1117	   affect the contents of these registers.  After this point,
1118	   failure is a problem, anyway, and it's very unlikely unless
1119	   the user is really doing something wrong. */
1120	regs->msr = new_msr;
1121#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1122	current->thread.dbcr0 = new_dbcr0;
1123#endif
1124
1125	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
1126	    || __get_user(tmp, (u8 __user *) ctx)
1127	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
1128		return -EFAULT;
1129
1130	/*
1131	 * If we get a fault copying the context into the kernel's
1132	 * image of the user's registers, we can't just return -EFAULT
1133	 * because the user's registers will be corrupted.  For instance
1134	 * the NIP value may have been updated but not some of the
1135	 * other registers.  Given that we have done the access_ok
1136	 * and successfully read the first and last bytes of the region
1137	 * above, this should only happen in an out-of-memory situation
1138	 * or if another thread unmaps the region containing the context.
1139	 * We kill the task with a SIGSEGV in this situation.
1140	 */
1141	if (do_setcontext(ctx, regs, 1)) {
1142		if (show_unhandled_signals && printk_ratelimit())
1143			printk(KERN_INFO "%s[%d]: bad frame in "
1144				"sys_debug_setcontext: %p nip %08lx "
1145				"lr %08lx\n",
1146				current->comm, current->pid,
1147				ctx, regs->nip, regs->link);
1148
1149		force_sig(SIGSEGV, current);
1150		goto out;
1151	}
1152
1153	/*
1154	 * It's not clear whether or why it is desirable to save the
1155	 * sigaltstack setting on signal delivery and restore it on
1156	 * signal return.  But other architectures do this and we have
1157	 * always done it up until now so it is probably better not to
1158	 * change it.  -- paulus
1159	 */
1160	do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
1161
1162	set_thread_flag(TIF_RESTOREALL);
1163 out:
1164	return 0;
1165}
1166#endif
1167
1168/*
1169 * OK, we're invoking a handler
1170 */
1171int handle_signal32(unsigned long sig, struct k_sigaction *ka,
1172		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
1173{
1174	struct sigcontext __user *sc;
1175	struct sigframe __user *frame;
1176	unsigned long newsp = 0;
1177
1178	/* Set up Signal Frame */
1179	frame = get_sigframe(ka, regs, sizeof(*frame));
1180	if (unlikely(frame == NULL))
1181		goto badframe;
1182	sc = (struct sigcontext __user *) &frame->sctx;
1183
1184#if _NSIG != 64
1185#error "Please adjust handle_signal()"
1186#endif
1187	if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
1188	    || __put_user(oldset->sig[0], &sc->oldmask)
1189#ifdef CONFIG_PPC64
1190	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1191#else
1192	    || __put_user(oldset->sig[1], &sc->_unused[3])
1193#endif
1194	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1195	    || __put_user(sig, &sc->signal))
1196		goto badframe;
1197
1198	if (vdso32_sigtramp && current->mm->context.vdso_base) {
1199		if (save_user_regs(regs, &frame->mctx, 0))
1200			goto badframe;
1201		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
1202	} else {
1203		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
1204			goto badframe;
1205		regs->link = (unsigned long) frame->mctx.tramp;
1206	}
1207
1208	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
1209
1210	/* create a stack frame for the caller of the handler */
1211	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1212	if (put_user(regs->gpr[1], (u32 __user *)newsp))
1213		goto badframe;
1214
1215	regs->gpr[1] = newsp;
1216	regs->gpr[3] = sig;
1217	regs->gpr[4] = (unsigned long) sc;
1218	regs->nip = (unsigned long) ka->sa.sa_handler;
1219	/* enter the signal handler in big-endian mode */
1220	regs->msr &= ~MSR_LE;
1221	regs->trap = 0;
1222
1223	return 1;
1224
1225badframe:
1226#ifdef DEBUG_SIG
1227	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
1228	       regs, frame, newsp);
1229#endif
1230	if (show_unhandled_signals && printk_ratelimit())
1231		printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: "
1232			"%p nip %08lx lr %08lx\n",
1233			current->comm, current->pid,
1234			frame, regs->nip, regs->link);
1235
1236	force_sigsegv(sig, current);
1237	return 0;
1238}
1239
1240/*
1241 * Do a signal return; undo the signal stack.
1242 */
1243long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1244		       struct pt_regs *regs)
1245{
1246	struct sigcontext __user *sc;
1247	struct sigcontext sigctx;
1248	struct mcontext __user *sr;
1249	void __user *addr;
1250	sigset_t set;
1251
1252	/* Always make any pending restarted system calls return -EINTR */
1253	current_thread_info()->restart_block.fn = do_no_restart_syscall;
1254
1255	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1256	addr = sc;
1257	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1258		goto badframe;
1259
1260#ifdef CONFIG_PPC64
1261	/*
1262	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1263	 * unused part of the signal stackframe
1264	 */
1265	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1266#else
1267	set.sig[0] = sigctx.oldmask;
1268	set.sig[1] = sigctx._unused[3];
1269#endif
1270	restore_sigmask(&set);
1271
1272	sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1273	addr = sr;
1274	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1275	    || restore_user_regs(regs, sr, 1))
1276		goto badframe;
1277
1278	set_thread_flag(TIF_RESTOREALL);
1279	return 0;
1280
1281badframe:
1282	if (show_unhandled_signals && printk_ratelimit())
1283		printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: "
1284			"%p nip %08lx lr %08lx\n",
1285			current->comm, current->pid,
1286			addr, regs->nip, regs->link);
1287
1288	force_sig(SIGSEGV, current);
1289	return 0;
1290}
1291