signal_32.c revision 77eb50aefa5dd2337246dce8b66e18e837c1a8bc
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/freezer.h>
#endif

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#include "signal.h"

#undef DEBUG_SIG

#ifdef CONFIG_PPC64
#define sys_sigsuspend	compat_sys_sigsuspend
#define sys_rt_sigsuspend	compat_sys_rt_sigsuspend
#define sys_rt_sigreturn	compat_sys_rt_sigreturn
#define sys_sigaction	compat_sys_sigaction
#define sys_swapcontext	compat_sys_swapcontext
#define sys_sigreturn	compat_sys_sigreturn

#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Userspace code may pass a ucontext which doesn't include the VSX
 * state added at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	compat_sigset_t	cset;

	switch (_NSIG_WORDS) {
	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
		cset.sig[7] = set->sig[3] >> 32;
	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
		cset.sig[5] = set->sig[2] >> 32;
	case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
		cset.sig[3] = set->sig[1] >> 32;
	case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
		cset.sig[1] = set->sig[0] >> 32;
	}
	return copy_to_user(uset, &cset, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	compat_sigset_t s32;

	if (copy_from_user(&s32, uset, sizeof(*uset)))
		return -EFAULT;

	/*
	 * Swap the 2 words of the 64-bit sigset_t (they are stored
	 * in the "wrong" endian in 32-bit user storage).
	 */
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
	case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
	case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
	case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
	}
	return 0;
}
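
/*
 * Illustrative sketch (not built): how a single 64-bit sigset_t word is
 * split into two 32-bit compat words by put_sigset_t() and reassembled
 * by get_sigset_t().  The helper name and the value are made up purely
 * for illustration.
 */
#if 0
static void sigset_split_example(void)
{
	unsigned long long word = 0x123456789abcdef0ULL;	/* set->sig[0] */
	unsigned int lo = word & 0xffffffffull;	/* cset.sig[0] == 0x9abcdef0 */
	unsigned int hi = word >> 32;		/* cset.sig[1] == 0x12345678 */

	/* get_sigset_t() undoes the split */
	BUG_ON((lo | ((unsigned long long)hi << 32)) != word);
}
#endif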

static inline int get_old_sigaction(struct k_sigaction *new_ka,
		struct old_sigaction __user *act)
{
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (get_user(handler, &act->sa_handler) ||
	    __get_user(restorer, &act->sa_restorer) ||
	    __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
	    __get_user(mask, &act->sa_mask))
		return -EFAULT;
	new_ka->sa.sa_handler = compat_ptr(handler);
	new_ka->sa.sa_restorer = compat_ptr(restorer);
	siginitset(&new_ka->sa.sa_mask, mask);
	return 0;
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i ++) {
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

static inline int get_old_sigaction(struct k_sigaction *new_ka,
		struct old_sigaction __user *act)
{
	old_sigset_t mask;

	if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
			__get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
			__get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
		return -EFAULT;
	__get_user(new_ka->sa.sa_flags, &act->sa_flags);
	__get_user(mask, &act->sa_mask);
	siginitset(&new_ka->sa.sa_mask, mask);
	return 0;
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}

#endif /* CONFIG_PPC64 */

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
long sys_sigsuspend(old_sigset_t mask)
{
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

long sys_sigaction(int sig, struct old_sigaction __user *act,
		struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

#ifdef CONFIG_PPC64
	if (sig < 0)
		sig = -sig;
#endif

	if (act) {
		if (get_old_sigaction(&new_ka, act))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(to_user_ptr(old_ka.sa.sa_handler),
			    &oact->sa_handler) ||
		    __put_user(to_user_ptr(old_ka.sa.sa_restorer),
			    &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
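
/*
 * Sketch (not built) of how this frame is found again at sigreturn
 * time: the handler runs with r1 pointing at the back-chain word below
 * the frame, so sys_sigreturn() below looks __SIGNAL_FRAMESIZE bytes
 * above the saved r1.  The helper name is made up for illustration.
 */
#if 0
static struct sigframe __user *locate_sigframe(struct pt_regs *regs)
{
	return (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
}
#endif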

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
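
/*
 * Sketch (not built): the register state handle_rt_signal32() below
 * hands to the 32-bit handler, and how sys_rt_sigreturn() recovers the
 * frame again.  The helper name is made up for illustration.
 */
#if 0
static struct rt_sigframe __user *locate_rt_sigframe(struct pt_regs *regs)
{
	/* handler entry: r3 = sig, r4 = &rt_sf->info, r5 = &rt_sf->uc */
	return (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
}
#endif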

#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	double buf[ELF_NFPREG];
	int i;

	/* copy the FPRs from the thread_struct to a local buffer,
	 * then write that buffer out to userspace */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	double buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	memcpy(&task->thread.fpscr, &buf[i], sizeof(double));

	return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	double buf[ELF_NVSRHALFREG];
	int i;

	/* copy the low halves of VSR 0-31 from the thread_struct to a
	 * local buffer, then write that buffer out to userspace */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	double buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#else
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fpr, from,
			      ELF_NFPREG * sizeof(double));
}
#endif
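
/*
 * Layout sketch (not built): the buffer produced by copy_fpr_to_user()
 * is ELF_NFPREG doubles, i.e. fr0..fr31 followed by the FPSCR in the
 * last slot, which is what lands in mc_fregs[] below.  The helper name
 * is made up for illustration.
 */
#if 0
static void fpr_buffer_layout_example(const double *buf)
{
	double fr0   = buf[0];			/* first FP register */
	double fr31  = buf[ELF_NFPREG - 2];	/* last FP register */
	double fpscr = buf[ELF_NFPREG - 1];	/* status/control word */

	(void)fr0; (void)fr31; (void)fpscr;
}
#endif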

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
		int sigret, int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		__giveup_vsx(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
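
/*
 * Worked example (not built) of the trampoline written by
 * save_user_regs() above:
 *	li r0,sigret	is  0x38000000 | sigret	 (addi r0,0,sigret)
 *	sc		is  0x44000002
 * so returning from the handler simply loads r0 with the sigreturn
 * syscall number and traps back into the kernel.  The helper name is
 * made up for illustration.
 */
#if 0
static void encode_sigreturn_tramp(u32 __user *tramp, int sigret)
{
	__put_user(0x38000000UL + sigret, &tramp[0]);	/* li r0,sigret */
	__put_user(0x44000002UL, &tramp[1]);		/* sc */
}
#endif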

/*
 * Restore the current user register values from the user stack,
 * except for MSR.
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr/evr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec/SPE, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
	} else if (current->thread.used_vr)
		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore the VSX register halves from the stack to a
		 * local buffer, then write them out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}

#ifdef CONFIG_PPC64
long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
		struct sigaction32 __user *oact, size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;

		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
		ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
	}
	return ret;
}

/*
 * Note: it is necessary to treat how as an unsigned int, with the
 * corresponding cast to a signed int to ensure that the proper
 * conversion (sign extension) between the 32-bit and 64-bit register
 * representations of a signed int is performed.
 */
long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
		compat_sigset_t __user *oset, size_t sigsetsize)
{
	sigset_t s;
	sigset_t __user *up;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (get_sigset_t(&s, set))
			return -EFAULT;
	}

	set_fs(KERNEL_DS);
	/* This is valid because of the set_fs() */
	up = (sigset_t __user *) &s;
	ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
				 sigsetsize);
	set_fs(old_fs);
	if (ret)
		return ret;
	if (oset) {
		if (put_sigset_t(oset, &s))
			return -EFAULT;
	}
	return 0;
}
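
/*
 * Illustration (not built) of the sign-extension note above: a 32-bit
 * task passes "how" in the low word of a 64-bit register, so a negative
 * value only becomes negative again after the u32 -> int cast.
 */
#if 0
static void compat_arg_sign_example(void)
{
	u32 how = 0xffffffff;		/* 32-bit userspace passed -1 */

	BUG_ON((long)how < 0);		/* zero-extended: looks positive */
	BUG_ON((int)how != -1);		/* the cast restores the sign */
}
#endif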

long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
{
	sigset_t s;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* The __user pointer cast is valid because of the set_fs() */
	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
	set_fs(old_fs);
	if (!ret) {
		if (put_sigset_t(set, &s))
			return -EFAULT;
	}
	return ret;
}


int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
{
	int err;

	if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 * This routine must convert siginfo from 64bit to 32bit as well
	 * at the same time.
	 */
	err = __put_user(s->si_signo, &d->si_signo);
	err |= __put_user(s->si_errno, &d->si_errno);
	err |= __put_user((short)s->si_code, &d->si_code);
	if (s->si_code < 0)
		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
				      SI_PAD_SIZE32);
	else switch(s->si_code >> 16) {
	case __SI_CHLD >> 16:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		err |= __put_user(s->si_utime, &d->si_utime);
		err |= __put_user(s->si_stime, &d->si_stime);
		err |= __put_user(s->si_status, &d->si_status);
		break;
	case __SI_FAULT >> 16:
		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
				  &d->si_addr);
		break;
	case __SI_POLL >> 16:
		err |= __put_user(s->si_band, &d->si_band);
		err |= __put_user(s->si_fd, &d->si_fd);
		break;
	case __SI_TIMER >> 16:
		err |= __put_user(s->si_tid, &d->si_tid);
		err |= __put_user(s->si_overrun, &d->si_overrun);
		err |= __put_user(s->si_int, &d->si_int);
		break;
	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ >> 16:
		err |= __put_user(s->si_int, &d->si_int);
		/* fallthrough */
	case __SI_KILL >> 16:
	default:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		break;
	}
	return err;
}

#define copy_siginfo_to_user	copy_siginfo_to_user32

int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
	memset(to, 0, sizeof *to);

	if (copy_from_user(to, from, 3*sizeof(int)) ||
	    copy_from_user(to->_sifields._pad,
			   from->_sifields._pad, SI_PAD_SIZE32))
		return -EFAULT;

	return 0;
}

/*
 * Note: it is necessary to treat pid and sig as unsigned ints, with the
 * corresponding cast to a signed int to ensure that the proper
 * conversion (sign extension) between the 32-bit and 64-bit register
 * representations of a signed int is performed.
 */
long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;

	set_fs (KERNEL_DS);
	/* The __user pointer cast is valid because of the set_fs() */
	ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
	set_fs (old_fs);
	return ret;
}
/*
 *  Start Alternate signal stack support
 *
 *  System Calls
 *       sigaltstack              compat_sys_sigaltstack
 */

int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
		      int r6, int r7, int r8, struct pt_regs *regs)
{
	stack_32_t __user * newstack = compat_ptr(__new);
	stack_32_t __user * oldstack = compat_ptr(__old);
	stack_t uss, uoss;
	int ret;
	mm_segment_t old_fs;
	unsigned long sp;
	compat_uptr_t ss_sp;

	/*
	 * set sp to the user stack on entry to the system call
	 * the system call router sets R9 to the saved registers
	 */
	sp = regs->gpr[1];

	/* Put new stack info in local 64 bit stack struct */
	if (newstack) {
		if (get_user(ss_sp, &newstack->ss_sp) ||
		    __get_user(uss.ss_flags, &newstack->ss_flags) ||
		    __get_user(uss.ss_size, &newstack->ss_size))
			return -EFAULT;
		uss.ss_sp = compat_ptr(ss_sp);
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	/* The __user pointer casts are valid because of the set_fs() */
	ret = do_sigaltstack(
		newstack ? (stack_t __user *) &uss : NULL,
		oldstack ? (stack_t __user *) &uoss : NULL,
		sp);
	set_fs(old_fs);
	/* Copy the stack information to the user output buffer */
	if (!ret && oldstack &&
		(put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
		 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
		 __put_user(uoss.ss_size, &oldstack->ss_size)))
		return -EFAULT;
	return ret;
}
#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
		siginfo_t *info, sigset_t *oldset,
		struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	void __user *addr;
	unsigned long newsp = 0;

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf));
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __put_user(0, &rt_sf->uc.uc_link)
	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
	    || __put_user(sas_ss_flags(regs->gpr[1]),
			  &rt_sf->uc.uc_stack.ss_flags)
	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, frame, 0, 1))
			goto badframe;
		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1))
			goto badframe;
		regs->link = (unsigned long) frame->tramp;
	}

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	regs->trap = 0;
	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals && printk_ratelimit())
		printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: "
			"%p nip %08lx lr %08lx\n",
			current->comm, current->pid,
			addr, regs->nip, regs->link);

	force_sigsegv(sig, current);
	return 0;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	restore_sigmask(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

long sys_swapcontext(struct ucontext __user *old_ctx,
		     struct ucontext __user *new_ctx,
		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
	unsigned char tmp;
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject a new context that sets the MSR VSX bits but doesn't
	 * provide any room for the VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;
}
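
/*
 * Sketch (not built) of the alignment fix-up used in sys_swapcontext()
 * above: uc_mcontext need not be 16-byte aligned, so the saved register
 * area is rounded down to the previous 16-byte boundary.  The helper
 * name is made up for illustration.
 */
#if 0
static struct mcontext __user *aligned_mcontext(struct ucontext __user *ctx)
{
	return (struct mcontext __user *)
		((unsigned long)&ctx->uc_mcontext & ~0xfUL);
}
#endif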

long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		     struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
		goto bad;
	if (do_setcontext(&rt_sf->uc, regs, 1))
		goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	/*
	 * We use the compat_sys_ version that does the 32/64 bits conversion
	 * and takes userland pointer directly. What about error checking?
	 * nobody does any...
	 */
	compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
#else
	do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals && printk_ratelimit())
		printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: "
			"%p nip %08lx lr %08lx\n",
			current->comm, current->pid,
			rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}

#ifdef CONFIG_PPC32
int sys_debug_setcontext(struct ucontext __user *ctx,
			 int ndbg, struct sig_dbg_op __user *dbg,
			 int r6, int r7, int r8,
			 struct pt_regs *regs)
{
	struct sig_dbg_op op;
	int i;
	unsigned char tmp;
	unsigned long new_msr = regs->msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	unsigned long new_dbcr0 = current->thread.dbcr0;
#endif

	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_msr &= ~MSR_DE;
				new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	current->thread.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
	    || __get_user(tmp, (u8 __user *) ctx)
	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals && printk_ratelimit())
			printk(KERN_INFO "%s[%d]: bad frame in "
				"sys_debug_setcontext: %p nip %08lx "
				"lr %08lx\n",
				current->comm, current->pid,
				ctx, regs->nip, regs->link);

		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
int handle_signal32(unsigned long sig, struct k_sigaction *ka,
		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	unsigned long newsp = 0;

	/* Set up Signal Frame */
	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, &frame->mctx, 0, 1))
			goto badframe;
		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1))
			goto badframe;
		regs->link = (unsigned long) frame->mctx.tramp;
	}

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	regs->trap = 0;

	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals && printk_ratelimit())
		printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: "
			"%p nip %08lx lr %08lx\n",
			current->comm, current->pid,
			frame, regs->nip, regs->link);

	force_sigsegv(sig, current);
	return 0;
}

/*
 * Do a signal return; undo the signal stack.
 */
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		       struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	restore_sigmask(&set);

	sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
	addr = sr;
	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
	    || restore_user_regs(regs, sr, 1))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals && printk_ratelimit())
		printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: "
			"%p nip %08lx lr %08lx\n",
			current->comm, current->pid,
			addr, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}