signal_32.c revision 6a274c08f2f4dfac7167bbd849621f3a2b55d424
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/freezer.h>
#endif

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#include "signal.h"

#undef DEBUG_SIG

#ifdef CONFIG_PPC64
#define sys_sigsuspend	compat_sys_sigsuspend
#define sys_rt_sigsuspend	compat_sys_rt_sigsuspend
#define sys_rt_sigreturn	compat_sys_rt_sigreturn
#define sys_sigaction	compat_sys_sigaction
#define sys_swapcontext	compat_sys_swapcontext
#define sys_sigreturn	compat_sys_sigreturn

#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	compat_sigset_t	cset;

	switch (_NSIG_WORDS) {
	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
		cset.sig[7] = set->sig[3] >> 32;
	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
		cset.sig[5] = set->sig[2] >> 32;
	case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
		cset.sig[3] = set->sig[1] >> 32;
	case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
		cset.sig[1] = set->sig[0] >> 32;
	}
	return copy_to_user(uset, &cset, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	compat_sigset_t s32;

	if (copy_from_user(&s32, uset, sizeof(*uset)))
		return -EFAULT;

	/*
	 * Swap the 2 words of the 64-bit sigset_t (they are stored
	 * in the "wrong" endian in 32-bit user storage).
	 */
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
	case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
	case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
	case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
	}
	return 0;
}
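
/*
 * Layout example for the helpers above: on the 64-bit side _NSIG_WORDS
 * is 1, so only "case 1" runs and the single 64-bit word set->sig[0] is
 * split into the two 32-bit words sig[0] (low half) and sig[1] (high
 * half) of the compat_sigset_t.  A mask containing only signal 10, for
 * instance, ends up as 1 << 9 in sig[0] and 0 in sig[1].
 */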

static inline int get_old_sigaction(struct k_sigaction *new_ka,
		struct old_sigaction __user *act)
{
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (get_user(handler, &act->sa_handler) ||
	    __get_user(restorer, &act->sa_restorer) ||
	    __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
	    __get_user(mask, &act->sa_mask))
		return -EFAULT;
	new_ka->sa.sa_handler = compat_ptr(handler);
	new_ka->sa.sa_restorer = compat_ptr(restorer);
	siginitset(&new_ka->sa.sa_mask, mask);
	return 0;
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i++) {
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
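
/*
 * Note on the loop above: gpr14-gpr31 are the non-volatile registers
 * and are only present in the pt_regs when the frame was fully saved,
 * so when FULL_REGS() is false the i == 14 test skips straight to the
 * special-register slots (index 32, NIP onwards).  The WARN_ON above
 * means that case is not actually expected here.
 */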

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

static inline int get_old_sigaction(struct k_sigaction *new_ka,
		struct old_sigaction __user *act)
{
	old_sigset_t mask;

	if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
			__get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
			__get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
		return -EFAULT;
	__get_user(new_ka->sa.sa_flags, &act->sa_flags);
	__get_user(mask, &act->sa_mask);
	siginitset(&new_ka->sa.sa_mask, mask);
	return 0;
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}

#endif /* CONFIG_PPC64 */
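
/*
 * From here on the code is shared between 32-bit kernels and 32-bit
 * compat tasks on a 64-bit kernel, except where explicitly #ifdef'ed:
 * the helpers above hide the differences in sigset layout, user
 * pointer width and register image format.
 */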

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
long sys_sigsuspend(old_sigset_t mask)
{
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

long sys_sigaction(int sig, struct old_sigaction __user *act,
		struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

#ifdef CONFIG_PPC64
	if (sig < 0)
		sig = -sig;
#endif

	if (act) {
		if (get_old_sigaction(&new_ka, act))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(to_user_ptr(old_ka.sa.sa_handler),
			    &oact->sa_handler) ||
		    __put_user(to_user_ptr(old_ka.sa.sa_restorer),
			    &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad
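
/*
 * When no VDSO is mapped, save_user_regs() below writes a two-instruction
 * sigreturn trampoline into mc_pad ("li r0,__NR_sigreturn; sc", or the
 * rt variant) and the handler's return address is pointed at it, so the
 * pad words double as executable code on the user stack.
 */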

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	double buf[ELF_NFPREG];
	int i;

	/* copy the FPRs and FPSCR to a local buffer, then write it out to user space */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	double buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	memcpy(&task->thread.fpscr, &buf[i], sizeof(double));

	return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	double buf[ELF_NVSRHALFREG];
	int i;

	/* copy the saved halves of VSR 0-31 to a local buffer, then write it out to user space */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	double buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#else
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fpr, from,
			      ELF_NFPREG * sizeof(double));
}
#endif
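
/*
 * With CONFIG_VSX each fpr[] entry is two doublewords: TS_FPR() picks
 * the one holding the FP register, and fpr[i][TS_VSRLOWOFFSET] holds
 * the remaining half of the corresponding VSR, which is presumably why
 * the FP and VSX state need the separate copy helpers above.  Without
 * VSX, fpr[] is a plain array of doubles and can be block-copied.
 */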

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
		int sigret)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		flush_vsx_to_thread(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
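
/*
 * The callers below pass sigret == 0, and thus skip writing the stack
 * trampoline, whenever a VDSO is mapped, since the VDSO then provides
 * the sigreturn entry point instead.
 */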

/*
 * Restore the current user register values from the user stack
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr/evr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec/SPE, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
	} else if (current->thread.used_vr)
		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore the additional VSX state from the stack to a
		 * local buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}
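
/*
 * Note that restore_user_regs() leaves MSR itself alone apart from the
 * LE bit on signal return; the FP/VEC/VSX/SPE enable bits are cleared
 * instead, so the state written into current->thread above is picked up
 * lazily the next time the task uses the corresponding unit.
 */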

#ifdef CONFIG_PPC64
long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
		struct sigaction32 __user *oact, size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;

		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
		ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
	}
	return ret;
}

/*
 * Note: it is necessary to treat how as an unsigned int, with the
 * corresponding cast to a signed int to ensure that the proper
 * conversion (sign extension) between the register representation
 * of a signed int (msr in 32-bit mode) and the register representation
 * of a signed int (msr in 64-bit mode) is performed.
 */
long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
		compat_sigset_t __user *oset, size_t sigsetsize)
{
	sigset_t s;
	sigset_t __user *up;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (get_sigset_t(&s, set))
			return -EFAULT;
	}

	set_fs(KERNEL_DS);
	/* This is valid because of the set_fs() */
	up = (sigset_t __user *) &s;
	ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
				 sigsetsize);
	set_fs(old_fs);
	if (ret)
		return ret;
	if (oset) {
		if (put_sigset_t(oset, &s))
			return -EFAULT;
	}
	return 0;
}

long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
{
	sigset_t s;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* The __user pointer cast is valid because of the set_fs() */
	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
	set_fs(old_fs);
	if (!ret) {
		if (put_sigset_t(set, &s))
			return -EFAULT;
	}
	return ret;
}


int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
{
	int err;

	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 * This routine must convert siginfo from 64bit to 32bit as well
	 * at the same time.
	 */
	err = __put_user(s->si_signo, &d->si_signo);
	err |= __put_user(s->si_errno, &d->si_errno);
	err |= __put_user((short)s->si_code, &d->si_code);
	if (s->si_code < 0)
		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
				      SI_PAD_SIZE32);
	else switch (s->si_code >> 16) {
	case __SI_CHLD >> 16:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		err |= __put_user(s->si_utime, &d->si_utime);
		err |= __put_user(s->si_stime, &d->si_stime);
		err |= __put_user(s->si_status, &d->si_status);
		break;
	case __SI_FAULT >> 16:
		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
				  &d->si_addr);
		break;
	case __SI_POLL >> 16:
		err |= __put_user(s->si_band, &d->si_band);
		err |= __put_user(s->si_fd, &d->si_fd);
		break;
	case __SI_TIMER >> 16:
		err |= __put_user(s->si_tid, &d->si_tid);
		err |= __put_user(s->si_overrun, &d->si_overrun);
		err |= __put_user(s->si_int, &d->si_int);
		break;
	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ >> 16:
		err |= __put_user(s->si_int, &d->si_int);
		/* fallthrough */
	case __SI_KILL >> 16:
	default:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		break;
	}
	return err;
}

#define copy_siginfo_to_user	copy_siginfo_to_user32

int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
	memset(to, 0, sizeof *to);

	if (copy_from_user(to, from, 3*sizeof(int)) ||
	    copy_from_user(to->_sifields._pad,
			   from->_sifields._pad, SI_PAD_SIZE32))
		return -EFAULT;

	return 0;
}

/*
 * Note: it is necessary to treat pid and sig as unsigned ints, with the
 * corresponding cast to a signed int to ensure that the proper conversion
 * (sign extension) between the register representation of a signed int
 * (msr in 32-bit mode) and the register representation of a signed int
 * (msr in 64-bit mode) is performed.
 */
long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;

	set_fs(KERNEL_DS);
	/* The __user pointer cast is valid because of the set_fs() */
	ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
	set_fs(old_fs);
	return ret;
}
/*
 *  Start Alternate signal stack support
 *
 *  System Calls
 *       sigaltstack              compat_sys_sigaltstack
 */

int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
		      int r6, int r7, int r8, struct pt_regs *regs)
{
	stack_32_t __user * newstack = compat_ptr(__new);
	stack_32_t __user * oldstack = compat_ptr(__old);
	stack_t uss, uoss;
	int ret;
	mm_segment_t old_fs;
	unsigned long sp;
	compat_uptr_t ss_sp;

	/*
	 * set sp to the user stack on entry to the system call
	 * the system call router sets R9 to the saved registers
	 */
	sp = regs->gpr[1];

	/* Put new stack info in local 64 bit stack struct */
	if (newstack) {
		if (get_user(ss_sp, &newstack->ss_sp) ||
		    __get_user(uss.ss_flags, &newstack->ss_flags) ||
		    __get_user(uss.ss_size, &newstack->ss_size))
			return -EFAULT;
		uss.ss_sp = compat_ptr(ss_sp);
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	/* The __user pointer casts are valid because of the set_fs() */
	ret = do_sigaltstack(
		newstack ? (stack_t __user *) &uss : NULL,
		oldstack ? (stack_t __user *) &uoss : NULL,
		sp);
	set_fs(old_fs);
	/* Copy the stack information to the user output buffer */
	if (!ret && oldstack  &&
		(put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
		 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
		 __put_user(uoss.ss_size, &oldstack->ss_size)))
		return -EFAULT;
	return ret;
}
#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
		siginfo_t *info, sigset_t *oldset,
		struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	void __user *addr;
	unsigned long newsp = 0;

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf));
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __put_user(0, &rt_sf->uc.uc_link)
	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
	    || __put_user(sas_ss_flags(regs->gpr[1]),
			  &rt_sf->uc.uc_stack.ss_flags)
	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, frame, 0))
			goto badframe;
		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		if (save_user_regs(regs, frame, __NR_rt_sigreturn))
			goto badframe;
		regs->link = (unsigned long) frame->tramp;
	}

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	regs->trap = 0;
	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals && printk_ratelimit())
		printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: "
			"%p nip %08lx lr %08lx\n",
			current->comm, current->pid,
			addr, regs->nip, regs->link);

	force_sigsegv(sig, current);
	return 0;
}
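
/*
 * As with handle_signal32() below, returning 1 tells the caller that the
 * register image now describes the handler invocation and should be
 * restored in full on the way back to user mode; returning 0 means frame
 * setup failed and a SIGSEGV has already been forced.
 */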

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	restore_sigmask(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

long sys_swapcontext(struct ucontext __user *old_ctx,
		     struct ucontext __user *new_ctx,
		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
	unsigned char tmp;

	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;

	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
		    || save_user_regs(regs, mctx, 0)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;
}
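
/*
 * TIF_RESTOREALL should make the syscall exit path restore the complete
 * register image (including the non-volatile GPRs rewritten by
 * do_setcontext() above) instead of only the registers it would normally
 * reload.
 */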

long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		     struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
		goto bad;
	if (do_setcontext(&rt_sf->uc, regs, 1))
		goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	/*
	 * We use the compat_sys_ version that does the 32/64 bits conversion
	 * and takes userland pointer directly. What about error checking ?
	 * nobody does any...
	 */
	compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
#else
	do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals && printk_ratelimit())
		printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: "
			"%p nip %08lx lr %08lx\n",
			current->comm, current->pid,
			rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}

#ifdef CONFIG_PPC32
int sys_debug_setcontext(struct ucontext __user *ctx,
			 int ndbg, struct sig_dbg_op __user *dbg,
			 int r6, int r7, int r8,
			 struct pt_regs *regs)
{
	struct sig_dbg_op op;
	int i;
	unsigned char tmp;
	unsigned long new_msr = regs->msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	unsigned long new_dbcr0 = current->thread.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_msr &= ~MSR_DE;
				new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	current->thread.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
	    || __get_user(tmp, (u8 __user *) ctx)
	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals && printk_ratelimit())
			printk(KERN_INFO "%s[%d]: bad frame in "
				"sys_debug_setcontext: %p nip %08lx "
				"lr %08lx\n",
				current->comm, current->pid,
				ctx, regs->nip, regs->link);

		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
int handle_signal32(unsigned long sig, struct k_sigaction *ka,
		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	unsigned long newsp = 0;

	/* Set up Signal Frame */
	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, &frame->mctx, 0))
			goto badframe;
		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
			goto badframe;
		regs->link = (unsigned long) frame->mctx.tramp;
	}

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	regs->trap = 0;

	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals && printk_ratelimit())
		printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: "
			"%p nip %08lx lr %08lx\n",
			current->comm, current->pid,
			frame, regs->nip, regs->link);

	force_sigsegv(sig, current);
	return 0;
}

/*
 * Do a signal return; undo the signal stack.
 */
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		       struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	restore_sigmask(&set);

	sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
	addr = sr;
	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
	    || restore_user_regs(regs, sr, 1))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals && printk_ratelimit())
		printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: "
			"%p nip %08lx lr %08lx\n",
			current->comm, current->pid,
			addr, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
1254