/* signal_32.c, revision e63340ae6b6205fef26b40a75673d1c9c0c8bb90 */
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#else
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/freezer.h>
#endif

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#undef DEBUG_SIG

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

#ifdef CONFIG_PPC64
#define do_signal	do_signal32
#define sys_sigsuspend	compat_sys_sigsuspend
#define sys_rt_sigsuspend	compat_sys_rt_sigsuspend
#define sys_rt_sigreturn	compat_sys_rt_sigreturn
#define sys_sigaction	compat_sys_sigaction
#define sys_swapcontext	compat_sys_swapcontext
#define sys_sigreturn	compat_sys_sigreturn

#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	compat_sigset_t	cset;

	switch (_NSIG_WORDS) {
	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
		cset.sig[7] = set->sig[3] >> 32;
	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
		cset.sig[5] = set->sig[2] >> 32;
	case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
		cset.sig[3] = set->sig[1] >> 32;
	case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
		cset.sig[1] = set->sig[0] >> 32;
	}
	return copy_to_user(uset, &cset, sizeof(*uset));
}
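
/*
 * Worked example of the flipping above (illustrative only): a 64-bit
 * kernel word such as set->sig[0] = 0x0000000100000002ULL is written
 * to 32-bit userspace as cset.sig[0] = 0x00000002 (low half) and
 * cset.sig[1] = 0x00000001 (high half); get_sigset_t() below
 * reassembles the 64-bit word from the same two halves.
 */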

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	compat_sigset_t s32;

	if (copy_from_user(&s32, uset, sizeof(*uset)))
		return -EFAULT;

	/*
	 * Swap the 2 words of the 64-bit sigset_t (they are stored
	 * in the "wrong" endian in 32-bit user storage).
	 */
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
	case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
	case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
	case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
	}
	return 0;
}

static inline int get_old_sigaction(struct k_sigaction *new_ka,
		struct old_sigaction __user *act)
{
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (get_user(handler, &act->sa_handler) ||
	    __get_user(restorer, &act->sa_restorer) ||
	    __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
	    __get_user(mask, &act->sa_mask))
		return -EFAULT;
	new_ka->sa.sa_handler = compat_ptr(handler);
	new_ka->sa.sa_restorer = compat_ptr(restorer);
	siginitset(&new_ka->sa.sa_mask, mask);
	return 0;
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i ++) {
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

static inline int get_old_sigaction(struct k_sigaction *new_ka,
		struct old_sigaction __user *act)
{
	old_sigset_t mask;

	if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
			__get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
			__get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
		return -EFAULT;
	__get_user(new_ka->sa.sa_flags, &act->sa_flags);
	__get_user(mask, &act->sa_mask);
	siginitset(&new_ka->sa.sa_mask, mask);
	return 0;
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}

#endif /* CONFIG_PPC64 */

int do_signal(sigset_t *oldset, struct pt_regs *regs);

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
long sys_sigsuspend(old_sigset_t mask)
{
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
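
/*
 * Illustrative userspace usage (not part of this file): the classic
 * race-free wait built on the syscall above looks like
 *
 *	volatile sig_atomic_t seen;	(set to 1 by a signal handler)
 *	sigset_t waitmask;
 *	sigemptyset(&waitmask);
 *	while (!seen)
 *		sigsuspend(&waitmask);
 *
 * Once a handler has run, sigsuspend() fails with EINTR and the
 * caller's original mask is put back via TIF_RESTORE_SIGMASK.
 */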

#ifdef CONFIG_PPC32
long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
		int r6, int r7, int r8, struct pt_regs *regs)
{
	return do_sigaltstack(uss, uoss, regs->gpr[1]);
}
#endif

long sys_sigaction(int sig, struct old_sigaction __user *act,
		struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

#ifdef CONFIG_PPC64
	if (sig < 0)
		sig = -sig;
#endif

	if (act) {
		if (get_old_sigaction(&new_ka, act))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(to_user_ptr(old_ka.sa.sa_handler),
			    &oact->sa_handler) ||
		    __put_user(to_user_ptr(old_ka.sa.sa_restorer),
			    &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	a sigregs struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct sigregs {
	struct mcontext	mctx;		/* all the register values */
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
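
/*
 * Resulting layout built by handle_signal() below, going down from the
 * interrupted GPR1 (illustrative summary of the code):
 *
 *	old GPR1 ->	caller's stack frame
 *			struct sigregs (mcontext + ABI gap)
 *			struct sigcontext
 *			__SIGNAL_FRAMESIZE gap
 *	new GPR1 ->	back chain word (old GPR1)
 */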

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad
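
/*
 * The trampoline that save_user_regs() writes into mc_pad is just two
 * instructions: 0x38000000 | sigret encodes "li r0,sigret" and
 * 0x44000002 encodes "sc", i.e. load the sigreturn syscall number and
 * trap back into the kernel.  It is only used when the VDSO
 * trampolines are not available.
 */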

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
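
/*
 * Resulting layout built by handle_rt_signal() below (illustrative
 * summary of the code):
 *
 *	old GPR1 ->	caller's stack frame
 *			struct rt_sigframe (siginfo + ucontext + ABI gap)
 *			__SIGNAL_FRAMESIZE + 16 byte gap
 *	new GPR1 ->	back chain word (old GPR1)
 */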

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
		int sigret)
{
	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general and floating-point registers */
	if (save_general_regs(regs, frame) ||
	    __copy_to_user(&frame->mc_fregs, current->thread.fpr,
		    ELF_NFPREG * sizeof(double)))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
			return 1;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
			return 1;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}

/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr/evr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec/SPE, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

	/* force the process to reload the FP registers from
	   current->thread when it next does FP instructions */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
	if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
			     sizeof(sr->mc_fregs)))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* force the process to reload the altivec registers from
	   current->thread when it next does altivec instructions */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
	} else if (current->thread.used_vr)
		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}

#ifdef CONFIG_PPC64
long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
		struct sigaction32 __user *oact, size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;

		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
		ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
		ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
	}
	return ret;
}

/*
 * Note: it is necessary to treat how as an unsigned int, with the
 * corresponding cast to a signed int to ensure that the proper
 * conversion (sign extension) between the register representation
 * of a signed int (in 32-bit mode) and the register representation
 * of a signed int (in 64-bit mode) is performed.
 */
long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
		compat_sigset_t __user *oset, size_t sigsetsize)
{
	sigset_t s;
	sigset_t __user *up;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (get_sigset_t(&s, set))
			return -EFAULT;
	}

	set_fs(KERNEL_DS);
	/* This is valid because of the set_fs() */
	up = (sigset_t __user *) &s;
	ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
				 sigsetsize);
	set_fs(old_fs);
	if (ret)
		return ret;
	if (oset) {
		if (put_sigset_t(oset, &s))
			return -EFAULT;
	}
	return 0;
}

long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
{
	sigset_t s;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* The __user pointer cast is valid because of the set_fs() */
	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
	set_fs(old_fs);
	if (!ret) {
		if (put_sigset_t(set, &s))
			return -EFAULT;
	}
	return ret;
}


int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
{
	int err;

	if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 * This routine must convert siginfo from 64bit to 32bit as well
	 * at the same time.
	 */
	err = __put_user(s->si_signo, &d->si_signo);
	err |= __put_user(s->si_errno, &d->si_errno);
	err |= __put_user((short)s->si_code, &d->si_code);
	if (s->si_code < 0)
		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
				      SI_PAD_SIZE32);
	else switch(s->si_code >> 16) {
	case __SI_CHLD >> 16:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		err |= __put_user(s->si_utime, &d->si_utime);
		err |= __put_user(s->si_stime, &d->si_stime);
		err |= __put_user(s->si_status, &d->si_status);
		break;
	case __SI_FAULT >> 16:
		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
				  &d->si_addr);
		break;
	case __SI_POLL >> 16:
		err |= __put_user(s->si_band, &d->si_band);
		err |= __put_user(s->si_fd, &d->si_fd);
		break;
	case __SI_TIMER >> 16:
		err |= __put_user(s->si_tid, &d->si_tid);
		err |= __put_user(s->si_overrun, &d->si_overrun);
		err |= __put_user(s->si_int, &d->si_int);
		break;
	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ >> 16:
		err |= __put_user(s->si_int, &d->si_int);
		/* fallthrough */
	case __SI_KILL >> 16:
	default:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		break;
	}
	return err;
}
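
/*
 * Illustrative note on the dispatch above: within the kernel the upper
 * 16 bits of si_code carry a __SI_* class tag (for example, a child
 * exit notification has si_code == __SI_CHLD | CLD_EXITED).  The
 * switch uses that tag to pick the union member to convert, and the
 * (short) cast when storing si_code strips the tag so userspace only
 * sees the low 16 bits.
 */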

#define copy_siginfo_to_user	copy_siginfo_to_user32

/*
 * Note: it is necessary to treat pid and sig as unsigned ints, with the
 * corresponding cast to a signed int to ensure that the proper conversion
 * (sign extension) between the register representation of a signed int
 * (in 32-bit mode) and the register representation of a signed int
 * (in 64-bit mode) is performed.
 */
long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
	    copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32))
		return -EFAULT;
	set_fs (KERNEL_DS);
	/* The __user pointer cast is valid because of the set_fs() */
	ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
	set_fs (old_fs);
	return ret;
}
/*
 *  Start Alternate signal stack support
 *
 *  System Calls
 *       sigaltstack               compat_sys_sigaltstack
 */

int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
		      int r6, int r7, int r8, struct pt_regs *regs)
{
	stack_32_t __user * newstack = compat_ptr(__new);
	stack_32_t __user * oldstack = compat_ptr(__old);
	stack_t uss, uoss;
	int ret;
	mm_segment_t old_fs;
	unsigned long sp;
	compat_uptr_t ss_sp;

	/*
	 * set sp to the user stack on entry to the system call
	 * the system call router sets R9 to the saved registers
	 */
	sp = regs->gpr[1];

	/* Put new stack info in local 64 bit stack struct */
	if (newstack) {
		if (get_user(ss_sp, &newstack->ss_sp) ||
		    __get_user(uss.ss_flags, &newstack->ss_flags) ||
		    __get_user(uss.ss_size, &newstack->ss_size))
			return -EFAULT;
		uss.ss_sp = compat_ptr(ss_sp);
	}

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	/* The __user pointer casts are valid because of the set_fs() */
	ret = do_sigaltstack(
		newstack ? (stack_t __user *) &uss : NULL,
		oldstack ? (stack_t __user *) &uoss : NULL,
		sp);
	set_fs(old_fs);
	/* Copy the stack information to the user output buffer */
	if (!ret && oldstack  &&
		(put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
		 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
		 __put_user(uoss.ss_size, &oldstack->ss_size)))
		return -EFAULT;
	return ret;
}
#endif /* CONFIG_PPC64 */
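
/*
 * Illustrative userspace setup (not part of this file) that exercises
 * the alternate stack path above and in do_signal() below:
 *
 *	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
 *	sigaltstack(&ss, NULL);
 *	struct sigaction sa = { .sa_sigaction = handler,
 *				.sa_flags = SA_SIGINFO | SA_ONSTACK };
 *	sigaction(SIGSEGV, &sa, NULL);
 *
 * With SA_ONSTACK set and the task not already on the alternate stack,
 * do_signal() places the signal frame at sas_ss_sp + sas_ss_size
 * instead of below the interrupted GPR1.
 */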


/*
 * Restore the user process's signal mask
 */
#ifdef CONFIG_PPC64
extern void restore_sigmask(sigset_t *set);
#else /* CONFIG_PPC64 */
static void restore_sigmask(sigset_t *set)
{
	sigdelsetmask(set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = *set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
#endif

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
		siginfo_t *info, sigset_t *oldset,
		struct pt_regs *regs, unsigned long newsp)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	unsigned long origsp = newsp;

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	newsp -= sizeof(*rt_sf);
	rt_sf = (struct rt_sigframe __user *)newsp;

	/* create a stack frame for the caller of the handler */
	newsp -= __SIGNAL_FRAMESIZE + 16;

	if (!access_ok(VERIFY_WRITE, (void __user *)newsp, origsp - newsp))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __put_user(0, &rt_sf->uc.uc_link)
	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
	    || __put_user(sas_ss_flags(regs->gpr[1]),
			  &rt_sf->uc.uc_stack.ss_flags)
	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, frame, 0))
			goto badframe;
		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		if (save_user_regs(regs, frame, __NR_rt_sigreturn))
			goto badframe;
		regs->link = (unsigned long) frame->tramp;
	}

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;
	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	regs->trap = 0;
	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	force_sigsegv(sig, current);
	return 0;
}
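
/*
 * On entry to the handler set up above the userspace ABI is, as the
 * register assignments show: r3 = signal number, r4 = &siginfo,
 * r5 = &ucontext, and the link register points at the sigreturn
 * trampoline, so a plain blr from the handler ends up in
 * sys_rt_sigreturn().
 */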

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	restore_sigmask(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

long sys_swapcontext(struct ucontext __user *old_ctx,
		     struct ucontext __user *new_ctx,
		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
	unsigned char tmp;

	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;

	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
		    || save_user_regs(regs, mctx, 0)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;
}
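
/*
 * Illustrative userspace view (an assumption about typical usage, not
 * confirmed by this file): a ucontext switch of the form
 *
 *	ucontext_t old, new;
 *	...
 *	swapcontext(&old, &new);
 *
 * maps naturally onto this call: the caller's current register state
 * is saved into old_ctx and, if new_ctx is non-NULL, the registers it
 * describes are installed on return to userspace (via TIF_RESTOREALL).
 */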

long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		     struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
		goto bad;
	if (do_setcontext(&rt_sf->uc, regs, 1))
		goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	/*
	 * We use the compat_sys_ version that does the 32/64 bits conversion
	 * and takes userland pointer directly. What about error checking ?
	 * nobody does any...
	 */
	compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
#else
	do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	force_sig(SIGSEGV, current);
	return 0;
}

#ifdef CONFIG_PPC32
int sys_debug_setcontext(struct ucontext __user *ctx,
			 int ndbg, struct sig_dbg_op __user *dbg,
			 int r6, int r7, int r8,
			 struct pt_regs *regs)
{
	struct sig_dbg_op op;
	int i;
	unsigned char tmp;
	unsigned long new_msr = regs->msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	unsigned long new_dbcr0 = current->thread.dbcr0;
#endif

	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_msr &= ~MSR_DE;
				new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	current->thread.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
	    || __get_user(tmp, (u8 __user *) ctx)
	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
static int handle_signal(unsigned long sig, struct k_sigaction *ka,
		siginfo_t *info, sigset_t *oldset, struct pt_regs *regs,
		unsigned long newsp)
{
	struct sigcontext __user *sc;
	struct sigregs __user *frame;
	unsigned long origsp = newsp;

	/* Set up Signal Frame */
	newsp -= sizeof(struct sigregs);
	frame = (struct sigregs __user *) newsp;

	/* Put a sigcontext on the stack */
	newsp -= sizeof(*sc);
	sc = (struct sigcontext __user *) newsp;

	/* create a stack frame for the caller of the handler */
	newsp -= __SIGNAL_FRAMESIZE;

	if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
		goto badframe;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(frame), &sc->regs)
	    || __put_user(sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && current->mm->context.vdso_base) {
		if (save_user_regs(regs, &frame->mctx, 0))
			goto badframe;
		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
			goto badframe;
		regs->link = (unsigned long) frame->mctx.tramp;
	}

	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;
	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	regs->trap = 0;

	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	force_sigsegv(sig, current);
	return 0;
}

/*
 * Do a signal return; undo the signal stack.
 */
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		       struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	sigset_t set;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	restore_sigmask(&set);

	sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
	    || restore_user_regs(regs, sr, 1))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
int do_signal(sigset_t *oldset, struct pt_regs *regs)
{
	siginfo_t info;
	struct k_sigaction ka;
	unsigned int newsp;
	int signr, ret;

#ifdef CONFIG_PPC32
	if (try_to_freeze()) {
		signr = 0;
		if (!signal_pending(current))
			goto no_signal;
	}
#endif

	if (test_thread_flag(TIF_RESTORE_SIGMASK))
		oldset = &current->saved_sigmask;
	else if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
#ifdef CONFIG_PPC32
no_signal:
#endif
	if (TRAP(regs) == 0x0C00		/* System Call! */
	    && regs->ccr & 0x10000000		/* error signalled */
	    && ((ret = regs->gpr[3]) == ERESTARTSYS
		|| ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
		|| ret == ERESTART_RESTARTBLOCK)) {

		if (signr > 0
		    && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
			|| (ret == ERESTARTSYS
			    && !(ka.sa.sa_flags & SA_RESTART)))) {
			/* make the system call return an EINTR error */
			regs->result = -EINTR;
			regs->gpr[3] = EINTR;
			/* note that the cr0.SO bit is already set */
		} else {
			regs->nip -= 4;	/* Back up & retry system call */
			regs->result = 0;
			regs->trap = 0;
			if (ret == ERESTART_RESTARTBLOCK)
				regs->gpr[0] = __NR_restart_syscall;
			else
				regs->gpr[3] = regs->orig_gpr3;
		}
	}

	if (signr == 0) {
		/* No signal to deliver -- put the saved sigmask back */
		if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
			clear_thread_flag(TIF_RESTORE_SIGMASK);
			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
		}
		return 0;		/* no signals delivered */
	}

	if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
	    && !on_sig_stack(regs->gpr[1]))
		newsp = current->sas_ss_sp + current->sas_ss_size;
	else
		newsp = regs->gpr[1];
	newsp &= ~0xfUL;

#ifdef CONFIG_PPC64
	/*
	 * Reenable the DABR before delivering the signal to
	 * user space. The DABR will have been cleared if it
	 * triggered inside the kernel.
	 */
	if (current->thread.dabr)
		set_dabr(current->thread.dabr);
#endif

	/* Whee!  Actually deliver the signal.  */
	if (ka.sa.sa_flags & SA_SIGINFO)
		ret = handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
	else
		ret = handle_signal(signr, &ka, &info, oldset, regs, newsp);

	if (ret) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked, &current->blocked,
			  &ka.sa.sa_mask);
		if (!(ka.sa.sa_flags & SA_NODEFER))
			sigaddset(&current->blocked, signr);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		/* A signal was successfully delivered; the saved sigmask is in
		   its frame, and we can clear the TIF_RESTORE_SIGMASK flag */
		if (test_thread_flag(TIF_RESTORE_SIGMASK))
			clear_thread_flag(TIF_RESTORE_SIGMASK);
	}

	return ret;
}
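
/*
 * Example of the restart logic above (derived from the code): a read()
 * that is sleeping when a signal arrives comes back here with
 * ERESTARTSYS in gpr[3] and cr0.SO set.  If the handler was installed
 * with SA_RESTART, nip is backed up by 4 so the sc instruction is
 * re-executed after the handler returns; otherwise the call is made to
 * fail with EINTR.
 */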