signal.c revision d814c28ceca8f659c0012eaec8e21eee43710716
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>

#include "signal-common.h"

static int (*save_fp_context)(struct sigcontext __user *sc);
static int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);

struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
};

struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};

/*
 * Helper routines
 */
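/*
 * The FPU context is copied to/from the user sigcontext with the FPU
 * owner lock held, so a page fault on the sigcontext cannot be handled
 * at that point and save_fp_context()/restore_fp_context() fail instead.
 * The protected_* wrappers below pre-fault the relevant words with
 * __put_user()/__get_user() outside the lock and then retry.
 */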
static int protected_save_fp_context(struct sigcontext __user *sc)
{
	int err;
	while (1) {
		lock_fpu_owner();
		own_fpu_inatomic(1);
		err = save_fp_context(sc); /* this might fail */
		unlock_fpu_owner();
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &sc->sc_fpregs[0]) |
			__put_user(0, &sc->sc_fpregs[31]) |
			__put_user(0, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
	return err;
}

static int protected_restore_fp_context(struct sigcontext __user *sc)
{
	int err, tmp;
	while (1) {
		lock_fpu_owner();
		own_fpu_inatomic(0);
		err = restore_fp_context(sc); /* this might fail */
		unlock_fpu_owner();
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &sc->sc_fpregs[0]) |
			__get_user(tmp, &sc->sc_fpregs[31]) |
			__get_user(tmp, &sc->sc_fpc_csr);
		if (err)
			break;	/* really bad sigcontext */
	}
	return err;
}

int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;
	unsigned int used_math;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}

	used_math = !!used_math();
	err |= __put_user(used_math, &sc->sc_used_math);

	if (used_math) {
		/*
		 * Save FPU state to signal context. Signal handler
		 * will "inherit" current FPU state.
		 */
		err |= protected_save_fp_context(sc);
	}
	return err;
}

int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
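	/*
	 * In the FCSR the Cause bits sit five bits above the corresponding
	 * Enable bits, so shifting the enable mask left by 5 selects the
	 * Cause bits of all enabled exceptions; Unimplemented Operation
	 * (FPU_CSR_UNI_X) has a Cause bit but no Enable bit.
	 */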
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

static int
check_and_restore_fp_context(struct sigcontext __user *sc)
{
	int err, sig;

	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
	if (err > 0)
		err = 0;
	err |= protected_restore_fp_context(sc);
	return err ?: sig;
}

int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned int used_math;
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	err |= __get_user(used_math, &sc->sc_used_math);
	conditional_used_math(used_math);

	if (used_math) {
		/* restore fpu context if we have used it before */
		if (!err)
			err = check_and_restore_fp_context(sc);
	} else {
		/* signal handler may have used FPU.  Give it up. */
		lose_fpu(0);
	}

	return err;
}

void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * The FPU emulator may have its own trampoline active just
	 * above the user stack, 16 bytes before the next lowest
	 * 16-byte boundary.  Try to avoid trashing it.
	 */
	sp -= 32;

	/* This is the X/Open sanctioned signal stack switching.  */
	if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
		sp = current->sas_ss_sp + current->sas_ss_size;

	return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
	sigset_t newset;
	sigset_t __user *uset;

	uset = (sigset_t __user *) regs.regs[4];
	if (copy_from_user(&newset, uset, sizeof(sigset_t)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
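	/*
	 * TIF_RESTORE_SIGMASK tells do_signal() to put ->saved_sigmask back
	 * (or record it in the delivered signal frame) on the way back to
	 * user space.
	 */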
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif

asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
	sigset_t newset;
	sigset_t __user *unewset;
	size_t sigsetsize;

	/* XXX Don't preclude handling different sized sigset_t's.  */
	sigsetsize = regs.regs[5];
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	unewset = (sigset_t __user *) regs.regs[4];
	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, ~_BLOCKABLE);

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif

asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
{
	const stack_t __user *uss = (const stack_t __user *) regs.regs[4];
	stack_t __user *uoss = (stack_t __user *) regs.regs[5];
	unsigned long usp = regs.regs[29];

	return do_sigaltstack(uss, uoss, usp);
}

#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	frame = (struct sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	sigdelsetmask(&blocked, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = blocked;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	/*
	 * Don't let your children do this ...
	 */
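	/*
	 * Hand the restored pt_regs straight back to the syscall exit
	 * path: point $sp at them and jump to syscall_exit instead of
	 * returning normally.
	 */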
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */

asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	stack_t st;
	int sig;

	frame = (struct rt_sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);

	if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
		goto badframe;
	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  */
	do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}

#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct k_sigaction *ka,
		       struct pt_regs *regs, int signr, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		goto give_sigsegv;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = signr;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;

give_sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}
#endif

static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
			  struct pt_regs *regs,	int signr, sigset_t *set,
			  siginfo_t *info)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		goto give_sigsegv;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, info);

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __put_user((void __user *)current->sas_ss_sp,
	                  &frame->rs_uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->regs[29]),
	                  &frame->rs_uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size,
	                  &frame->rs_uc.uc_stack.ss_size);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		goto give_sigsegv;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = signr;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;
	regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;

give_sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}

struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame	= setup_frame,
	.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
	.setup_rt_frame	= setup_rt_frame,
	.rt_signal_return_offset =
		offsetof(struct mips_vdso, rt_signal_trampoline),
	.restart	= __NR_restart_syscall
};

static int handle_signal(unsigned long sig, siginfo_t *info,
	struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
{
	int ret;
	struct mips_abi *abi = current->thread.abi;
	void *vdso = current->mm->context.vdso;

	switch(regs->regs[0]) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		regs->regs[2] = EINTR;
		break;
	case ERESTARTSYS:
		if (!(ka->sa.sa_flags & SA_RESTART)) {
			regs->regs[2] = EINTR;
			break;
		}
	/* fallthrough */
	case ERESTARTNOINTR:		/* Userland will reload $v0.  */
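		/*
		 * Restart the interrupted syscall: $a3 is clobbered with the
		 * error flag on syscall exit, so restore the copy the entry
		 * path saved in regs[26], and rewind the PC over the
		 * li/syscall pair so both are re-executed on return.
		 */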
		regs->regs[7] = regs->regs[26];
		regs->cp0_epc -= 8;
	}

	regs->regs[0] = 0;		/* Don't deal with this again.  */

	if (sig_uses_siginfo(ka))
		ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
					  ka, regs, sig, oldset, info);
	else
		ret = abi->setup_frame(vdso + abi->signal_return_offset,
				       ka, regs, sig, oldset);

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}

static void do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	sigset_t *oldset;
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which is why we may in certain
	 * cases get here from kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return;

	if (test_thread_flag(TIF_RESTORE_SIGMASK))
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee!  Actually deliver the signal.  */
		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TIF_RESTORE_SIGMASK flag.
			 */
			if (test_thread_flag(TIF_RESTORE_SIGMASK))
				clear_thread_flag(TIF_RESTORE_SIGMASK);
		}

		return;
	}

	/*
	 * Whose code doesn't conform to the restartable syscall convention
	 * dies here!!!  The li instruction, a single machine instruction,
	 * must directly be followed by the syscall instruction.
	 */
	if (regs->regs[0]) {
		if (regs->regs[2] == ERESTARTNOHAND ||
		    regs->regs[2] == ERESTARTSYS ||
		    regs->regs[2] == ERESTARTNOINTR) {
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 8;
		}
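		/*
		 * ERESTART_RESTARTBLOCK: load the ABI's restart_syscall
		 * number into $v0, restore $a3, and rewind only over the
		 * syscall instruction itself, so the preceding li (which
		 * would reload the old syscall number) is not re-executed.
		 */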
		if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.  */
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
		clear_thread_flag(TIF_RESTORE_SIGMASK);
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	/* deal with pending signal delivery */
	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}
}

#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _save_fp_context(sc)
	       : fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _restore_fp_context(sc)
	       : fpu_emulator_restore_context(sc);
}
#endif

static int signal_setup(void)
{
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
	}
#endif

	return 0;
}

arch_initcall(signal_setup);