vm86_32.c revision bae9c19bf12bb2a914a8e530270f41d36cc87c63
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/syscalls.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */

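/*
 * 'regs' passed to the trap and fault handlers below points at the
 * kernel_vm86_regs image that do_sys_vm86() left on the kernel stack;
 * it is the first member of a kernel_vm86_struct, so the pointer can
 * be cast back to the enclosing structure.
 */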
#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

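/*
 * SAFE_MASK covers the flag bits a vm86 program may modify directly:
 * the arithmetic status flags (CF, PF, AF, ZF, SF, OF) plus TF and DF.
 * RETURN_MASK additionally passes through the reserved low bits.
 * Neither mask includes IF, IOPL or NT.
 */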
#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)

/* convert kernel_vm86_regs to vm86_regs */
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
				  const struct kernel_vm86_regs *regs)
{
	int ret = 0;

	/*
	 * kernel_vm86_regs is missing gs, so copy everything up to
	 * (but not including) orig_eax, and then the rest, including
	 * orig_eax.
	 */
	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
			    sizeof(struct kernel_vm86_regs) -
			    offsetof(struct kernel_vm86_regs, pt.orig_ax));

	return ret;
}

/* convert vm86_regs to kernel_vm86_regs */
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
				    const struct vm86_regs __user *user,
				    unsigned extra)
{
	int ret = 0;

	/* copy ax-fs inclusive */
	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	/* copy orig_ax-__gsh+extra */
	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
			      sizeof(struct kernel_vm86_regs) -
			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
			      extra);
	return ret;
}

struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.sp0 = current->thread.saved_sp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_sp0(tss, &current->thread);
	current->thread.saved_sp0 = 0;
	put_cpu();

	ret = KVM86->regs32;

	ret->fs = current->thread.saved_fs;
	set_user_gs(ret, current->thread.saved_gs);

	return ret;
}

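/*
 * Write-protect the 32 PTEs covering the 128 KB legacy VGA window
 * (0xA0000-0xBFFFF) so that stores to screen memory fault; the fault
 * path can then record dirty pages in thread.screen_bitmap.
 */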
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	split_huge_page_pmd(mm, pmd);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	flush_tlb();
}



static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

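/*
 * Two entry points: sys_vm86old() implements the original vm86()
 * interface taking a plain vm86_struct, while sys_vm86() implements
 * the extended "vm86plus" interface (as used by dosemu) that adds IRQ
 * passing and debugging support.
 */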
int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_sp0)
		goto out;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, vm86plus) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}


int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		ret = do_vm86_irq_handling(cmd, (int)arg);
		goto out;
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: with the old vm86 interface this returns the error
		 *  from access_ok(), because the subfunction is
		 *  interpreted as an (invalid) address of a vm86_struct.
		 *  So the installation check works.
		 */
		ret = 0;
		goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_sp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)arg;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}


static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.pt.ds = 0;
	info->regs.pt.es = 0;
	info->regs.pt.fs = 0;
#ifndef CONFIG_X86_32_LAZY_GS
	info->regs.pt.gs = 0;
#endif

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so we make sure the interrupt and other flags
 * are inherited from protected mode.
 */
	VEFLAGS = info->regs.pt.flags;
	info->regs.pt.flags &= SAFE_MASK;
	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
	info->regs.pt.flags |= X86_VM_MASK;

	switch (info->cpu_type) {
	case CPU_286:
		tsk->thread.v86mask = 0;
		break;
	case CPU_386:
		tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

/*
 * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
 */
	info->regs32->ax = VM86_SIGNAL;
	tsk->thread.saved_sp0 = tsk->thread.sp0;
	tsk->thread.saved_fs = info->regs32->fs;
	tsk->thread.saved_gs = get_user_gs(info->regs32);

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_sp0(tss, &tsk->thread);
	put_cpu();

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	/* call audit_syscall_exit since we do not exit via the normal paths */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(0), 0);

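	/*
	 * Switch the kernel stack pointer to info->regs and jump to the
	 * common exit path: it restores the vm86 register image, and the
	 * final IRET, seeing the VM bit set in EFLAGS, drops the CPU
	 * into virtual-8086 mode.
	 */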
	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
#ifdef CONFIG_X86_32_LAZY_GS
		"mov  %2, %%gs\n\t"
#endif
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	/* we never return here */
}

static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
{
	struct pt_regs *regs32;

	regs32 = save_v86_state(regs16);
	regs32->ax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
	if (VEFLAGS & X86_EFLAGS_VIP)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However, someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the instruction sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

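/*
 * Compose the flags image the vm86 program sees: the real flags minus
 * IF/IOPL, with IF taken from the virtual interrupt flag, IOPL forced
 * to 3 and the cpu_type-dependent bits taken from VEFLAGS.
 */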
static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.v86mask);
}

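/*
 * Test bit nr in the revectored-interrupt bitmap: 'btl' copies the bit
 * into the carry flag, and 'sbbl' turns the carry into 0 or -1.
 */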
static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
	return nr;
}

#define val_byte(val, n) (((__u8 *)&val)[n])

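/*
 * The stack access helpers below move one byte at a time through a
 * 16-bit offset, so pushes and pops keep working when the stack
 * pointer wraps around the 64 KB segment boundary; any fault jumps to
 * err_label.
 */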
#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

/* There are so many possible reasons for this function to return
 * VM86_INTx that adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
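	/*
	 * Fetch the real-mode vector: entry i of the interrupt vector
	 * table at linear address 0 holds a 16-bit offset followed by
	 * a 16-bit segment, packed into 4 bytes at i * 4.
	 */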
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
			/* setting this flag forces the code in entry_32.S to
			   call save_v86_state() and change the stack pointer
			   to KVM86->regs32 */
			set_thread_flag(TIF_IRET);
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let the calling routine handle this */
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

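/*
 * CHECK_IF_IN_TRAP re-arms the trap flag in the new flags image when a
 * vm86 debugger has a single-step pending. VM86_FAULT_RETURN goes back
 * to the vm86 program, unless the PIC emulation asked to be told about
 * enabled interrupts or a single-step trap has to be delivered first.
 */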
#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & X86_EFLAGS_TF) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

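	/*
	 * Skip over any instruction prefixes to find the opcode proper;
	 * an 0x66 prefix flips the operand size used by the pushf, popf
	 * and iret emulation below.
	 */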
	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:      /* 32-bit data */     data32 = 1; break;
		case 0x67:      /* 32-bit address */  break;
		case 0x2e:      /* CS */              break;
		case 0x3e:      /* DS */              break;
		case 0x26:      /* ES */              break;
		case 0x36:      /* SS */              break;
		case 0x65:      /* GS */              break;
		case 0x64:      /* FS */              break;
		case 0xf2:      /* repnz */           break;
		case 0xf3:      /* rep */             break;
		default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		VM86_FAULT_RETURN;
		}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}


/* ---------------- vm86 special IRQ passing stuff ----------------- */

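/*
 * A vm86plus task with CAP_SYS_ADMIN can attach one of the
 * ALLOWED_SIGS (or no signal) to a hardware IRQ. irq_handler() latches
 * the IRQ in irqbits, signals the task and masks the line until the
 * task collects it with VM86_GET_AND_RESET_IRQ.
 */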
#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when the user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

770
771static inline void free_vm86_irq(int irqnumber)
772{
773	unsigned long flags;
774
775	free_irq(irqnumber, NULL);
776	vm86_irqs[irqnumber].tsk = NULL;
777
778	spin_lock_irqsave(&irqbits_lock, flags);
779	irqbits &= ~(1 << irqnumber);
780	spin_unlock_irqrestore(&irqbits_lock, flags);
781}
782
783void release_vm86_irqs(struct task_struct *task)
784{
785	int i;
786	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
787	    if (vm86_irqs[i].tsk == task)
788		free_vm86_irq(i);
789}
790
static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}