vm86_32.c revision 65ea5b0349903585bfed9720fa06f5edb4f1cd25
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */


#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
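/*
 * For reference: decoded against the i386 EFLAGS layout, SAFE_MASK
 * (0xDD5) covers CF, PF, AF, ZF, SF, TF, DF and OF -- the flags a vm86
 * guest is allowed to set directly -- and deliberately excludes IF and
 * IOPL.  RETURN_MASK (0xDFF) additionally passes the fixed/reserved
 * low bits back to the guest when flags are read via get_vflags().
 */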
/* convert kernel_vm86_regs to vm86_regs */
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
				  const struct kernel_vm86_regs *regs)
{
	int ret = 0;

	/*
	 * kernel_vm86_regs is missing gs, so copy everything up to
	 * (but not including) orig_eax, and then the rest, including
	 * orig_eax.
	 */
	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
			    sizeof(struct kernel_vm86_regs) -
			    offsetof(struct kernel_vm86_regs, pt.orig_ax));

	return ret;
}

/* convert vm86_regs to kernel_vm86_regs */
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
				    const struct vm86_regs __user *user,
				    unsigned extra)
{
	int ret = 0;

	/* copy ax-fs inclusive */
	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	/* copy orig_ax-__gsh+extra */
	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
			      sizeof(struct kernel_vm86_regs) -
			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
			      extra);
	return ret;
}

struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.esp0 = current->thread.saved_esp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_esp0(tss, &current->thread);
	current->thread.saved_esp0 = 0;
	put_cpu();

	ret = KVM86->regs32;

	ret->fs = current->thread.saved_fs;
	loadsegment(gs, current->thread.saved_gs);

	return ret;
}

static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	flush_tlb();
}
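/*
 * Note: the 32 PTEs write-protected above cover 0xA0000-0xBFFFF, i.e.
 * the legacy VGA window; the resulting write faults are how screen
 * updates can be tracked when VM86_SCREEN_BITMAP is requested.
 */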
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

asmlinkage int sys_vm86old(struct pt_regs regs)
{
	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_esp0)
		goto out;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, vm86plus) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = &regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}


asmlinkage int sys_vm86(struct pt_regs regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (regs.bx) {
		case VM86_REQUEST_IRQ:
		case VM86_FREE_IRQ:
		case VM86_GET_IRQ_BITS:
		case VM86_GET_AND_RESET_IRQ:
			ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
			goto out;
		case VM86_PLUS_INSTALL_CHECK:
			/*
			 * NOTE: on old vm86 stuff this will return the error
			 * from access_ok(), because the subfunction is
			 * interpreted as an (invalid) address to a
			 * vm86_struct. So the installation check works.
			 */
			ret = 0;
			goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_esp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)regs.cx;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = &regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}
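/*
 * For illustration only (not part of this file): a minimal userspace
 * sequence driving the vm86plus entry point above, assuming the glibc
 * vm86() wrapper and the structure layout from <asm/vm86.h>:
 *
 *	struct vm86plus_struct v86;
 *
 *	memset(&v86, 0, sizeof(v86));
 *	v86.cpu_type = CPU_486;
 *	v86.regs.cs  = 0x1000;		-- real-mode CS:IP = 1000:0000
 *	v86.regs.eip = 0x0000;
 *	v86.regs.ss  = 0x2000;
 *	v86.regs.esp = 0xfffe;
 *	vm86(VM86_ENTER, &v86);		-- comes back via return_to_32bit()
 */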
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.pt.ds = 0;
	info->regs.pt.es = 0;
	info->regs.pt.fs = 0;

/*
 * we are clearing gs later just before "jmp resume_userspace",
 * because it is not saved/restored.
 */

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.pt.flags;
	info->regs.pt.flags &= SAFE_MASK;
	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
	info->regs.pt.flags |= VM_MASK;

	switch (info->cpu_type) {
		case CPU_286:
			tsk->thread.v86mask = 0;
			break;
		case CPU_386:
			tsk->thread.v86mask = NT_MASK | IOPL_MASK;
			break;
		case CPU_486:
			tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
			break;
		default:
			tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
			break;
	}

/*
 * Save old state, set default return value (%ax) to 0
 */
	info->regs32->ax = 0;
	tsk->thread.saved_esp0 = tsk->thread.esp0;
	tsk->thread.saved_fs = info->regs32->fs;
	savesegment(gs, tsk->thread.saved_gs);

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_esp0(tss, &tsk->thread);
	put_cpu();

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	/* call audit_syscall_exit since we do not exit via the normal paths */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(0), 0);

	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"mov %2, %%gs\n\t"
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	/* we never return here */
}
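/*
 * The scheme above is worth spelling out: the guest never runs with the
 * real IF under its control.  VEFLAGS keeps the guest's view of EFLAGS,
 * with VIF_MASK standing in for the virtual interrupt flag; set_IF()
 * below bails out to the monitor with VM86_STI as soon as VIP signals
 * that an interrupt is pending delivery.
 */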
static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
{
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->ax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS |= VIF_MASK;
	if (VEFLAGS & VIP_MASK)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct kernel_vm86_regs * regs)
{
	regs->pt.flags &= ~TF_MASK;
}

static inline void clear_AC(struct kernel_vm86_regs * regs)
{
	regs->pt.flags &= ~AC_MASK;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
{
	set_flags(VEFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & VIF_MASK)
		flags |= IF_MASK;
	flags |= IOPL_MASK;
	return flags | (VEFLAGS & current->thread.v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap),"r" (nr));
	return nr;
}
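/*
 * A rough C equivalent of the btl/sbbl idiom above (illustrative only,
 * assuming the __map[] layout of struct revectored_struct):
 *
 *	return (bitmap->__map[nr >> 5] & (1UL << (nr & 31))) ? -1 : 0;
 *
 * BT copies the selected bit into CF, and "sbbl %0,%0" then expands CF
 * into 0 or -1.
 */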
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

/*
 * There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user * ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i==0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}
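/*
 * Note on the stack arithmetic above: the pushw() macros decrement only
 * the local copy of 'sp', so the three 16-bit pushes (flags, cs, ip)
 * are committed to the register image with a single SP(regs) -= 6 once
 * all of them have succeeded.
 */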
int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ( (trapno==3) || (trapno==1) )
			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	if (current->ptrace & PT_PTRACED) {
		unsigned long flags;
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->blocked, SIGTRAP);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}
	send_sig(SIGTRAP, current, 1);
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= TF_MASK
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & TF_MASK) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
			case 0x66:	/* 32-bit data */	data32 = 1; break;
			case 0x67:	/* 32-bit address */	break;
			case 0x2e:	/* CS */		break;
			case 0x3e:	/* DS */		break;
			case 0x26:	/* ES */		break;
			case 0x36:	/* SS */		break;
			case 0x65:	/* GS */		break;
			case 0x64:	/* FS */		break;
			case 0xf2:	/* repnz */		break;
			case 0xf3:	/* rep */		break;
			default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
		}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ( (1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/*
	 * FIXME: After a long discussion with Stas we finally
	 *        agreed, that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}
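/*
 * To summarize the fault handler above: only pushf, popf, int xx, iret,
 * cli and sti are emulated here (plus the operand-size, segment and rep
 * prefixes); any other faulting opcode is bounced back to the monitor
 * as VM86_UNKNOWN.
 */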
/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME	"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \
	| (1 << SIGUNUSED) )

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}
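/*
 * Usage sketch (illustrative only, modulo the exact prototype of the
 * libc wrapper): VM86_REQUEST_IRQ packs the desired signal and IRQ line
 * into one argument, so to get SIGUSR1 delivered for IRQ 5 a caller
 * with CAP_SYS_ADMIN would issue
 *
 *	vm86(VM86_REQUEST_IRQ, (SIGUSR1 << 8) | 5);
 *
 * and later poll with VM86_GET_AND_RESET_IRQ, which also re-enables the
 * line via enable_irq().
 */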