/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "sysemu/kvm.h"
#include "exec/hax.h"
#include "qemu/atomic.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

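/* Set when translation invalidated existing TBs (e.g. after a TB flush);
   the main loop uses it to avoid chaining to a stale TB. */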
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

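/* Out-of-line wrapper so callers do not need the inline cpu_has_work()
   definition. */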
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

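    /* The low two bits of next_tb encode why execution stopped; the
       value 2 means the instruction counter expired before the TB body
       ran (see the icount handling in cpu_exec() below). */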
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurred
           before the TB started executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB to the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

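/* A debug exception that did not come from a watchpoint leaves stale
   BP_WATCHPOINT_HIT flags behind; clear them before dispatching to the
   registered handler. */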
static void cpu_handle_debug_exception(CPUOldState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

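/* Set from outside the vCPU thread (e.g. from a signal handler) to make
   the running vCPU leave the execution loop; sig_atomic_t keeps the
   store safe from signal context. */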
volatile sig_atomic_t exit_request;

/*
 * QEMU (TCG) emulation runs either for MMIO accesses or while the vCPU
 * is in emulation mode, i.e. non-PG (non-paged) mode.  In the MMIO
 * case, pending interrupts must not be handled here, because only a
 * single instruction is emulated before control returns to the HAX
 * kernel module.
 */
int need_handle_intr_request(CPUOldState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#ifdef CONFIG_HAX
    if (!hax_enabled() || hax_vcpu_emulation_mode(cpu))
        return cpu->interrupt_request;
    return 0;
#else
    return cpu->interrupt_request;
#endif
}

int cpu_exec(CPUOldState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;
    CPUState *cpu = ENV_GET_CPU(env);

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;
    //cpu_single_env = env;

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

#ifdef CONFIG_HAX
            if (hax_enabled() && !hax_vcpu_exec(cpu))
                longjmp(env->jmp_env, 1);
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(cpu);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(need_handle_intr_request(env))) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env, 1);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(cpu, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(cpu, 0);
#else
                    log_cpu_state(cpu, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
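                /* next_tb holds the address of the TranslationBlock that
                   just ran, with its low two bits encoding which of its
                   two jump slots should be patched to point here. */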
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
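                /* compiler barrier: make sure the current_tb store above
                   cannot be reordered past the exit_request test below */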
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
#ifdef CONFIG_HAX
                if (hax_enabled() && hax_stop_emulation(cpu))
                    cpu_loop_exit(env);
#endif
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 && CONFIG_USER_ONLY */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit(env)
#endif
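/* On i386 the host fault is turned back into a guest exception via
   raise_exception_err(); other targets simply unwind to the main loop
   and let the pending exception_index be processed there. */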

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(env, pc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
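    /* Trap 0xe is the x86 page fault; bit 1 of the error code is set
       for write accesses, so shifting it down yields is_write. */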
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

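    /* As in the i386 handler: trap 0xe is the page fault, and bit 1 of
       the error code indicates a write access. */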
    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__ || __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)  ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

1129
1130#elif defined(__s390__)
1131
1132int cpu_signal_handler(int host_signum, void *pinfo,
1133                       void *puc)
1134{
1135    siginfo_t *info = pinfo;
1136    struct ucontext *uc = puc;
1137    unsigned long pc;
1138    uint16_t *pinsn;
1139    int is_write = 0;
1140
1141    pc = uc->uc_mcontext.psw.addr;
1142
1143    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
1144       of the normal 2 arguments.  The 3rd argument contains the "int_code"
1145       from the hardware which does in fact contain the is_write value.
1146       The rt signal handler, as far as I can tell, does not give this value
1147       at all.  Not that we could get to it from here even if it were.  */
1148    /* ??? This is not even close to complete, since it ignores all
1149       of the read-modify-write instructions.  */
1150    pinsn = (uint16_t *)pc;
1151    switch (pinsn[0] >> 8) {
1152    case 0x50: /* ST */
1153    case 0x42: /* STC */
1154    case 0x40: /* STH */
1155        is_write = 1;
1156        break;
1157    case 0xc4: /* RIL format insns */
1158        switch (pinsn[0] & 0xf) {
1159        case 0xf: /* STRL */
1160        case 0xb: /* STGRL */
1161        case 0x7: /* STHRL */
1162            is_write = 1;
1163        }
1164        break;
1165    case 0xe3: /* RXY format insns */
1166        switch (pinsn[2] & 0xff) {
1167        case 0x50: /* STY */
1168        case 0x24: /* STG */
1169        case 0x72: /* STCY */
1170        case 0x70: /* STHY */
1171        case 0x8e: /* STPQ */
1172        case 0x3f: /* STRVH */
1173        case 0x3e: /* STRV */
1174        case 0x2f: /* STRVG */
1175            is_write = 1;
1176        }
1177        break;
1178    }
1179    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1180                             is_write, &uc->uc_sigmask, puc);
1181}
1182
1183#elif defined(__mips__)
1184
1185int cpu_signal_handler(int host_signum, void *pinfo,
1186                       void *puc)
1187{
1188    siginfo_t *info = pinfo;
1189    struct ucontext *uc = puc;
1190    greg_t pc = uc->uc_mcontext.pc;
1191    int is_write;
1192
1193    /* XXX: compute is_write */
1194    is_write = 0;
1195    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1196                             is_write, &uc->uc_sigmask, puc);
1197}
1198
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */