cpu-exec.c revision f645f7d6fd841e39524e5df8c1a7fd8347f92ac1
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

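/* Set by tb_gen_code when the translation cache had to be flushed while a
   block was being generated; the main loop uses it to know that previously
   cached TB pointers (and any pending jump patches) may be stale.  */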
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

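    /* tcg_qemu_tb_exec returns the address of the TB we exited from, with
       its two low bits used as a tag: values 0/1 name the jump slot taken
       out of that TB, while 2 marks an icount expiry.  */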
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

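/* Second-level TB lookup: hash the physical PC and walk the collision
   chain.  TBs are keyed on physical addresses so that virtual aliases of
   the same code page share one translation; a hit is re-entered into the
   per-CPU virtual-PC jump cache.  */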
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB to the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

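/* First-level lookup: the jump cache is a direct-mapped table indexed by
   the virtual PC; tb_find_slow is only taken on a miss or when the cached
   TB no longer matches the current CPU state.  */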
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

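/* When the debug exception was not triggered by a watchpoint (e.g. a
   single-step or breakpoint trap), clear any stale watchpoint-hit flags so
   the watchpoints re-arm on their next access.  */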
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

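/* Set from signal handlers or other threads to ask the running CPU to
   leave its execution loop; mirrored into env->exit_request at the top of
   cpu_exec.  */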
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;
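    /* The barrier() above orders the save of the old register value before
       the assignment that repurposes it.  */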

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

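    /* Each longjmp to env->jmp_env lands back at the setjmp below, with
       env->exception_index describing why the inner loop was abandoned.  */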
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
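            /* Inner loop: service pending interrupts, look up or translate
               the next TB, chain it to the previous one when that is safe,
               and run it.  */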
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The QEMU implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
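                        /* icount_decr counts down as the TB executes and
                           goes negative when the budget is exhausted;
                           icount_extra holds instructions that did not fit
                           in the 16-bit decrementer.  */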
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif
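/* On i386 the fault is re-raised as a guest exception so the guest's own
   handler can run; other targets simply unwind back into cpu_exec with
   env->exception_index already set.  */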

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing a real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never reached */
    return 1;
}

#if defined(__i386__)

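/* Per-OS accessors for the fault PC, trap number, page-fault error code and
   saved signal mask in the host signal context.  */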
#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
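    /* Trap 0xe is the x86 page fault; bit 1 of the error code is set when
       the faulting access was a write.  */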
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)		((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)		REG_sig(gpr[reg_num], context)
# define IAR_sig(context)			REG_sig(nip, context)	/* Program counter */
# define MSR_sig(context)			REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)			REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)			REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)			REG_sig(link, context) /* Link register */
# define CR_sig(context)			REG_sig(ccr, context) /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)		(((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)			(*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)			REG_sig(dar, context)
# define DSISR_sig(context)			REG_sig(dsisr, context)
# define TRAP_sig(context)			REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)		((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)		((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)		((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)		((context)->uc_mcontext.mc_xer)
# define LR_sig(context)		((context)->uc_mcontext.mc_lr)
# define CR_sig(context)		((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)		((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)		((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)		((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)		((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)	((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)	((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)		((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)		REG_sig(r##reg_num, context)
# define IAR_sig(context)			REG_sig(srr0, context)	/* Program counter */
# define MSR_sig(context)			REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)			REG_sig(ctr, context)
# define XER_sig(context)			REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)			REG_sig(lr, context)  /* Link register */
# define CR_sig(context)			REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)		FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)			((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)			EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)			EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)			EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
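    /* SPARC op field 3 (bits 31:30) covers loads and stores; op3
       (bits 24:19) selects the exact memory instruction.  */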
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID	1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */