1#include <stdio.h>
2#include <stdlib.h>
3#include <string.h>
4
5#include "cpu.h"
6#include "exec-all.h"
7#include "gdbstub.h"
8#include "helper.h"
9#include "qemu-common.h"
10#include "host-utils.h"
11#if !defined(CONFIG_USER_ONLY)
12//#include "hw/loader.h"
13#ifdef CONFIG_TRACE
14#include "android-trace.h"
15#endif
16#endif
17
18static uint32_t cortexa9_cp15_c0_c1[8] =
19{ 0x1031, 0x11, 0x000, 0, 0x00100103, 0x20000000, 0x01230000, 0x00002111 };
20
21static uint32_t cortexa9_cp15_c0_c2[8] =
22{ 0x00101111, 0x13112111, 0x21232041, 0x11112131, 0x00111142, 0, 0, 0 };
23
24static uint32_t cortexa8_cp15_c0_c1[8] =
25{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };
26
27static uint32_t cortexa8_cp15_c0_c2[8] =
28{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };
29
30static uint32_t cortexa8r2_cp15_c0_c2[8] =
31{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00011142, 0, 0, 0 };
32
33static uint32_t mpcore_cp15_c0_c1[8] =
34{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };
35
36static uint32_t mpcore_cp15_c0_c2[8] =
37{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };
38
39static uint32_t arm1136_cp15_c0_c1[8] =
40{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };
41
42static uint32_t arm1136_cp15_c0_c2[8] =
43{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };
44
45static uint32_t cpu_arm_find_by_name(const char *name);
46
47static inline void set_feature(CPUARMState *env, int feature)
48{
49    env->features |= 1u << feature;
50}
51
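/* Set the CPUID and the per-model feature bits, cp15 identification
   registers and VFP ID register reset values for the given core.  */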
52static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
53{
54    env->cp15.c0_cpuid = id;
55    switch (id) {
56    case ARM_CPUID_ARM926:
57        set_feature(env, ARM_FEATURE_V4T);
58        set_feature(env, ARM_FEATURE_V5);
59        set_feature(env, ARM_FEATURE_VFP);
60        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
61        env->cp15.c0_cachetype = 0x1dd20d2;
62        env->cp15.c1_sys = 0x00090078;
63        break;
64    case ARM_CPUID_ARM946:
65        set_feature(env, ARM_FEATURE_V4T);
66        set_feature(env, ARM_FEATURE_V5);
67        set_feature(env, ARM_FEATURE_MPU);
68        env->cp15.c0_cachetype = 0x0f004006;
69        env->cp15.c1_sys = 0x00000078;
70        break;
71    case ARM_CPUID_ARM1026:
72        set_feature(env, ARM_FEATURE_V4T);
73        set_feature(env, ARM_FEATURE_V5);
74        set_feature(env, ARM_FEATURE_VFP);
75        set_feature(env, ARM_FEATURE_AUXCR);
76        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
77        env->cp15.c0_cachetype = 0x1dd20d2;
78        env->cp15.c1_sys = 0x00090078;
79        break;
80    case ARM_CPUID_ARM1136_R2:
81    case ARM_CPUID_ARM1136:
82        set_feature(env, ARM_FEATURE_V4T);
83        set_feature(env, ARM_FEATURE_V5);
84        set_feature(env, ARM_FEATURE_V6);
85        set_feature(env, ARM_FEATURE_VFP);
86        set_feature(env, ARM_FEATURE_AUXCR);
87        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
88        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
89        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
90        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
91        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
92        env->cp15.c0_cachetype = 0x1dd20d2;
93        env->cp15.c1_sys = 0x00050078;
94        break;
95    case ARM_CPUID_ARM11MPCORE:
96        set_feature(env, ARM_FEATURE_V4T);
97        set_feature(env, ARM_FEATURE_V5);
98        set_feature(env, ARM_FEATURE_V6);
99        set_feature(env, ARM_FEATURE_V6K);
100        set_feature(env, ARM_FEATURE_VFP);
101        set_feature(env, ARM_FEATURE_AUXCR);
102        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
103        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
104        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
105        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
106        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
107        env->cp15.c0_cachetype = 0x1dd20d2;
108        break;
109    case ARM_CPUID_CORTEXA8:
110        set_feature(env, ARM_FEATURE_V4T);
111        set_feature(env, ARM_FEATURE_V5);
112        set_feature(env, ARM_FEATURE_V6);
113        set_feature(env, ARM_FEATURE_V6K);
114        set_feature(env, ARM_FEATURE_V7);
115        set_feature(env, ARM_FEATURE_AUXCR);
116        set_feature(env, ARM_FEATURE_THUMB2);
117        set_feature(env, ARM_FEATURE_VFP);
118        set_feature(env, ARM_FEATURE_VFP3);
119        set_feature(env, ARM_FEATURE_NEON);
120        set_feature(env, ARM_FEATURE_THUMB2EE);
121        set_feature(env, ARM_FEATURE_TRUSTZONE);
122        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
123        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
124        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
125        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
126        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
127        env->cp15.c0_cachetype = 0x82048004;
128        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
129        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
130        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
131        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
132        env->cp15.c1_sys = 0x00c50078;
133        break;
134    case ARM_CPUID_CORTEXA8_R2:
135        set_feature(env, ARM_FEATURE_V4T);
136        set_feature(env, ARM_FEATURE_V5);
137        set_feature(env, ARM_FEATURE_V6);
138        set_feature(env, ARM_FEATURE_V6K);
139        set_feature(env, ARM_FEATURE_V7);
140        set_feature(env, ARM_FEATURE_AUXCR);
141        set_feature(env, ARM_FEATURE_THUMB2);
142        set_feature(env, ARM_FEATURE_VFP);
143        set_feature(env, ARM_FEATURE_VFP3);
144        set_feature(env, ARM_FEATURE_NEON);
145        set_feature(env, ARM_FEATURE_THUMB2EE);
146        set_feature(env, ARM_FEATURE_TRUSTZONE);
147        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c2;
148        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
149        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011111;
150        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
151        memcpy(env->cp15.c0_c2, cortexa8r2_cp15_c0_c2, 8 * sizeof(uint32_t));
152        env->cp15.c0_cachetype = 0x82048004;
153        env->cp15.c0_clid = (1 << 27) | (2 << 24) | (4 << 3) | 3;
154        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
155        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
156        env->cp15.c0_ccsid[2] = 0xf03fe03a; /* 256k L2 cache. */
157        env->cp15.c1_sys = 0x00c50078;
158        break;
159    case ARM_CPUID_CORTEXA9:
160        set_feature(env, ARM_FEATURE_V4T);
161        set_feature(env, ARM_FEATURE_V5);
162        set_feature(env, ARM_FEATURE_V6);
163        set_feature(env, ARM_FEATURE_V6K);
164        set_feature(env, ARM_FEATURE_V7);
165        set_feature(env, ARM_FEATURE_AUXCR);
166        set_feature(env, ARM_FEATURE_THUMB2);
167        set_feature(env, ARM_FEATURE_VFP);
168        set_feature(env, ARM_FEATURE_VFP3);
169        set_feature(env, ARM_FEATURE_VFP_FP16);
170        set_feature(env, ARM_FEATURE_NEON);
171        set_feature(env, ARM_FEATURE_THUMB2EE);
172        /* Note that A9 supports the MP extensions even for
173         * A9UP and single-core A9MP (which are both different
174         * and valid configurations; we don't model A9UP).
175         */
176        set_feature(env, ARM_FEATURE_V7MP);
177        set_feature(env, ARM_FEATURE_TRUSTZONE);
178        env->vfp.xregs[ARM_VFP_FPSID] = 0x41034000; /* Guess */
179        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
180        env->vfp.xregs[ARM_VFP_MVFR1] = 0x01111111;
181        memcpy(env->cp15.c0_c1, cortexa9_cp15_c0_c1, 8 * sizeof(uint32_t));
182        memcpy(env->cp15.c0_c2, cortexa9_cp15_c0_c2, 8 * sizeof(uint32_t));
183        env->cp15.c0_cachetype = 0x80038003;
184        env->cp15.c0_clid = (1 << 27) | (1 << 24) | 3;
185        env->cp15.c0_ccsid[0] = 0xe00fe015; /* 16k L1 dcache. */
186        env->cp15.c0_ccsid[1] = 0x200fe015; /* 16k L1 icache. */
187        env->cp15.c1_sys = 0x00c50078;
188        break;
189    case ARM_CPUID_CORTEXM3:
190        set_feature(env, ARM_FEATURE_V4T);
191        set_feature(env, ARM_FEATURE_V5);
192        set_feature(env, ARM_FEATURE_V6);
193        set_feature(env, ARM_FEATURE_THUMB2);
194        set_feature(env, ARM_FEATURE_V7);
195        set_feature(env, ARM_FEATURE_M);
196        set_feature(env, ARM_FEATURE_DIV);
197        break;
198    case ARM_CPUID_ANY: /* For userspace emulation.  */
199        set_feature(env, ARM_FEATURE_V4T);
200        set_feature(env, ARM_FEATURE_V5);
201        set_feature(env, ARM_FEATURE_V6);
202        set_feature(env, ARM_FEATURE_V6K);
203        set_feature(env, ARM_FEATURE_V7);
204        set_feature(env, ARM_FEATURE_THUMB2);
205        set_feature(env, ARM_FEATURE_VFP);
206        set_feature(env, ARM_FEATURE_VFP3);
207        set_feature(env, ARM_FEATURE_VFP_FP16);
208        set_feature(env, ARM_FEATURE_NEON);
209        set_feature(env, ARM_FEATURE_THUMB2EE);
210        set_feature(env, ARM_FEATURE_DIV);
211        set_feature(env, ARM_FEATURE_V7MP);
212        break;
213    case ARM_CPUID_TI915T:
214    case ARM_CPUID_TI925T:
215        set_feature(env, ARM_FEATURE_V4T);
216        set_feature(env, ARM_FEATURE_OMAPCP);
217        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
218        env->cp15.c0_cachetype = 0x5109149;
219        env->cp15.c1_sys = 0x00000070;
220        env->cp15.c15_i_max = 0x000;
221        env->cp15.c15_i_min = 0xff0;
222        break;
223    case ARM_CPUID_PXA250:
224    case ARM_CPUID_PXA255:
225    case ARM_CPUID_PXA260:
226    case ARM_CPUID_PXA261:
227    case ARM_CPUID_PXA262:
228        set_feature(env, ARM_FEATURE_V4T);
229        set_feature(env, ARM_FEATURE_V5);
230        set_feature(env, ARM_FEATURE_XSCALE);
231        /* JTAG_ID is ((id << 28) | 0x09265013) */
232        env->cp15.c0_cachetype = 0xd172172;
233        env->cp15.c1_sys = 0x00000078;
234        break;
235    case ARM_CPUID_PXA270_A0:
236    case ARM_CPUID_PXA270_A1:
237    case ARM_CPUID_PXA270_B0:
238    case ARM_CPUID_PXA270_B1:
239    case ARM_CPUID_PXA270_C0:
240    case ARM_CPUID_PXA270_C5:
241        set_feature(env, ARM_FEATURE_V4T);
242        set_feature(env, ARM_FEATURE_V5);
243        set_feature(env, ARM_FEATURE_XSCALE);
244        /* JTAG_ID is ((id << 28) | 0x09265013) */
245        set_feature(env, ARM_FEATURE_IWMMXT);
246        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
247        env->cp15.c0_cachetype = 0xd172172;
248        env->cp15.c1_sys = 0x00000078;
249        break;
250    case ARM_CPUID_SA1100:
251    case ARM_CPUID_SA1110:
252        set_feature(env, ARM_FEATURE_STRONGARM);
253        env->cp15.c1_sys = 0x00000070;
254        break;
255    default:
256        cpu_abort(env, "Bad CPU ID: %x\n", id);
257        break;
258    }
259}
260
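/* Reset the CPU: preserve the CPUID, clear state up to the 'breakpoints'
   field, reapply the model-specific reset values and set up the reset
   CPSR, VFP and MMU state.  */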
261void cpu_reset(CPUARMState *env)
262{
263    uint32_t id;
264
265    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
266        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
267        log_cpu_state(env, 0);
268    }
269
270    id = env->cp15.c0_cpuid;
271    memset(env, 0, offsetof(CPUARMState, breakpoints));
272    if (id)
273        cpu_reset_model_id(env, id);
274    /* DBGDIDR : we implement nothing, and just mirror the main ID
275     * register's Variant and Revision fields.
276     */
277    env->cp14_dbgdidr = (id >> 16 & 0xf0) | 0xf;
278#if defined (CONFIG_USER_ONLY)
279    env->uncached_cpsr = ARM_CPU_MODE_USR;
280    /* For user mode we must enable access to coprocessors */
281    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
282    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
283        env->cp15.c15_cpar = 3;
284    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
285        env->cp15.c15_cpar = 1;
286    }
287#else
288    /* SVC mode with interrupts disabled.  */
289    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
290    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
291       clear at reset.  Initial SP and PC are loaded from ROM.  */
292    if (IS_M(env)) {
293        uint32_t pc;
294        uint8_t *rom;
295        env->uncached_cpsr &= ~CPSR_I;
296#ifndef CONFIG_ANDROID  /* No hw/loader.h and no ROM support for now on Android */
297        rom = rom_ptr(0);
298        if (rom) {
            /* We should really use ldl_phys here, in case the guest
               modified flash and reset itself.  However images
               loaded via -kernel have not been copied yet, so load the
               values directly from there.  */
303            env->regs[13] = ldl_p(rom);
304            pc = ldl_p(rom + 4);
305            env->thumb = pc & 1;
306            env->regs[15] = pc & ~1;
307        }
308#endif
309    }
310    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
311    env->cp15.c2_base_mask = 0xffffc000u;
312#endif
313    set_flush_to_zero(1, &env->vfp.standard_fp_status);
314    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
315    set_default_nan_mode(1, &env->vfp.standard_fp_status);
316    set_float_detect_tininess(float_tininess_before_rounding,
317                              &env->vfp.fp_status);
318    set_float_detect_tininess(float_tininess_before_rounding,
319                              &env->vfp.standard_fp_status);
320    tlb_flush(env, 1);
321}
322
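/* gdbstub read handler for the VFP/Neon registers: D regs, Q-reg aliases
   for Neon, then FPSID/FPSCR/FPEXC.  Returns the number of bytes written
   to 'buf', or 0 for an unknown register.  */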
323static int vfp_gdb_get_reg(CPUState *env, uint8_t *buf, int reg)
324{
325    int nregs;
326
327    /* VFP data registers are always little-endian.  */
328    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
329    if (reg < nregs) {
330        stfq_le_p(buf, env->vfp.regs[reg]);
331        return 8;
332    }
333    if (arm_feature(env, ARM_FEATURE_NEON)) {
334        /* Aliases for Q regs.  */
335        nregs += 16;
336        if (reg < nregs) {
337            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
338            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
339            return 16;
340        }
341    }
342    switch (reg - nregs) {
343    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
344    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
345    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
346    }
347    return 0;
348}
349
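/* gdbstub write handler; mirrors vfp_gdb_get_reg.  Only the EN bit of
   FPEXC is writable.  */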
350static int vfp_gdb_set_reg(CPUState *env, uint8_t *buf, int reg)
351{
352    int nregs;
353
354    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
355    if (reg < nregs) {
356        env->vfp.regs[reg] = ldfq_le_p(buf);
357        return 8;
358    }
359    if (arm_feature(env, ARM_FEATURE_NEON)) {
360        nregs += 16;
361        if (reg < nregs) {
362            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
363            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
364            return 16;
365        }
366    }
367    switch (reg - nregs) {
368    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
369    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
370    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
371    }
372    return 0;
373}
374
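/* Allocate and initialise a CPU state for the named model.  Returns NULL
   if the model is unknown.  Also registers the matching VFP/Neon register
   set with the gdbstub.  */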
375CPUARMState *cpu_arm_init(const char *cpu_model)
376{
377    CPUARMState *env;
378    uint32_t id;
379    static int inited = 0;
380
381    id = cpu_arm_find_by_name(cpu_model);
382    if (id == 0)
383        return NULL;
384    env = qemu_mallocz(sizeof(CPUARMState));
385    cpu_exec_init(env);
386    if (!inited) {
387        inited = 1;
388        arm_translate_init();
389    }
390
391    env->cpu_model_str = cpu_model;
392    env->cp15.c0_cpuid = id;
393    cpu_reset(env);
394    if (arm_feature(env, ARM_FEATURE_NEON)) {
395        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
396                                 51, "arm-neon.xml", 0);
397    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
398        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
399                                 35, "arm-vfp3.xml", 0);
400    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
401        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
402                                 19, "arm-vfp.xml", 0);
403    }
404    qemu_init_vcpu(env);
405    return env;
406}
407
408struct arm_cpu_t {
409    uint32_t id;
410    const char *name;
411};
412
413static const struct arm_cpu_t arm_cpu_names[] = {
414    { ARM_CPUID_ARM926, "arm926"},
415    { ARM_CPUID_ARM946, "arm946"},
416    { ARM_CPUID_ARM1026, "arm1026"},
417    { ARM_CPUID_ARM1136, "arm1136"},
418    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
419    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
420    { ARM_CPUID_CORTEXM3, "cortex-m3"},
421    { ARM_CPUID_CORTEXA8, "cortex-a8"},
422    { ARM_CPUID_CORTEXA8_R2, "cortex-a8-r2"},
423    { ARM_CPUID_CORTEXA9, "cortex-a9"},
424    { ARM_CPUID_TI925T, "ti925t" },
425    { ARM_CPUID_PXA250, "pxa250" },
426    { ARM_CPUID_SA1100,    "sa1100" },
427    { ARM_CPUID_SA1110,    "sa1110" },
428    { ARM_CPUID_PXA255, "pxa255" },
429    { ARM_CPUID_PXA260, "pxa260" },
430    { ARM_CPUID_PXA261, "pxa261" },
431    { ARM_CPUID_PXA262, "pxa262" },
432    { ARM_CPUID_PXA270, "pxa270" },
433    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
434    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
435    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
436    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
437    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
438    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
439    { ARM_CPUID_ANY, "any"},
440    { 0, NULL}
441};
442
443void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
444{
445    int i;
446
447    (*cpu_fprintf)(f, "Available CPUs:\n");
448    for (i = 0; arm_cpu_names[i].name; i++) {
449        (*cpu_fprintf)(f, "  %s\n", arm_cpu_names[i].name);
450    }
451}
452
453/* return 0 if not found */
454static uint32_t cpu_arm_find_by_name(const char *name)
455{
456    int i;
457    uint32_t id;
458
459    id = 0;
460    for (i = 0; arm_cpu_names[i].name; i++) {
461        if (strcmp(name, arm_cpu_names[i].name) == 0) {
462            id = arm_cpu_names[i].id;
463            break;
464        }
465    }
466    return id;
467}
468
469void cpu_arm_close(CPUARMState *env)
470{
471    free(env);
472}
473
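/* Recompose the CPSR from the cached NZCVQ, GE, IT and Thumb fields plus
   the uncached bits.  */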
474uint32_t cpsr_read(CPUARMState *env)
475{
476    int ZF;
477    ZF = (env->ZF == 0);
478    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
479        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
480        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
481        | ((env->condexec_bits & 0xfc) << 8)
482        | (env->GE << 16);
483}
484
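/* Write the CPSR fields selected by 'mask', switching register banks if
   the mode bits change.  */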
485void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
486{
487    if (mask & CPSR_NZCV) {
488        env->ZF = (~val) & CPSR_Z;
489        env->NF = val;
490        env->CF = (val >> 29) & 1;
491        env->VF = (val << 3) & 0x80000000;
492    }
493    if (mask & CPSR_Q)
494        env->QF = ((val & CPSR_Q) != 0);
495    if (mask & CPSR_T)
496        env->thumb = ((val & CPSR_T) != 0);
497    if (mask & CPSR_IT_0_1) {
498        env->condexec_bits &= ~3;
499        env->condexec_bits |= (val >> 25) & 3;
500    }
501    if (mask & CPSR_IT_2_7) {
502        env->condexec_bits &= 3;
503        env->condexec_bits |= (val >> 8) & 0xfc;
504    }
505    if (mask & CPSR_GE) {
506        env->GE = (val >> 16) & 0xf;
507    }
508
509    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
510        switch_mode(env, val & CPSR_M);
511    }
512    mask &= ~CACHED_CPSR_BITS;
513    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
514}
515
516/* Sign/zero extend */
517uint32_t HELPER(sxtb16)(uint32_t x)
518{
519    uint32_t res;
520    res = (uint16_t)(int8_t)x;
521    res |= (uint32_t)(int8_t)(x >> 16) << 16;
522    return res;
523}
524
525uint32_t HELPER(uxtb16)(uint32_t x)
526{
527    uint32_t res;
528    res = (uint16_t)(uint8_t)x;
529    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
530    return res;
531}
532
533uint32_t HELPER(clz)(uint32_t x)
534{
535    return clz32(x);
536}
537
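/* Integer division helpers: as the code below shows, division by zero
   yields 0 and INT_MIN / -1 saturates to INT_MIN rather than trapping.  */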
538int32_t HELPER(sdiv)(int32_t num, int32_t den)
539{
540    if (den == 0)
541      return 0;
542    if (num == INT_MIN && den == -1)
543      return INT_MIN;
544    return num / den;
545}
546
547uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
548{
549    if (den == 0)
550      return 0;
551    return num / den;
552}
553
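/* Reverse the bits of a 32-bit word: swap bytes, then nibbles, then the
   bits within each nibble.  */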
554uint32_t HELPER(rbit)(uint32_t x)
555{
556    x =  ((x & 0xff000000) >> 24)
557       | ((x & 0x00ff0000) >> 8)
558       | ((x & 0x0000ff00) << 8)
559       | ((x & 0x000000ff) << 24);
560    x =  ((x & 0xf0f0f0f0) >> 4)
561       | ((x & 0x0f0f0f0f) << 4);
562    x =  ((x & 0x88888888) >> 3)
563       | ((x & 0x44444444) >> 1)
564       | ((x & 0x22222222) << 1)
565       | ((x & 0x11111111) << 3);
566    return x;
567}
568
569uint32_t HELPER(abs)(uint32_t x)
570{
571    return ((int32_t)x < 0) ? -x : x;
572}
573
574#if defined(CONFIG_USER_ONLY)
575
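/* User-mode emulation stubs: exceptions are reported back to the caller
   and privileged operations simply abort.  */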
576void do_interrupt (CPUState *env)
577{
578    env->exception_index = -1;
579}
580
581int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
582                              int mmu_idx, int is_softmmu)
583{
584    if (rw == 2) {
585        env->exception_index = EXCP_PREFETCH_ABORT;
586        env->cp15.c6_insn = address;
587    } else {
588        env->exception_index = EXCP_DATA_ABORT;
589        env->cp15.c6_data = address;
590    }
591    return 1;
592}
593
594/* These should probably raise undefined insn exceptions.  */
595void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
596{
597    cpu_abort(env, "cp15 insn %08x\n", insn);
598}
599
600uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
601{
602    cpu_abort(env, "cp15 insn %08x\n", insn);
603    return 0;
604}
605
606/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}
611
612uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
613{
614    cpu_abort(env, "v7m_mrs %d\n", reg);
615    return 0;
616}
617
618void switch_mode(CPUState *env, int mode)
619{
620    if (mode != ARM_CPU_MODE_USR)
621        cpu_abort(env, "Tried to switch out of user mode\n");
622}
623
624void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
625{
626    cpu_abort(env, "banked r13 write\n");
627}
628
629uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
630{
631    cpu_abort(env, "banked r13 read\n");
632    return 0;
633}
634
635#else
636
637extern int semihosting_enabled;
638
639/* Map CPU modes onto saved register banks.  */
640static inline int bank_number (int mode)
641{
642    switch (mode) {
643    case ARM_CPU_MODE_USR:
644    case ARM_CPU_MODE_SYS:
645        return 0;
646    case ARM_CPU_MODE_SVC:
647        return 1;
648    case ARM_CPU_MODE_ABT:
649        return 2;
650    case ARM_CPU_MODE_UND:
651        return 3;
652    case ARM_CPU_MODE_IRQ:
653        return 4;
654    case ARM_CPU_MODE_FIQ:
655        return 5;
656    case ARM_CPU_MODE_SMC:
657        return 6;
658    }
659    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
660    return -1;
661}
662
663void switch_mode(CPUState *env, int mode)
664{
665    int old_mode;
666    int i;
667
668    old_mode = env->uncached_cpsr & CPSR_M;
669    if (mode == old_mode)
670        return;
671
672    if (old_mode == ARM_CPU_MODE_FIQ) {
673        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
674        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
675    } else if (mode == ARM_CPU_MODE_FIQ) {
676        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
677        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
678    }
679
680    i = bank_number(old_mode);
681    env->banked_r13[i] = env->regs[13];
682    env->banked_r14[i] = env->regs[14];
683    env->banked_spsr[i] = env->spsr;
684
685    i = bank_number(mode);
686    env->regs[13] = env->banked_r13[i];
687    env->regs[14] = env->banked_r14[i];
688    env->spsr = env->banked_spsr[i];
689}
690
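/* Push/pop a word on the current v7-M stack using physical accesses.  */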
691static void v7m_push(CPUARMState *env, uint32_t val)
692{
693    env->regs[13] -= 4;
694    stl_phys(env->regs[13], val);
695}
696
697static uint32_t v7m_pop(CPUARMState *env)
698{
699    uint32_t val;
700    val = ldl_phys(env->regs[13]);
701    env->regs[13] += 4;
702    return val;
703}
704
705/* Switch to V7M main or process stack pointer.  */
706static void switch_v7m_sp(CPUARMState *env, int process)
707{
708    uint32_t tmp;
709    if (env->v7m.current_sp != process) {
710        tmp = env->v7m.other_sp;
711        env->v7m.other_sp = env->regs[13];
712        env->regs[13] = tmp;
713        env->v7m.current_sp = process;
714    }
715}
716
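/* v7-M exception return: regs[15] holds the magic exception-return value;
   switch to the target stack and unstack the exception frame.  */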
717static void do_v7m_exception_exit(CPUARMState *env)
718{
719    uint32_t type;
720    uint32_t xpsr;
721
722    type = env->regs[15];
723    if (env->v7m.exception != 0)
724        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
725
726    /* Switch to the target stack.  */
727    switch_v7m_sp(env, (type & 4) != 0);
728    /* Pop registers.  */
729    env->regs[0] = v7m_pop(env);
730    env->regs[1] = v7m_pop(env);
731    env->regs[2] = v7m_pop(env);
732    env->regs[3] = v7m_pop(env);
733    env->regs[12] = v7m_pop(env);
734    env->regs[14] = v7m_pop(env);
735    env->regs[15] = v7m_pop(env);
736    xpsr = v7m_pop(env);
737    xpsr_write(env, xpsr, 0xfffffdff);
738    /* Undo stack alignment.  */
739    if (xpsr & 0x200)
740        env->regs[13] |= 4;
741    /* ??? The exception return type specifies Thread/Handler mode.  However
742       this is also implied by the xPSR value. Not sure what to do
743       if there is a mismatch.  */
744    /* ??? Likewise for mismatches between the CONTROL register and the stack
745       pointer.  */
746}
747
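/* v7-M exception entry: faults are marked pending on the NVIC; for IRQs
   we push a stack frame and vector through the NVIC table.  */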
748static void do_interrupt_v7m(CPUARMState *env)
749{
750    uint32_t xpsr = xpsr_read(env);
751    uint32_t lr;
752    uint32_t addr;
753
754    lr = 0xfffffff1;
755    if (env->v7m.current_sp)
756        lr |= 4;
757    if (env->v7m.exception == 0)
758        lr |= 8;
759
760    /* For exceptions we just mark as pending on the NVIC, and let that
761       handle it.  */
762    /* TODO: Need to escalate if the current priority is higher than the
763       one we're raising.  */
764    switch (env->exception_index) {
765    case EXCP_UDEF:
766        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
767        return;
768    case EXCP_SWI:
769        env->regs[15] += 2;
770        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
771        return;
772    case EXCP_PREFETCH_ABORT:
773    case EXCP_DATA_ABORT:
774        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
775        return;
776    case EXCP_BKPT:
777        if (semihosting_enabled) {
778            int nr;
779            nr = lduw_code(env->regs[15]) & 0xff;
780            if (nr == 0xab) {
781                env->regs[15] += 2;
782                env->regs[0] = do_arm_semihosting(env);
783                return;
784            }
785        }
786        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
787        return;
788    case EXCP_IRQ:
789        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
790        break;
791    case EXCP_EXCEPTION_EXIT:
792        do_v7m_exception_exit(env);
793        return;
794    default:
795        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
796        return; /* Never happens.  Keep compiler happy.  */
797    }
798
799    /* Align stack pointer.  */
800    /* ??? Should only do this if Configuration Control Register
801       STACKALIGN bit is set.  */
802    if (env->regs[13] & 4) {
803        env->regs[13] -= 4;
804        xpsr |= 0x200;
805    }
806    /* Switch to the handler mode.  */
807    v7m_push(env, xpsr);
808    v7m_push(env, env->regs[15]);
809    v7m_push(env, env->regs[14]);
810    v7m_push(env, env->regs[12]);
811    v7m_push(env, env->regs[3]);
812    v7m_push(env, env->regs[2]);
813    v7m_push(env, env->regs[1]);
814    v7m_push(env, env->regs[0]);
815    switch_v7m_sp(env, 0);
816    env->uncached_cpsr &= ~CPSR_IT;
817    env->regs[14] = lr;
818    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
819    env->regs[15] = addr & 0xfffffffe;
820    env->thumb = addr & 1;
821}
822
823/* Handle a CPU exception.  */
824void do_interrupt(CPUARMState *env)
825{
826    uint32_t addr;
827    uint32_t mask;
828    int new_mode;
829    uint32_t offset;
830
831#ifdef CONFIG_TRACE
832    if (tracing) {
833        trace_exception(env->regs[15]);
834    }
835#endif
836
837    if (IS_M(env)) {
838        do_interrupt_v7m(env);
839        return;
840    }
841    /* TODO: Vectored interrupt controller.  */
842    switch (env->exception_index) {
843    case EXCP_UDEF:
844        new_mode = ARM_CPU_MODE_UND;
845        addr = 0x04;
846        mask = CPSR_I;
847        if (env->thumb)
848            offset = 2;
849        else
850            offset = 4;
851        break;
852    case EXCP_SWI:
853        if (semihosting_enabled) {
854            /* Check for semihosting interrupt.  */
855            if (env->thumb) {
856                mask = lduw_code(env->regs[15] - 2) & 0xff;
857            } else {
858                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
859            }
860            /* Only intercept calls from privileged modes, to provide some
861               semblance of security.  */
862            if (((mask == 0x123456 && !env->thumb)
863                    || (mask == 0xab && env->thumb))
864                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
865                env->regs[0] = do_arm_semihosting(env);
866                return;
867            }
868        }
869        new_mode = ARM_CPU_MODE_SVC;
870        addr = 0x08;
871        mask = CPSR_I;
872        /* The PC already points to the next instruction.  */
873        offset = 0;
874        break;
875    case EXCP_BKPT:
876        /* See if this is a semihosting syscall.  */
877        if (env->thumb && semihosting_enabled) {
878            mask = lduw_code(env->regs[15]) & 0xff;
879            if (mask == 0xab
880                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
881                env->regs[15] += 2;
882                env->regs[0] = do_arm_semihosting(env);
883                return;
884            }
885        }
886        /* Fall through to prefetch abort.  */
887    case EXCP_PREFETCH_ABORT:
888        new_mode = ARM_CPU_MODE_ABT;
889        addr = 0x0c;
890        mask = CPSR_A | CPSR_I;
891        offset = 4;
892        break;
893    case EXCP_DATA_ABORT:
894        new_mode = ARM_CPU_MODE_ABT;
895        addr = 0x10;
896        mask = CPSR_A | CPSR_I;
897        offset = 8;
898        break;
899    case EXCP_IRQ:
900        new_mode = ARM_CPU_MODE_IRQ;
901        addr = 0x18;
902        /* Disable IRQ and imprecise data aborts.  */
903        mask = CPSR_A | CPSR_I;
904        offset = 4;
905        break;
906    case EXCP_FIQ:
907        new_mode = ARM_CPU_MODE_FIQ;
908        addr = 0x1c;
909        /* Disable FIQ, IRQ and imprecise data aborts.  */
910        mask = CPSR_A | CPSR_I | CPSR_F;
911        offset = 4;
912        break;
913    case EXCP_SMC:
914        if (semihosting_enabled) {
915            cpu_abort(env, "SMC handling under semihosting not implemented\n");
916            return;
917        }
918        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SMC) {
919            env->cp15.c1_secfg &= ~1;
920        }
921        offset = env->thumb ? 2 : 0;
922        new_mode = ARM_CPU_MODE_SMC;
923        addr = 0x08;
924        mask = CPSR_A | CPSR_I | CPSR_F;
925        break;
926    default:
927        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
928        return; /* Never happens.  Keep compiler happy.  */
929    }
930    if (arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
931        if (new_mode == ARM_CPU_MODE_SMC ||
932            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SMC) {
933            addr += env->cp15.c12_mvbar;
934        } else {
935            if (env->cp15.c1_sys & (1 << 13)) {
936                addr += 0xffff0000;
937            } else {
938                addr += env->cp15.c12_vbar;
939            }
940        }
    } else {
        /* High vectors.  */
        if (env->cp15.c1_sys & (1 << 13)) {
            addr += 0xffff0000;
        }
    }
947    switch_mode (env, new_mode);
948    env->spsr = cpsr_read(env);
949    /* Clear IT bits.  */
950    env->condexec_bits = 0;
951    /* Switch to the new mode, and to the correct instruction set.  */
952    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
953    env->uncached_cpsr |= mask;
    /* This is a lie, as there was no c1_sys on V4T/V5, but it does not
     * matter here; the check just guards the Thumb exception entry on V4. */
956    if (arm_feature(env, ARM_FEATURE_V4T)) {
957        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
958    }
959    env->regs[14] = env->regs[15] + offset;
960    env->regs[15] = addr;
961    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
962}
963
964/* Check section/page access permissions.
965   Returns the page protection flags, or zero if the access is not
966   permitted.  */
967static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
968                           int is_user)
969{
970  int prot_ro;
971
972  if (domain == 3)
973    return PAGE_READ | PAGE_WRITE;
974
975  if (access_type == 1)
976      prot_ro = 0;
977  else
978      prot_ro = PAGE_READ;
979
980  switch (ap) {
981  case 0:
982      if (access_type == 1)
983          return 0;
984      switch ((env->cp15.c1_sys >> 8) & 3) {
985      case 1:
986          return is_user ? 0 : PAGE_READ;
987      case 2:
988          return PAGE_READ;
989      default:
990          return 0;
991      }
992  case 1:
993      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
994  case 2:
995      if (is_user)
996          return prot_ro;
997      else
998          return PAGE_READ | PAGE_WRITE;
999  case 3:
1000      return PAGE_READ | PAGE_WRITE;
1001  case 4: /* Reserved.  */
1002      return 0;
1003  case 5:
1004      return is_user ? 0 : prot_ro;
1005  case 6:
1006      return prot_ro;
1007  case 7:
1008      if (!arm_feature (env, ARM_FEATURE_V7))
1009          return 0;
1010      return prot_ro;
1011  default:
1012      abort();
1013  }
1014}
1015
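/* Return the physical address of the level 1 descriptor for 'address',
   selecting TTBR0 or TTBR1 according to the c2_mask split.  */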
1016static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
1017{
1018    uint32_t table;
1019
1020    if (address & env->cp15.c2_mask)
1021        table = env->cp15.c2_base1 & 0xffffc000;
1022    else
1023        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;
1024
1025    table |= (address >> 18) & 0x3ffc;
1026    return table;
1027}
1028
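/* ARMv5-format (no XN bit) translation table walk.  Fills in the physical
   address, protection and page size, or returns an FSR-style fault code
   with the domain in bits [7:4].  */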
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot,
                            target_ulong *page_size)
1032{
1033    int code;
1034    uint32_t table;
1035    uint32_t desc;
1036    int type;
1037    int ap;
1038    int domain;
1039    uint32_t phys_addr;
1040
1041    /* Pagetable walk.  */
1042    /* Lookup l1 descriptor.  */
1043    table = get_level1_table_address(env, address);
1044    desc = ldl_phys(table);
1045    type = (desc & 3);
1046    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
1047    if (type == 0) {
1048        /* Section translation fault.  */
1049        code = 5;
1050        goto do_fault;
1051    }
1052    if (domain == 0 || domain == 2) {
1053        if (type == 2)
1054            code = 9; /* Section domain fault.  */
1055        else
1056            code = 11; /* Page domain fault.  */
1057        goto do_fault;
1058    }
1059    if (type == 2) {
1060        /* 1Mb section.  */
1061        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1062        ap = (desc >> 10) & 3;
1063        code = 13;
1064        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
1074        desc = ldl_phys(table);
1075        switch (desc & 3) {
1076        case 0: /* Page translation fault.  */
1077            code = 7;
1078            goto do_fault;
1079        case 1: /* 64k page.  */
1080            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1081            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1082            *page_size = 0x10000;
1083            break;
1084        case 2: /* 4k page.  */
1085            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1086            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1087            *page_size = 0x1000;
1088            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
1101            ap = (desc >> 4) & 3;
1102            *page_size = 0x400;
1103            break;
1104        default:
1105            /* Never happens, but compiler isn't smart enough to tell.  */
1106            abort();
1107        }
1108        code = 15;
1109    }
1110    *prot = check_ap(env, ap, domain, access_type, is_user);
1111    if (!*prot) {
1112        /* Access permission fault.  */
1113        goto do_fault;
1114    }
1115    *prot |= PAGE_EXEC;
1116    *phys_ptr = phys_addr;
1117    return 0;
1118do_fault:
1119    return code | (domain << 4);
1120}
1121
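/* ARMv6/v7-format table walk, including supersections, the XN bit and the
   simplified access-permission model.  */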
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot,
                            target_ulong *page_size)
1125{
1126    int code;
1127    uint32_t table;
1128    uint32_t desc;
1129    uint32_t xn;
1130    int type;
1131    int ap;
1132    int domain;
1133    uint32_t phys_addr;
1134
1135    /* Pagetable walk.  */
1136    /* Lookup l1 descriptor.  */
1137    table = get_level1_table_address(env, address);
1138    desc = ldl_phys(table);
1139    type = (desc & 3);
1140    if (type == 0 || type == 3) {
1141        /* Section translation fault.  */
1142        code = 5;
1143        domain = 0;
1144        goto do_fault;
1145    } else if (type == 2 && (desc & (1 << 18))) {
1146        /* Supersection.  */
1147        domain = 0;
1148    } else {
1149        /* Section or page.  */
1150        domain = (desc >> 4) & 0x1e;
1151    }
1152    domain = (env->cp15.c3 >> domain) & 3;
1153    if (domain == 0 || domain == 2) {
1154        if (type == 2)
1155            code = 9; /* Section domain fault.  */
1156        else
1157            code = 11; /* Page domain fault.  */
1158        goto do_fault;
1159    }
1160    if (type == 2) {
1161        if (desc & (1 << 18)) {
1162            /* Supersection.  */
1163            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
1164            *page_size = 0x1000000;
1165        } else {
1166            /* Section.  */
1167            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1168            *page_size = 0x100000;
1169        }
1170        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
1171        xn = desc & (1 << 4);
1172        code = 13;
1173    } else {
1174        /* Lookup l2 entry.  */
1175        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1176        desc = ldl_phys(table);
1177        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
1178        switch (desc & 3) {
1179        case 0: /* Page translation fault.  */
1180            code = 7;
1181            goto do_fault;
1182        case 1: /* 64k page.  */
1183            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1184            xn = desc & (1 << 15);
1185            *page_size = 0x10000;
1186            break;
1187        case 2: case 3: /* 4k page.  */
1188            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1189            xn = desc & 1;
1190            *page_size = 0x1000;
1191            break;
1192        default:
1193            /* Never happens, but compiler isn't smart enough to tell.  */
1194            abort();
1195        }
1196        code = 15;
1197    }
    if (domain == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit.  */
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
            /* Access flag fault.  */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
1219    *phys_ptr = phys_addr;
1220    return 0;
1221do_fault:
1222    return code | (domain << 4);
1223}
1224
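/* MPU translation: find the highest-numbered matching region and derive
   the permissions from the c5 access-permission registers.  */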
static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
                             int is_user, uint32_t *phys_ptr, int *prot)
1227{
1228    int n;
1229    uint32_t mask;
1230    uint32_t base;
1231
1232    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
1281    *prot |= PAGE_EXEC;
1282    return 0;
1283}
1284
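/* Top-level address translation: apply the FCSE remapping, then dispatch
   to the MPU, v6 or v5 walk (or pass the address through if the MMU/MPU
   is disabled).  Exported (non-static) when CONFIG_GLES2 is defined.  */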
1285#ifdef CONFIG_GLES2
1286int get_phys_addr(CPUState *env, uint32_t address,
1287                  int access_type, int is_user,
1288                  uint32_t *phys_ptr, int *prot,
1289                  target_ulong *page_size);
1290#else
1291static
1292#endif
1293int get_phys_addr(CPUState *env, uint32_t address,
1294                                int access_type, int is_user,
1295                                uint32_t *phys_ptr, int *prot,
1296                                target_ulong *page_size)
1297{
1298    /* Fast Context Switch Extension.  */
1299    if (address < 0x02000000)
1300        address += env->cp15.c13_fcse;
1301
1302    if ((env->cp15.c1_sys & 1) == 0) {
1303        /* MMU/MPU disabled.  */
1304        *phys_ptr = address;
1305        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1306        *page_size = TARGET_PAGE_SIZE;
1307        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
1312    } else if (env->cp15.c1_sys & (1 << 23)) {
1313        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
1314                                prot, page_size);
1315    } else {
1316        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
1317                                prot, page_size);
1318    }
1319}
1320
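/* Handle a TLB miss: translate the address and install the mapping, or
   record the fault status and address in cp15 c5/c6 and raise a prefetch
   or data abort.  */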
1321int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
1322                              int access_type, int mmu_idx, int is_softmmu)
1323{
1324    uint32_t phys_addr;
1325    target_ulong page_size;
1326    int prot;
1327    int ret, is_user;
1328
1329    is_user = mmu_idx == MMU_USER_IDX;
1330    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
1331                        &page_size);
1332    if (ret == 0) {
1333        /* Map a single [sub]page.  */
1334        phys_addr &= ~(uint32_t)0x3ff;
1335        address &= ~(uint32_t)0x3ff;
1336        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
1337        return 0;
1338    }
1339
1340    if (access_type == 2) {
1341        env->cp15.c5_insn = ret;
1342        env->cp15.c6_insn = address;
1343        env->exception_index = EXCP_PREFETCH_ABORT;
1344    } else {
1345        env->cp15.c5_data = ret;
1346        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
1347            env->cp15.c5_data |= (1 << 11);
1348        env->cp15.c6_data = address;
1349        env->exception_index = EXCP_DATA_ABORT;
1350    }
1351    return 1;
1352}
1353
1354target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1355{
1356    uint32_t phys_addr;
1357    target_ulong page_size;
1358    int prot;
1359    int ret;
1360
1361    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);
1362
1363    if (ret != 0)
1364        return -1;
1365
1366    return phys_addr;
1367}
1368
1369/* Return basic MPU access permission bits.  */
1370static uint32_t simple_mpu_ap_bits(uint32_t val)
1371{
1372    uint32_t ret;
1373    uint32_t mask;
1374    int i;
1375    ret = 0;
1376    mask = 3;
1377    for (i = 0; i < 16; i += 2) {
1378        ret |= (val >> i) & mask;
1379        mask <<= 2;
1380    }
1381    return ret;
1382}
1383
1384/* Pad basic MPU access permission bits to extended format.  */
1385static uint32_t extended_mpu_ap_bits(uint32_t val)
1386{
1387    uint32_t ret;
1388    uint32_t mask;
1389    int i;
1390    ret = 0;
1391    mask = 3;
1392    for (i = 0; i < 16; i += 2) {
1393        ret |= (val & mask) << i;
1394        mask <<= 2;
1395    }
1396    return ret;
1397}
1398
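/* MCR (write) handler for cp15: op1, op2 and CRm are decoded from the
   'insn' value, and the switch below dispatches on CRn (bits [19:16]).  */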
1399void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
1400{
1401    int op1;
1402    int op2;
1403    int crm;
1404
1405    op1 = (insn >> 21) & 7;
1406    op2 = (insn >> 5) & 7;
1407    crm = insn & 0xf;
1408    switch ((insn >> 16) & 0xf) {
1409    case 0:
1410        /* ID codes.  */
1411        if (arm_feature(env, ARM_FEATURE_XSCALE))
1412            break;
1413        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1414            break;
1415        if (arm_feature(env, ARM_FEATURE_V7)
1416                && op1 == 2 && crm == 0 && op2 == 0) {
1417            env->cp15.c0_cssel = val & 0xf;
1418            break;
1419        }
1420        goto bad_reg;
1421    case 1: /* System configuration.  */
        switch (crm) {
        case 0:
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                if (!arm_feature(env, ARM_FEATURE_XSCALE))
                    env->cp15.c1_sys = val;
                /* ??? Lots of these bits are not implemented.  */
                /* This may enable/disable the MMU, so do a TLB flush.  */
                tlb_flush(env, 1);
                break;
            case 1: /* Auxiliary control register.  */
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    env->cp15.c1_xscaleauxcr = val;
                    break;
                }
                /* Not implemented.  */
                break;
            case 2:
                if (arm_feature(env, ARM_FEATURE_XSCALE))
                    goto bad_reg;
                if (env->cp15.c1_coproc != val) {
                    env->cp15.c1_coproc = val;
                    /* ??? Is this safe when called from within a TB?  */
                    tb_flush(env);
                }
                break;
            default:
                goto bad_reg;
            }
            break;
1454        case 1:
1455            if (!arm_feature(env, ARM_FEATURE_TRUSTZONE)
1456                || (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
1457                goto bad_reg;
1458            switch (op2) {
1459            case 0: /* Secure configuration register. */
1460                if (env->cp15.c1_secfg & 1)
1461                    goto bad_reg;
1462                env->cp15.c1_secfg = val;
1463                break;
1464            case 1: /* Secure debug enable register. */
1465                if (env->cp15.c1_secfg & 1)
1466                    goto bad_reg;
1467                env->cp15.c1_sedbg = val;
1468                break;
1469            case 2: /* Nonsecure access control register. */
1470                if (env->cp15.c1_secfg & 1)
1471                    goto bad_reg;
1472                env->cp15.c1_nseac = val;
1473                break;
1474            default:
1475                goto bad_reg;
1476            }
1477            break;
1478        default:
1479            goto bad_reg;
1480        }
1481        break;
1482    case 2: /* MMU Page table control / MPU cache control.  */
1483        if (arm_feature(env, ARM_FEATURE_MPU)) {
1484            switch (op2) {
1485            case 0:
1486                env->cp15.c2_data = val;
1487                break;
1488            case 1:
1489                env->cp15.c2_insn = val;
1490                break;
1491            default:
1492                goto bad_reg;
1493            }
1494        } else {
            switch (op2) {
            case 0:
                env->cp15.c2_base0 = val;
                break;
            case 1:
                env->cp15.c2_base1 = val;
                break;
            case 2:
                val &= 7;
                env->cp15.c2_control = val;
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
                break;
            default:
                goto bad_reg;
            }
1511        }
1512        break;
1513    case 3: /* MMU Domain access control / MPU write buffer control.  */
1514        env->cp15.c3 = val;
1515        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
1516        break;
1517    case 4: /* Reserved.  */
1518        goto bad_reg;
1519    case 5: /* MMU Fault status / MPU access permission.  */
1520        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1521            op2 = 0;
1522        switch (op2) {
1523        case 0:
1524            if (arm_feature(env, ARM_FEATURE_MPU))
1525                val = extended_mpu_ap_bits(val);
1526            env->cp15.c5_data = val;
1527            break;
1528        case 1:
1529            if (arm_feature(env, ARM_FEATURE_MPU))
1530                val = extended_mpu_ap_bits(val);
1531            env->cp15.c5_insn = val;
1532            break;
1533        case 2:
1534            if (!arm_feature(env, ARM_FEATURE_MPU))
1535                goto bad_reg;
1536            env->cp15.c5_data = val;
1537            break;
1538        case 3:
1539            if (!arm_feature(env, ARM_FEATURE_MPU))
1540                goto bad_reg;
1541            env->cp15.c5_insn = val;
1542            break;
1543        default:
1544            goto bad_reg;
1545        }
1546        break;
1547    case 6: /* MMU Fault address / MPU base/size.  */
1548        if (arm_feature(env, ARM_FEATURE_MPU)) {
1549            if (crm >= 8)
1550                goto bad_reg;
1551            env->cp15.c6_region[crm] = val;
1552        } else {
1553            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1554                op2 = 0;
1555            switch (op2) {
1556            case 0:
1557                env->cp15.c6_data = val;
1558                break;
1559            case 1: /* ??? This is WFAR on armv6 */
1560            case 2:
1561                env->cp15.c6_insn = val;
1562                break;
1563            default:
1564                goto bad_reg;
1565            }
1566        }
1567        break;
1568    case 7: /* Cache control.  */
1569        env->cp15.c15_i_max = 0x000;
1570        env->cp15.c15_i_min = 0xff0;
1571        if (op1 != 0) {
1572            goto bad_reg;
1573        }
1574        /* No cache, so nothing to do except VA->PA translations. */
1575        if (arm_feature(env, ARM_FEATURE_V6K)) {
1576            switch (crm) {
1577            case 4:
1578                if (arm_feature(env, ARM_FEATURE_V7)) {
1579                    env->cp15.c7_par = val & 0xfffff6ff;
1580                } else {
1581                    env->cp15.c7_par = val & 0xfffff1ff;
1582                }
1583                break;
1584            case 8: {
1585                uint32_t phys_addr;
1586                target_ulong page_size;
1587                int prot;
1588                int ret, is_user = op2 & 2;
1589                int access_type = op2 & 1;
1590
1591                if (op2 & 4) {
1592                    /* Other states are only available with TrustZone */
1593                    goto bad_reg;
1594                }
1595                ret = get_phys_addr(env, val, access_type, is_user,
1596                                    &phys_addr, &prot, &page_size);
1597                if (ret == 0) {
1598                    /* We do not set any attribute bits in the PAR */
1599                    if (page_size == (1 << 24)
1600                        && arm_feature(env, ARM_FEATURE_V7)) {
1601                        env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
1602                    } else {
1603                        env->cp15.c7_par = phys_addr & 0xfffff000;
1604                    }
1605                } else {
1606                    env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
1607                                       ((ret & (12 << 1)) >> 6) |
1608                                       ((ret & 0xf) << 1) | 1;
1609                }
1610                break;
1611            }
1612            }
1613        }
1614        break;
1615    case 8: /* MMU TLB control.  */
1616        switch (op2) {
1617        case 0: /* Invalidate all.  */
1618            tlb_flush(env, 0);
1619            break;
1620        case 1: /* Invalidate single TLB entry.  */
1621            tlb_flush_page(env, val & TARGET_PAGE_MASK);
1622            break;
1623        case 2: /* Invalidate on ASID.  */
1624            tlb_flush(env, val == 0);
1625            break;
1626        case 3: /* Invalidate single entry on MVA.  */
1627            /* ??? This is like case 1, but ignores ASID.  */
1628            tlb_flush(env, 1);
1629            break;
1630        default:
1631            goto bad_reg;
1632        }
1633        break;
1634    case 9:
1635        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1636            break;
1637        if (arm_feature(env, ARM_FEATURE_STRONGARM))
1638            break; /* Ignore ReadBuffer access */
1639        switch (crm) {
1640        case 0: /* Cache lockdown.  */
            switch (op1) {
            case 0: /* L1 cache.  */
                switch (op2) {
                case 0:
                    env->cp15.c9_data = val;
                    break;
                case 1:
                    env->cp15.c9_insn = val;
                    break;
                default:
                    goto bad_reg;
                }
                break;
            case 1: /* L2 cache.  */
                switch (op2) {
                case 0: /* L2 cache lockdown */
                case 2: /* L2 cache auxiliary control */
                    /* ignore */
                    break;
                default:
                    goto bad_reg;
                }
                break;
            default:
                goto bad_reg;
            }
            break;
1668        case 1: /* TCM memory region registers.  */
1669        case 2:
1670            /* Not implemented.  */
1671            goto bad_reg;
1672        case 12: /* performance monitor control */
1673            if (arm_feature(env, ARM_FEATURE_V7)) {
1674                switch (op2) {
1675                case 0: /* performance monitor control */
1676                    env->cp15.c9_pmcr_data = val;
1677                    break;
1678                case 1: /* count enable set */
1679                case 2: /* count enable clear */
1680                case 3: /* overflow flag status */
1681                case 4: /* software increment */
1682                case 5: /* performance counter selection */
1683                    /* not implemented */
1684                    goto bad_reg;
1685                default:
1686                    goto bad_reg;
1687                }
1688            } else {
1689                goto bad_reg;
1690            }
1691            break;
1692        case 13: /* performance counters */
1693            if (arm_feature(env, ARM_FEATURE_V7)) {
1694                switch (op2) {
1695                case 0: /* cycle count */
1696                case 1: /* event selection */
1697                case 2: /* performance monitor count */
1698                    /* not implemented */
1699                    goto bad_reg;
1700                default:
1701                    goto bad_reg;
1702                }
1703            } else {
1704                goto bad_reg;
1705            }
1706            break;
1707        case 14: /* performance monitor control */
1708            if (arm_feature(env, ARM_FEATURE_V7)) {
1709                switch (op2) {
1710                case 0: /* user enable */
1711                    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
1712                        goto bad_reg;
1713                    }
1714                    env->cp15.c9_useren = val & 1;
1715                    break;
1716                case 1: /* interrupt enable set */
1717                    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
1718                        goto bad_reg;
1719                    }
1720                    env->cp15.c9_inten |= val & 0xf;
1721                    break;
1722                case 2: /* interrupt enable clear */
1723                    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
1724                        goto bad_reg;
1725                    }
1726                    env->cp15.c9_inten &= ~(val & 0xf);
1727                    break;
1728                default:
1729                    goto bad_reg;
1730                }
1731            } else {
1732                goto bad_reg;
1733            }
1734            break;
1735        default:
1736            goto bad_reg;
1737        }
1738        break;
1739    case 10: /* MMU TLB lockdown.  */
1740        /* ??? TLB lockdown not implemented.  */
1741        break;
1742    case 12: /* Reserved.  */
1743        if (!op1 && !crm) {
1744            switch (op2) {
1745            case 0:
1746                if (!arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
1747                    goto bad_reg;
1748                }
1749                env->cp15.c12_vbar = val & ~0x1f;
1750                break;
1751            case 1:
1752                if (!arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
1753                    goto bad_reg;
1754                }
1755                if (!(env->cp15.c1_secfg & 1)) {
1756                    env->cp15.c12_mvbar = val & ~0x1f;
1757                }
1758                break;
1759            default:
1760                goto bad_reg;
1761            }
1762            break;
1763        }
1764        goto bad_reg;
1765    case 13: /* Process ID.  */
1766        switch (op2) {
1767        case 0:
1768            /* Unlike real hardware the qemu TLB uses virtual addresses,
1769               not modified virtual addresses, so this causes a TLB flush.
1770             */
1771            if (env->cp15.c13_fcse != val)
1772              tlb_flush(env, 1);
1773            env->cp15.c13_fcse = val;
1774            break;
1775        case 1:
1776            /* This changes the ASID, so do a TLB flush.  */
1777            if (env->cp15.c13_context != val
1778                && !arm_feature(env, ARM_FEATURE_MPU))
1779              tlb_flush(env, 0);
1780            env->cp15.c13_context = val;
1781            break;
1782        default:
1783            goto bad_reg;
1784        }
1785        break;
1786    case 14: /* Reserved.  */
1787        goto bad_reg;
1788    case 15: /* Implementation specific.  */
1789        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1790            if (op2 == 0 && crm == 1) {
1791                if (env->cp15.c15_cpar != (val & 0x3fff)) {
1792                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
1793                    tb_flush(env);
1794                    env->cp15.c15_cpar = val & 0x3fff;
1795                }
1796                break;
1797            }
1798            goto bad_reg;
1799        }
1800        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1801            switch (crm) {
1802            case 0:
1803                break;
1804            case 1: /* Set TI925T configuration.  */
1805                env->cp15.c15_ticonfig = val & 0xe7;
1806                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
1807                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1808                break;
1809            case 2: /* Set I_max.  */
1810                env->cp15.c15_i_max = val;
1811                break;
1812            case 3: /* Set I_min.  */
1813                env->cp15.c15_i_min = val;
1814                break;
1815            case 4: /* Set thread-ID.  */
1816                env->cp15.c15_threadid = val & 0xffff;
1817                break;
1818            case 8: /* Wait-for-interrupt (deprecated).  */
1819                cpu_interrupt(env, CPU_INTERRUPT_HALT);
1820                break;
1821            default:
1822                goto bad_reg;
1823            }
1824        }
1825        break;
1826    }
1827    return;
1828bad_reg:
1829    /* ??? For debugging only.  Should raise illegal instruction exception.  */
1830    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1831              (insn >> 16) & 0xf, crm, op1, op2);
1832}
1833
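/* Read side of the cp15 interface.  The fields of the MRC encoding are
 * picked apart the same way as on the write path: opc1 = insn[23:21],
 * opc2 = insn[7:5], CRm = insn[3:0], and CRn (the major register number,
 * insn[19:16]) selects the outer switch case.
 */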
1834uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
1835{
1836    int op1;
1837    int op2;
1838    int crm;
1839
1840    op1 = (insn >> 21) & 7;
1841    op2 = (insn >> 5) & 7;
1842    crm = insn & 0xf;
1843    switch ((insn >> 16) & 0xf) {
1844    case 0: /* ID codes.  */
1845        switch (op1) {
1846        case 0:
1847            switch (crm) {
1848            case 0:
1849                switch (op2) {
1850                case 0: /* Device ID.  */
1851                    return env->cp15.c0_cpuid;
1852                case 1: /* Cache Type.  */
1853                    return env->cp15.c0_cachetype;
1854                case 2: /* TCM status.  */
1855                    return 0;
1856                case 3: /* TLB type register.  */
1857                    return 0; /* No lockable TLB entries.  */
1858                case 5: /* MPIDR */
1859                    /* The MPIDR was standardised in v7; prior to
1860                     * this it was implemented only in the 11MPCore.
1861                     * For all other pre-v7 cores it does not exist.
1862                     */
1863                    if (arm_feature(env, ARM_FEATURE_V7) ||
1864                        ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) {
1865                        int mpidr = env->cpu_index;
1866                        /* We don't support setting cluster ID ([8..11])
1867                         * so these bits always RAZ.
1868                         */
1869                        if (arm_feature(env, ARM_FEATURE_V7MP)) {
1870                            mpidr |= (1 << 31);
1871                            /* Cores which are uniprocessor (non-coherent)
1872                             * but still implement the MP extensions set
1873                             * bit 30. (For instance, A9UP.) However we do
1874                             * not currently model any of those cores.
1875                             */
1876                        }
1877                        return mpidr;
1878                    }
1879                    /* otherwise fall through to the unimplemented-reg case */
1880                default:
1881                    goto bad_reg;
1882                }
1883            case 1:
1884                if (!arm_feature(env, ARM_FEATURE_V6))
1885                    goto bad_reg;
1886                return env->cp15.c0_c1[op2];
1887            case 2:
1888                if (!arm_feature(env, ARM_FEATURE_V6))
1889                    goto bad_reg;
1890                return env->cp15.c0_c2[op2];
1891            case 3: case 4: case 5: case 6: case 7:
1892                return 0;
1893            default:
1894                goto bad_reg;
1895            }
1896            break;
1897        case 1:
1898            /* These registers aren't documented on arm11 cores.  However
1899               Linux looks at them anyway.  */
1900            if (!arm_feature(env, ARM_FEATURE_V6))
1901                goto bad_reg;
1902            if (crm != 0)
1903                goto bad_reg;
1904            if (!arm_feature(env, ARM_FEATURE_V7))
1905                return 0;
1906
1907            switch (op2) {
1908            case 0:
1909                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
1910            case 1:
1911                return env->cp15.c0_clid;
1912            case 7:
1913                return 0;
1914            }
1915            goto bad_reg;
1916        case 2:
1917            if (op2 != 0 || crm != 0)
1918                goto bad_reg;
1919            return env->cp15.c0_cssel;
1920        default:
1921            goto bad_reg;
1922        }
1923        break;
1924    case 1: /* System configuration.  */
1925        switch (crm) {
1926        case 0:
1927            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1928                op2 = 0;
1929            switch (op2) {
1930            case 0: /* Control register.  */
1931                return env->cp15.c1_sys;
1932            case 1: /* Auxiliary control register.  */
1933                if (arm_feature(env, ARM_FEATURE_XSCALE))
1934                    return env->cp15.c1_xscaleauxcr;
1935                if (!arm_feature(env, ARM_FEATURE_AUXCR))
1936                    goto bad_reg;
1937                switch (ARM_CPUID(env)) {
1938                case ARM_CPUID_ARM1026:
1939                    return 1;
1940                case ARM_CPUID_ARM1136:
1941                case ARM_CPUID_ARM1136_R2:
1942                    return 7;
1943                case ARM_CPUID_ARM11MPCORE:
1944                    return 1;
1945                case ARM_CPUID_CORTEXA8:
1946                case ARM_CPUID_CORTEXA8_R2:
1947                    return 2;
1948                case ARM_CPUID_CORTEXA9:
1949                    return 0;
1950                default:
1951                    goto bad_reg;
1952                }
1953                break;
1954            case 2: /* Coprocessor access register.  */
1955                if (arm_feature(env, ARM_FEATURE_XSCALE))
1956                    goto bad_reg;
1957                return env->cp15.c1_coproc;
1958            default:
1959                goto bad_reg;
1960            }
1961            break;
1962        case 1:
1963            if (!arm_feature(env, ARM_FEATURE_TRUSTZONE)
1964                || (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
1965                goto bad_reg;
1966            switch (op2) {
1967            case 0: /* Secure configuration register. */
1968                if (env->cp15.c1_secfg & 1)
1969                    goto bad_reg;
1970                return env->cp15.c1_secfg;
1971            case 1: /* Secure debug enable register. */
1972                if (env->cp15.c1_secfg & 1)
1973                    goto bad_reg;
1974                return env->cp15.c1_sedbg;
1975            case 2: /* Nonsecure access control register. */
1976                return env->cp15.c1_nseac;
1977            default:
1978                goto bad_reg;
1979            }
1980            break;
1981        default:
1982            goto bad_reg;
1983        }
1984        break;
1985    case 2: /* MMU Page table control / MPU cache control.  */
1986        if (arm_feature(env, ARM_FEATURE_MPU)) {
1987            switch (op2) {
1988            case 0:
1989                return env->cp15.c2_data;
1990                break;
1991            case 1:
1992                return env->cp15.c2_insn;
1993                break;
1994            default:
1995                goto bad_reg;
1996            }
1997        } else {
1998            switch (op2) {
1999            case 0:
2000                return env->cp15.c2_base0;
2001            case 1:
2002                return env->cp15.c2_base1;
2003            case 2:
2004                return env->cp15.c2_control;
2005            default:
2006                goto bad_reg;
2007            }
2008        }
2009    case 3: /* MMU Domain access control / MPU write buffer control.  */
2010        return env->cp15.c3;
2011    case 4: /* Reserved.  */
2012        goto bad_reg;
2013    case 5: /* MMU Fault status / MPU access permission.  */
2014        if (arm_feature(env, ARM_FEATURE_OMAPCP))
2015            op2 = 0;
2016        switch (op2) {
2017        case 0:
2018            if (arm_feature(env, ARM_FEATURE_MPU))
2019                return simple_mpu_ap_bits(env->cp15.c5_data);
2020            return env->cp15.c5_data;
2021        case 1:
2022            if (arm_feature(env, ARM_FEATURE_MPU))
2023                return simple_mpu_ap_bits(env->cp15.c5_insn);
2024            return env->cp15.c5_insn;
2025        case 2:
2026            if (!arm_feature(env, ARM_FEATURE_MPU))
2027                goto bad_reg;
2028            return env->cp15.c5_data;
2029        case 3:
2030            if (!arm_feature(env, ARM_FEATURE_MPU))
2031                goto bad_reg;
2032            return env->cp15.c5_insn;
2033        default:
2034            goto bad_reg;
2035        }
2036    case 6: /* MMU Fault address.  */
2037        if (arm_feature(env, ARM_FEATURE_MPU)) {
2038            if (crm >= 8)
2039                goto bad_reg;
2040            return env->cp15.c6_region[crm];
2041        } else {
2042            if (arm_feature(env, ARM_FEATURE_OMAPCP))
2043                op2 = 0;
2044            switch (op2) {
2045            case 0:
2046                return env->cp15.c6_data;
2047            case 1:
2048                if (arm_feature(env, ARM_FEATURE_V6)) {
2049                    /* Watchpoint Fault Address.  */
2050                    return 0; /* Not implemented.  */
2051                }
2052                /* Instruction Fault Address.  */
2053                /* Arm9 doesn't have an IFAR, but implementing it anyway
2054                   shouldn't do any harm.  */
2055                return env->cp15.c6_insn;
2056            case 2:
2057                if (arm_feature(env, ARM_FEATURE_V6)) {
2058                    /* Instruction Fault Address.  */
2059                    return env->cp15.c6_insn;
2060                }
2061                goto bad_reg;
2062            default:
2063                goto bad_reg;
2064            }
2065        }
2066    case 7: /* Cache control.  */
2067        if (crm == 4 && op1 == 0 && op2 == 0) {
2068            return env->cp15.c7_par;
2069        }
2070        if (((insn >> 12) & 0xf) == 0xf) /* clear ZF only if destination is r15 */
2071            env->ZF = 0;
2072        return 0;
2073    case 8: /* MMU TLB control.  */
2074        goto bad_reg;
2075    case 9: /* Cache lockdown.  */
2076        switch (op1) {
2077        case 0:
2078            if (arm_feature(env, ARM_FEATURE_OMAPCP))
2079                return 0;
2080            switch (crm) {
2081            case 0: /* L1 cache */
2082                switch (op2) {
2083                case 0:
2084                    return env->cp15.c9_data;
2085                case 1:
2086                    return env->cp15.c9_insn;
2087                default:
2088                    goto bad_reg;
2089                }
2090                break;
2091            case 12:
2092                switch (op2) {
2093                case 0:
2094                    return env->cp15.c9_pmcr_data;
2095                default:
2096                    goto bad_reg;
2097                }
2098                break;
2099            case 14: /* performance monitor control */
2100                if (arm_feature(env, ARM_FEATURE_V7)) {
2101                    switch (op2) {
2102                    case 0: /* user enable */
2103                        return env->cp15.c9_useren;
2104                    case 1: /* interrupt enable set */
2105                    case 2: /* interrupt enable clear */
2106                        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
2107                            goto bad_reg;
2108                        }
2109                        return env->cp15.c9_inten;
2110                    default:
2111                        goto bad_reg;
2112                    }
2113                } else {
2114                    goto bad_reg;
2115                }
2116                break;
2117            default:
2118                goto bad_reg;
2119            }
2120            break;
2121        case 1: /* L2 cache */
2122            if (crm != 0)
2123                goto bad_reg;
2124            /* L2 Lockdown and Auxiliary control.  */
2125            return 0;
2126        default:
2127            goto bad_reg;
2128        }
2129    case 10: /* MMU TLB lockdown.  */
2130        /* ??? TLB lockdown not implemented.  */
2131        return 0;
2132    case 11: /* TCM DMA control.  */
2133    case 12: /* Reserved.  */
2134        if (!op1 && !crm) {
2135            switch (op2) {
2136            case 0: /* secure or nonsecure vector base address */
2137                if (arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
2138                    return env->cp15.c12_vbar;
2139                }
2140                break;
2141            case 1: /* monitor vector base address */
2142                if (arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
2143                    return env->cp15.c12_mvbar;
2144                }
2145                break;
2146            default:
2147                break;
2148            }
2149        }
2150        goto bad_reg;
2151    case 13: /* Process ID.  */
2152        switch (op2) {
2153        case 0:
2154            return env->cp15.c13_fcse;
2155        case 1:
2156            return env->cp15.c13_context;
2157        default:
2158            goto bad_reg;
2159        }
2160    case 14: /* Reserved.  */
2161        goto bad_reg;
2162    case 15: /* Implementation specific.  */
2163        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2164            if (op2 == 0 && crm == 1)
2165                return env->cp15.c15_cpar;
2166
2167            goto bad_reg;
2168        }
2169        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2170            switch (crm) {
2171            case 0:
2172                return 0;
2173            case 1: /* Read TI925T configuration.  */
2174                return env->cp15.c15_ticonfig;
2175            case 2: /* Read I_max.  */
2176                return env->cp15.c15_i_max;
2177            case 3: /* Read I_min.  */
2178                return env->cp15.c15_i_min;
2179            case 4: /* Read thread-ID.  */
2180                return env->cp15.c15_threadid;
2181            case 8: /* TI925T_status */
2182                return 0;
2183            }
2184            /* TODO: Peripheral port remap register:
2185             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
2186             * controller base address at $rn & ~0xfff and map size of
2187             * 0x200 << ($rn & 0xfff), when MMU is off.  */
2188            goto bad_reg;
2189        }
2190        return 0;
2191    }
2192bad_reg:
2193    /* ??? For debugging only.  Should raise illegal instruction exception.  */
2194    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
2195              (insn >> 16) & 0xf, crm, op1, op2);
2196    return 0;
2197}
2198
2199void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
2200{
2201    if ((env->uncached_cpsr & CPSR_M) == mode) {
2202        env->regs[13] = val;
2203    } else {
2204        env->banked_r13[bank_number(mode)] = val;
2205    }
2206}
2207
2208uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
2209{
2210    if ((env->uncached_cpsr & CPSR_M) == mode) {
2211        return env->regs[13];
2212    } else {
2213        return env->banked_r13[bank_number(mode)];
2214    }
2215}
2216
2217uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
2218{
2219    switch (reg) {
2220    case 0: /* APSR */
2221        return xpsr_read(env) & 0xf8000000;
2222    case 1: /* IAPSR */
2223        return xpsr_read(env) & 0xf80001ff;
2224    case 2: /* EAPSR */
2225        return xpsr_read(env) & 0xff00fc00;
2226    case 3: /* xPSR */
2227        return xpsr_read(env) & 0xff00fdff;
2228    case 5: /* IPSR */
2229        return xpsr_read(env) & 0x000001ff;
2230    case 6: /* EPSR */
2231        return xpsr_read(env) & 0x0700fc00;
2232    case 7: /* IEPSR */
2233        return xpsr_read(env) & 0x0700edff;
2234    case 8: /* MSP */
2235        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
2236    case 9: /* PSP */
2237        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
2238    case 16: /* PRIMASK */
2239        return (env->uncached_cpsr & CPSR_I) != 0;
2240    case 17: /* FAULTMASK */
2241        return (env->uncached_cpsr & CPSR_F) != 0;
2242    case 18: /* BASEPRI */
2243    case 19: /* BASEPRI_MAX */
2244        return env->v7m.basepri;
2245    case 20: /* CONTROL */
2246        return env->v7m.control;
2247    default:
2248        /* ??? For debugging only.  */
2249        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
2250        return 0;
2251    }
2252}
2253
2254void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
2255{
2256    switch (reg) {
2257    case 0: /* APSR */
2258        xpsr_write(env, val, 0xf8000000);
2259        break;
2260    case 1: /* IAPSR */
2261        xpsr_write(env, val, 0xf8000000);
2262        break;
2263    case 2: /* EAPSR */
2264        xpsr_write(env, val, 0xfe00fc00);
2265        break;
2266    case 3: /* xPSR */
2267        xpsr_write(env, val, 0xfe00fc00);
2268        break;
2269    case 5: /* IPSR */
2270        /* IPSR bits are readonly.  */
2271        break;
2272    case 6: /* EPSR */
2273        xpsr_write(env, val, 0x0600fc00);
2274        break;
2275    case 7: /* IEPSR */
2276        xpsr_write(env, val, 0x0600fc00);
2277        break;
2278    case 8: /* MSP */
2279        if (env->v7m.current_sp)
2280            env->v7m.other_sp = val;
2281        else
2282            env->regs[13] = val;
2283        break;
2284    case 9: /* PSP */
2285        if (env->v7m.current_sp)
2286            env->regs[13] = val;
2287        else
2288            env->v7m.other_sp = val;
2289        break;
2290    case 16: /* PRIMASK */
2291        if (val & 1)
2292            env->uncached_cpsr |= CPSR_I;
2293        else
2294            env->uncached_cpsr &= ~CPSR_I;
2295        break;
2296    case 17: /* FAULTMASK */
2297        if (val & 1)
2298            env->uncached_cpsr |= CPSR_F;
2299        else
2300            env->uncached_cpsr &= ~CPSR_F;
2301        break;
2302    case 18: /* BASEPRI */
2303        env->v7m.basepri = val & 0xff;
2304        break;
2305    case 19: /* BASEPRI_MAX */
2306        val &= 0xff;
2307        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
2308            env->v7m.basepri = val;
2309        break;
2310    case 20: /* CONTROL */
2311        env->v7m.control = val & 3;
2312        switch_v7m_sp(env, (val & 2) != 0);
2313        break;
2314    default:
2315        /* ??? For debugging only.  */
2316        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
2317        return;
2318    }
2319}
2320
2321void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
2322                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
2323                void *opaque)
2324{
2325    if (cpnum < 0 || cpnum > 14) {
2326        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
2327        return;
2328    }
2329
2330    env->cp[cpnum].cp_read = cp_read;
2331    env->cp[cpnum].cp_write = cp_write;
2332    env->cp[cpnum].opaque = opaque;
2333}
2334
2335#endif
2336
2337/* Note that signed overflow is undefined in C.  The following routines are
2338   careful to use unsigned types where modulo arithmetic is required.
2339   Failure to do so _will_ break on newer gcc.  */
2340
2341/* Signed saturating arithmetic.  */
2342
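/* Overflow detection below uses the standard two's-complement identity:
 * signed addition can only overflow when both operands have the same sign
 * and the result's sign differs ((res ^ a) & MSB, !((a ^ b) & MSB));
 * subtraction overflows when the operands have opposite signs and the
 * result's sign differs from the minuend.  For example 0x7000 + 0x7000 =
 * 0xe000 flips negative, so add16_sat clamps it to 0x7fff.
 */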
2343/* Perform 16-bit signed saturating addition.  */
2344static inline uint16_t add16_sat(uint16_t a, uint16_t b)
2345{
2346    uint16_t res;
2347
2348    res = a + b;
2349    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
2350        if (a & 0x8000)
2351            res = 0x8000;
2352        else
2353            res = 0x7fff;
2354    }
2355    return res;
2356}
2357
2358/* Perform 8-bit signed saturating addition.  */
2359static inline uint8_t add8_sat(uint8_t a, uint8_t b)
2360{
2361    uint8_t res;
2362
2363    res = a + b;
2364    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
2365        if (a & 0x80)
2366            res = 0x80;
2367        else
2368            res = 0x7f;
2369    }
2370    return res;
2371}
2372
2373/* Perform 16-bit signed saturating subtraction.  */
2374static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
2375{
2376    uint16_t res;
2377
2378    res = a - b;
2379    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
2380        if (a & 0x8000)
2381            res = 0x8000;
2382        else
2383            res = 0x7fff;
2384    }
2385    return res;
2386}
2387
2388/* Perform 8-bit signed saturating subtraction.  */
2389static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
2390{
2391    uint8_t res;
2392
2393    res = a - b;
2394    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
2395        if (a & 0x80)
2396            res = 0x80;
2397        else
2398            res = 0x7f;
2399    }
2400    return res;
2401}
2402
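/* The blocks below define the per-lane ADD16/SUB16/ADD8/SUB8 macros plus a
 * PFX helper-name prefix and then pull in op_addsub.h, which expands them
 * into the packed add/subtract helpers for each flavour: q (signed
 * saturating), uq (unsigned saturating), s/u (signed/unsigned modulo,
 * setting GE) and sh/uh (signed/unsigned halving).
 */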
2403#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2404#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2405#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
2406#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
2407#define PFX q
2408
2409#include "op_addsub.h"
2410
2411/* Unsigned saturating arithmetic.  */
2412static inline uint16_t add16_usat(uint16_t a, uint16_t b)
2413{
2414    uint16_t res;
2415    res = a + b;
2416    if (res < a)
2417        res = 0xffff;
2418    return res;
2419}
2420
2421static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
2422{
2423    if (a > b)
2424        return a - b;
2425    else
2426        return 0;
2427}
2428
2429static inline uint8_t add8_usat(uint8_t a, uint8_t b)
2430{
2431    uint8_t res;
2432    res = a + b;
2433    if (res < a)
2434        res = 0xff;
2435    return res;
2436}
2437
2438static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
2439{
2440    if (a > b)
2441        return a - b;
2442    else
2443        return 0;
2444}
2445
2446#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2447#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2448#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
2449#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
2450#define PFX uq
2451
2452#include "op_addsub.h"
2453
2454/* Signed modulo arithmetic.  */
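/* These lane macros also compute the CPSR.GE bits: a halfword lane whose
 * result is non-negative sets two GE bits, a byte lane sets one.  The SEL
 * helper (sel_flags, further down) later uses GE to pick bytes.
 */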
2455#define SARITH16(a, b, n, op) do { \
2456    int32_t sum; \
2457    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
2458    RESULT(sum, n, 16); \
2459    if (sum >= 0) \
2460        ge |= 3 << (n * 2); \
2461    } while(0)
2462
2463#define SARITH8(a, b, n, op) do { \
2464    int32_t sum; \
2465    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
2466    RESULT(sum, n, 8); \
2467    if (sum >= 0) \
2468        ge |= 1 << n; \
2469    } while(0)
2470
2471
2472#define ADD16(a, b, n) SARITH16(a, b, n, +)
2473#define SUB16(a, b, n) SARITH16(a, b, n, -)
2474#define ADD8(a, b, n)  SARITH8(a, b, n, +)
2475#define SUB8(a, b, n)  SARITH8(a, b, n, -)
2476#define PFX s
2477#define ARITH_GE
2478
2479#include "op_addsub.h"
2480
2481/* Unsigned modulo arithmetic.  */
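/* Here GE comes from the carry out of each lane: an addition sets it when
 * the lane result overflows 16 or 8 bits, a subtraction sets it when no
 * borrow was needed (i.e. a >= b).
 */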
2482#define ADD16(a, b, n) do { \
2483    uint32_t sum; \
2484    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
2485    RESULT(sum, n, 16); \
2486    if ((sum >> 16) == 1) \
2487        ge |= 3 << (n * 2); \
2488    } while(0)
2489
2490#define ADD8(a, b, n) do { \
2491    uint32_t sum; \
2492    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
2493    RESULT(sum, n, 8); \
2494    if ((sum >> 8) == 1) \
2495        ge |= 1 << n; \
2496    } while(0)
2497
2498#define SUB16(a, b, n) do { \
2499    uint32_t sum; \
2500    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
2501    RESULT(sum, n, 16); \
2502    if ((sum >> 16) == 0) \
2503        ge |= 3 << (n * 2); \
2504    } while(0)
2505
2506#define SUB8(a, b, n) do { \
2507    uint32_t sum; \
2508    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
2509    RESULT(sum, n, 8); \
2510    if ((sum >> 8) == 0) \
2511        ge |= 1 << n; \
2512    } while(0)
2513
2514#define PFX u
2515#define ARITH_GE
2516
2517#include "op_addsub.h"
2518
2519/* Halved signed arithmetic.  */
2520#define ADD16(a, b, n) \
2521  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
2522#define SUB16(a, b, n) \
2523  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
2524#define ADD8(a, b, n) \
2525  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
2526#define SUB8(a, b, n) \
2527  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
2528#define PFX sh
2529
2530#include "op_addsub.h"
2531
2532/* Halved unsigned arithmetic.  */
2533#define ADD16(a, b, n) \
2534  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2535#define SUB16(a, b, n) \
2536  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2537#define ADD8(a, b, n) \
2538  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2539#define SUB8(a, b, n) \
2540  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2541#define PFX uh
2542
2543#include "op_addsub.h"
2544
2545static inline uint8_t do_usad(uint8_t a, uint8_t b)
2546{
2547    if (a > b)
2548        return a - b;
2549    else
2550        return b - a;
2551}
2552
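/* Worked example for the byte-difference helper below:
 * usad8(0x01020304, 0x04030201) = |4-1| + |3-2| + |2-3| + |1-4| = 8.
 */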
2553/* Unsigned sum of absolute byte differences.  */
2554uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2555{
2556    uint32_t sum;
2557    sum = do_usad(a, b);
2558    sum += do_usad(a >> 8, b >> 8);
2559    sum += do_usad(a >> 16, b >> 16);
2560    sum += do_usad(a >> 24, b >> 24);
2561    return sum;
2562}
2563
2564/* For ARMv6 SEL instruction.  */
2565uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2566{
2567    uint32_t mask;
2568
2569    mask = 0;
2570    if (flags & 1)
2571        mask |= 0xff;
2572    if (flags & 2)
2573        mask |= 0xff00;
2574    if (flags & 4)
2575        mask |= 0xff0000;
2576    if (flags & 8)
2577        mask |= 0xff000000;
2578    return (a & mask) | (b & ~mask);
2579}
2580
2581uint32_t HELPER(logicq_cc)(uint64_t val)
2582{
2583    return (val >> 32) | (val != 0);
2584}
2585
2586/* VFP support.  We follow the convention used for VFP instructions:
2587   single-precision routines have an "s" suffix, double-precision a
2588   "d" suffix.  */
2589
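/* FPSCR cumulative exception flags: IOC is bit 0, DZC bit 1, OFC bit 2,
 * UFC bit 3, IXC bit 4 and IDC bit 7.  The two helpers below translate
 * between these guest bits and softfloat's float_flag_* values.
 */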
2590/* Convert host exception flags to vfp form.  */
2591static inline int vfp_exceptbits_from_host(int host_bits)
2592{
2593    int target_bits = 0;
2594
2595    if (host_bits & float_flag_invalid)
2596        target_bits |= 1;
2597    if (host_bits & float_flag_divbyzero)
2598        target_bits |= 2;
2599    if (host_bits & float_flag_overflow)
2600        target_bits |= 4;
2601    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
2602        target_bits |= 8;
2603    if (host_bits & float_flag_inexact)
2604        target_bits |= 0x10;
2605    if (host_bits & float_flag_input_denormal)
2606        target_bits |= 0x80;
2607    return target_bits;
2608}
2609
2610uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
2611{
2612    int i;
2613    uint32_t fpscr;
2614
2615    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
2616            | (env->vfp.vec_len << 16)
2617            | (env->vfp.vec_stride << 20);
2618    i = get_float_exception_flags(&env->vfp.fp_status);
2619    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
2620    fpscr |= vfp_exceptbits_from_host(i);
2621    return fpscr;
2622}
2623
2624uint32_t vfp_get_fpscr(CPUState *env)
2625{
2626    return HELPER(vfp_get_fpscr)(env);
2627}
2628
2629/* Convert guest FPSCR exception bits to host (softfloat) flag form.  */
2630static inline int vfp_exceptbits_to_host(int target_bits)
2631{
2632    int host_bits = 0;
2633
2634    if (target_bits & 1)
2635        host_bits |= float_flag_invalid;
2636    if (target_bits & 2)
2637        host_bits |= float_flag_divbyzero;
2638    if (target_bits & 4)
2639        host_bits |= float_flag_overflow;
2640    if (target_bits & 8)
2641        host_bits |= float_flag_underflow;
2642    if (target_bits & 0x10)
2643        host_bits |= float_flag_inexact;
2644    if (target_bits & 0x80)
2645        host_bits |= float_flag_input_denormal;
2646    return host_bits;
2647}
2648
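/* FPSCR writes update the emulation state field by field: vector LEN is
 * bits [18:16], STRIDE bits [21:20], the rounding mode (RMode) bits [23:22],
 * flush-to-zero (FZ) bit 24 and default-NaN (DN) bit 25; the cumulative
 * exception bits are pushed back into the softfloat status words.
 */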
2649void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
2650{
2651    int i;
2652    uint32_t changed;
2653
2654    changed = env->vfp.xregs[ARM_VFP_FPSCR];
2655    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
2656    env->vfp.vec_len = (val >> 16) & 7;
2657    env->vfp.vec_stride = (val >> 20) & 3;
2658
2659    changed ^= val;
2660    if (changed & (3 << 22)) {
2661        i = (val >> 22) & 3;
2662        switch (i) {
2663        case 0:
2664            i = float_round_nearest_even;
2665            break;
2666        case 1:
2667            i = float_round_up;
2668            break;
2669        case 2:
2670            i = float_round_down;
2671            break;
2672        case 3:
2673            i = float_round_to_zero;
2674            break;
2675        }
2676        set_float_rounding_mode(i, &env->vfp.fp_status);
2677    }
2678    if (changed & (1 << 24)) {
2679        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2680        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2681    }
2682    if (changed & (1 << 25))
2683        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
2684
2685    i = vfp_exceptbits_to_host(val);
2686    set_float_exception_flags(i, &env->vfp.fp_status);
2687    set_float_exception_flags(0, &env->vfp.standard_fp_status);
2688}
2689
2690void vfp_set_fpscr(CPUState *env, uint32_t val)
2691{
2692    HELPER(vfp_set_fpscr)(env, val);
2693}
2694
2695#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
2696
2697#define VFP_BINOP(name) \
2698float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUState *env) \
2699{ \
2700    return float32_ ## name (a, b, &env->vfp.fp_status); \
2701} \
2702float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUState *env) \
2703{ \
2704    return float64_ ## name (a, b, &env->vfp.fp_status); \
2705}
2706VFP_BINOP(add)
2707VFP_BINOP(sub)
2708VFP_BINOP(mul)
2709VFP_BINOP(div)
2710#undef VFP_BINOP
2711
2712float32 VFP_HELPER(neg, s)(float32 a)
2713{
2714    return float32_chs(a);
2715}
2716
2717float64 VFP_HELPER(neg, d)(float64 a)
2718{
2719    return float64_chs(a);
2720}
2721
2722float32 VFP_HELPER(abs, s)(float32 a)
2723{
2724    return float32_abs(a);
2725}
2726
2727float64 VFP_HELPER(abs, d)(float64 a)
2728{
2729    return float64_abs(a);
2730}
2731
2732float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
2733{
2734    return float32_sqrt(a, &env->vfp.fp_status);
2735}
2736
2737float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
2738{
2739    return float64_sqrt(a, &env->vfp.fp_status);
2740}
2741
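/* VFP compares deposit an NZCV nibble straight into FPSCR[31:28]:
 * equal -> 0110 (Z,C), less than -> 1000 (N), greater than -> 0010 (C),
 * unordered -> 0011 (C,V), matching the flag values below.
 */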
2742/* XXX: check quiet/signaling case */
2743#define DO_VFP_cmp(p, type) \
2744void VFP_HELPER(cmp, p)(type a, type b, CPUState *env)  \
2745{ \
2746    uint32_t flags; \
2747    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
2748    case 0: flags = 0x6; break; \
2749    case -1: flags = 0x8; break; \
2750    case 1: flags = 0x2; break; \
2751    default: case 2: flags = 0x3; break; \
2752    } \
2753    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2754        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2755} \
2756void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
2757{ \
2758    uint32_t flags; \
2759    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
2760    case 0: flags = 0x6; break; \
2761    case -1: flags = 0x8; break; \
2762    case 1: flags = 0x2; break; \
2763    default: case 2: flags = 0x3; break; \
2764    } \
2765    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2766        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2767}
2768DO_VFP_cmp(s, float32)
2769DO_VFP_cmp(d, float64)
2770#undef DO_VFP_cmp
2771
2772/* Integer to float and float to integer conversions */
2773
2774#define CONV_ITOF(name, fsz, sign) \
2775    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
2776{ \
2777    float_status *fpst = fpstp; \
2778    return sign##int32_to_##float##fsz(x, fpst); \
2779}
2780
2781#define CONV_FTOI(name, fsz, sign, round) \
2782uint32_t HELPER(name)(float##fsz x, void *fpstp) \
2783{ \
2784    float_status *fpst = fpstp; \
2785    if (float##fsz##_is_any_nan(x)) { \
2786        float_raise(float_flag_invalid, fpst); \
2787        return 0; \
2788    } \
2789    return float##fsz##_to_##sign##int32##round(x, fpst); \
2790}
2791
2792#define FLOAT_CONVS(name, p, fsz, sign) \
2793CONV_ITOF(vfp_##name##to##p, fsz, sign) \
2794CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
2795CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
2796
2797FLOAT_CONVS(si, s, 32, )
2798FLOAT_CONVS(si, d, 64, )
2799FLOAT_CONVS(ui, s, 32, u)
2800FLOAT_CONVS(ui, d, 64, u)
2801
2802#undef CONV_ITOF
2803#undef CONV_FTOI
2804#undef FLOAT_CONVS
2805
2806/* floating point conversion */
2807float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
2808{
2809    float64 r = float32_to_float64(x, &env->vfp.fp_status);
2810    /* ARM requires that S<->D conversion of any kind of NaN generates
2811     * a quiet NaN by forcing the most significant frac bit to 1.
2812     */
2813    return float64_maybe_silence_nan(r);
2814}
2815
2816float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
2817{
2818    float32 r =  float64_to_float32(x, &env->vfp.fp_status);
2819    /* ARM requires that S<->D conversion of any kind of NaN generates
2820     * a quiet NaN by forcing the most significant frac bit to 1.
2821     */
2822    return float32_maybe_silence_nan(r);
2823}
2824
2825/* VFP3 fixed point conversion.  */
2826#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
2827float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t  x, uint32_t shift, \
2828                                    void *fpstp) \
2829{ \
2830    float_status *fpst = fpstp; \
2831    float##fsz tmp; \
2832    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
2833    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
2834} \
2835uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
2836                                       void *fpstp) \
2837{ \
2838    float_status *fpst = fpstp; \
2839    float##fsz tmp; \
2840    if (float##fsz##_is_any_nan(x)) { \
2841        float_raise(float_flag_invalid, fpst); \
2842        return 0; \
2843    } \
2844    tmp = float##fsz##_scalbn(x, shift, fpst); \
2845    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
2846}
2847
2848VFP_CONV_FIX(sh, d, 64, int16, )
2849VFP_CONV_FIX(sl, d, 64, int32, )
2850VFP_CONV_FIX(uh, d, 64, uint16, u)
2851VFP_CONV_FIX(ul, d, 64, uint32, u)
2852VFP_CONV_FIX(sh, s, 32, int16, )
2853VFP_CONV_FIX(sl, s, 32, int32, )
2854VFP_CONV_FIX(uh, s, 32, uint16, u)
2855VFP_CONV_FIX(ul, s, 32, uint32, u)
2856#undef VFP_CONV_FIX
2857
2858/* Half precision conversions.  */
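/* The AHP bit (FPSCR[26]) selects ARM's alternative half-precision format;
 * when it is clear the IEEE 754-2008 half-precision encoding is used, hence
 * the "ieee = !(FPSCR & (1 << 26))" passed to softfloat below.
 */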
2859static float32 do_fcvt_f16_to_f32(uint32_t a, CPUState *env, float_status *s)
2860{
2861    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2862    float32 r = float16_to_float32(make_float16(a), ieee, s);
2863    if (ieee) {
2864        return float32_maybe_silence_nan(r);
2865    }
2866    return r;
2867}
2868
2869static uint32_t do_fcvt_f32_to_f16(float32 a, CPUState *env, float_status *s)
2870{
2871    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2872    float16 r = float32_to_float16(a, ieee, s);
2873    if (ieee) {
2874        r = float16_maybe_silence_nan(r);
2875    }
2876    return float16_val(r);
2877}
2878
2879float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
2880{
2881    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
2882}
2883
2884uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUState *env)
2885{
2886    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
2887}
2888
2889float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
2890{
2891    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
2892}
2893
2894uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUState *env)
2895{
2896    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
2897}
2898
2899#define float32_two make_float32(0x40000000)
2900#define float32_three make_float32(0x40400000)
2901#define float32_one_point_five make_float32(0x3fc00000)
2902
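/* VRECPS and VRSQRTS provide the Newton-Raphson refinement steps for the
 * reciprocal and reciprocal square root estimates, (2 - a*b) and
 * (3 - a*b) / 2, with the architected special case for infinity * zero so
 * the iteration converges instead of yielding a NaN.
 */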
2903float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
2904{
2905    float_status *s = &env->vfp.standard_fp_status;
2906    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2907        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2908        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2909            float_raise(float_flag_input_denormal, s);
2910        }
2911        return float32_two;
2912    }
2913    return float32_sub(float32_two, float32_mul(a, b, s), s);
2914}
2915
2916float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
2917{
2918    float_status *s = &env->vfp.standard_fp_status;
2919    float32 product;
2920    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2921        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2922        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2923            float_raise(float_flag_input_denormal, s);
2924        }
2925        return float32_one_point_five;
2926    }
2927    product = float32_mul(a, b, s);
2928    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
2929}
2930
2931/* NEON helpers.  */
2932
2933/* Constants 256 and 512 are used in some helpers; we avoid relying on
2934 * int->float conversions at run-time.  */
2935#define float64_256 make_float64(0x4070000000000000LL)
2936#define float64_512 make_float64(0x4080000000000000LL)
2937
2938/* The algorithm that must be used to calculate the estimate
2939 * is specified by the ARM ARM.
2940 */
2941static float64 recip_estimate(float64 a, CPUState *env)
2942{
2943    /* These calculations mustn't set any fp exception flags,
2944     * so we use a local copy of the fp_status.
2945     */
2946    float_status dummy_status = env->vfp.standard_fp_status;
2947    float_status *s = &dummy_status;
2948    /* q = (int)(a * 512.0) */
2949    float64 q = float64_mul(float64_512, a, s);
2950    int64_t q_int = float64_to_int64_round_to_zero(q, s);
2951
2952    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
2953    q = int64_to_float64(q_int, s);
2954    q = float64_add(q, float64_half, s);
2955    q = float64_div(q, float64_512, s);
2956    q = float64_div(float64_one, q, s);
2957
2958    /* s = (int)(256.0 * r + 0.5) */
2959    q = float64_mul(q, float64_256, s);
2960    q = float64_add(q, float64_half, s);
2961    q_int = float64_to_int64_round_to_zero(q, s);
2962
2963    /* return (double)s / 256.0 */
2964    return float64_div(int64_to_float64(q_int, s), float64_256, s);
2965}
2966
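/* VRECPE on a single-precision value: the operand's fraction is mapped onto
 * a double in [0.5, 1.0), recip_estimate() returns a result rounded to a
 * multiple of 1/256, and the reciprocal's exponent is rebuilt as
 * 253 - exp(a) before the pieces are packed back into a float32.
 */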
2967float32 HELPER(recpe_f32)(float32 a, CPUState *env)
2968{
2969    float_status *s = &env->vfp.standard_fp_status;
2970    float64 f64;
2971    uint32_t val32 = float32_val(a);
2972
2973    int result_exp;
2974    int a_exp = (val32  & 0x7f800000) >> 23;
2975    int sign = val32 & 0x80000000;
2976
2977    if (float32_is_any_nan(a)) {
2978        if (float32_is_signaling_nan(a)) {
2979            float_raise(float_flag_invalid, s);
2980        }
2981        return float32_default_nan;
2982    } else if (float32_is_infinity(a)) {
2983        return float32_set_sign(float32_zero, float32_is_neg(a));
2984    } else if (float32_is_zero_or_denormal(a)) {
2985        if (!float32_is_zero(a)) {
2986            float_raise(float_flag_input_denormal, s);
2987        }
2988        float_raise(float_flag_divbyzero, s);
2989        return float32_set_sign(float32_infinity, float32_is_neg(a));
2990    } else if (a_exp >= 253) {
2991        float_raise(float_flag_underflow, s);
2992        return float32_set_sign(float32_zero, float32_is_neg(a));
2993    }
2994
2995    f64 = make_float64((0x3feULL << 52)
2996                       | ((int64_t)(val32 & 0x7fffff) << 29));
2997
2998    result_exp = 253 - a_exp;
2999
3000    f64 = recip_estimate(f64, env);
3001
3002    val32 = sign
3003        | ((result_exp & 0xff) << 23)
3004        | ((float64_val(f64) >> 29) & 0x7fffff);
3005    return make_float32(val32);
3006}
3007
3008/* The algorithm that must be used to calculate the estimate
3009 * is specified by the ARM ARM.
3010 */
3011static float64 recip_sqrt_estimate(float64 a, CPUState *env)
3012{
3013    /* These calculations mustn't set any fp exception flags,
3014     * so we use a local copy of the fp_status.
3015     */
3016    float_status dummy_status = env->vfp.standard_fp_status;
3017    float_status *s = &dummy_status;
3018    float64 q;
3019    int64_t q_int;
3020
3021    if (float64_lt(a, float64_half, s)) {
3022        /* range 0.25 <= a < 0.5 */
3023
3024        /* a in units of 1/512 rounded down */
3025        /* q0 = (int)(a * 512.0);  */
3026        q = float64_mul(float64_512, a, s);
3027        q_int = float64_to_int64_round_to_zero(q, s);
3028
3029        /* reciprocal root r */
3030        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
3031        q = int64_to_float64(q_int, s);
3032        q = float64_add(q, float64_half, s);
3033        q = float64_div(q, float64_512, s);
3034        q = float64_sqrt(q, s);
3035        q = float64_div(float64_one, q, s);
3036    } else {
3037        /* range 0.5 <= a < 1.0 */
3038
3039        /* a in units of 1/256 rounded down */
3040        /* q1 = (int)(a * 256.0); */
3041        q = float64_mul(float64_256, a, s);
3042        q_int = float64_to_int64_round_to_zero(q, s);
3043
3044        /* reciprocal root r */
3045        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
3046        q = int64_to_float64(q_int, s);
3047        q = float64_add(q, float64_half, s);
3048        q = float64_div(q, float64_256, s);
3049        q = float64_sqrt(q, s);
3050        q = float64_div(float64_one, q, s);
3051    }
3052    /* r in units of 1/256 rounded to nearest */
3053    /* s = (int)(256.0 * r + 0.5); */
3054
3055    q = float64_mul(q, float64_256, s);
3056    q = float64_add(q, float64_half, s);
3057    q_int = float64_to_int64_round_to_zero(q, s);
3058
3059    /* return (double)s / 256.0;*/
3060    return float64_div(int64_to_float64(q_int, s), float64_256, s);
3061}
3062
3063float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
3064{
3065    float_status *s = &env->vfp.standard_fp_status;
3066    int result_exp;
3067    float64 f64;
3068    uint32_t val;
3069    uint64_t val64;
3070
3071    val = float32_val(a);
3072
3073    if (float32_is_any_nan(a)) {
3074        if (float32_is_signaling_nan(a)) {
3075            float_raise(float_flag_invalid, s);
3076        }
3077        return float32_default_nan;
3078    } else if (float32_is_zero_or_denormal(a)) {
3079        if (!float32_is_zero(a)) {
3080            float_raise(float_flag_input_denormal, s);
3081        }
3082        float_raise(float_flag_divbyzero, s);
3083        return float32_set_sign(float32_infinity, float32_is_neg(a));
3084    } else if (float32_is_neg(a)) {
3085        float_raise(float_flag_invalid, s);
3086        return float32_default_nan;
3087    } else if (float32_is_infinity(a)) {
3088        return float32_zero;
3089    }
3090
3091    /* Normalize to a double-precision value between 0.25 and 1.0,
3092     * preserving the parity of the exponent.  */
3093    if ((val & 0x800000) == 0) {
3094        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3095                           | (0x3feULL << 52)
3096                           | ((uint64_t)(val & 0x7fffff) << 29));
3097    } else {
3098        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3099                           | (0x3fdULL << 52)
3100                           | ((uint64_t)(val & 0x7fffff) << 29));
3101    }
3102
3103    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;
3104
3105    f64 = recip_sqrt_estimate(f64, env);
3106
3107    val64 = float64_val(f64);
3108
3109    val = ((val64 >> 63)  & 0x80000000)
3110        | ((result_exp & 0xff) << 23)
3111        | ((val64 >> 29)  & 0x7fffff);
3112    return make_float32(val);
3113}
3114
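/* The unsigned (fixed-point) estimate helpers treat the operand as a
 * 0.32-format fraction.  Values below 0.5 (reciprocal) or 0.25 (reciprocal
 * square root) saturate to 0xffffffff; otherwise the operand is mapped onto
 * a double in [0.25, 1.0), run through the same estimate code, and the
 * resulting fraction is packed back with bit 31 set.
 */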
3115uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
3116{
3117    float64 f64;
3118
3119    if ((a & 0x80000000) == 0) {
3120        return 0xffffffff;
3121    }
3122
3123    f64 = make_float64((0x3feULL << 52)
3124                       | ((int64_t)(a & 0x7fffffff) << 21));
3125
3126    f64 = recip_estimate (f64, env);
3127
3128    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3129}
3130
3131uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
3132{
3133    float64 f64;
3134
3135    if ((a & 0xc0000000) == 0) {
3136        return 0xffffffff;
3137    }
3138
3139    if (a & 0x80000000) {
3140        f64 = make_float64((0x3feULL << 52)
3141                           | ((uint64_t)(a & 0x7fffffff) << 21));
3142    } else { /* bits 31-30 == '01' */
3143        f64 = make_float64((0x3fdULL << 52)
3144                           | ((uint64_t)(a & 0x3fffffff) << 22));
3145    }
3146
3147    f64 = recip_sqrt_estimate(f64, env);
3148
3149    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3150}
3151
3152void HELPER(set_teecr)(CPUState *env, uint32_t val)
3153{
3154    val &= 1;
3155    if (env->teecr != val) {
3156        env->teecr = val;
3157        tb_flush(env);
3158    }
3159}
3160