1#include <stdio.h>
2#include <stdlib.h>
3#include <string.h>
4
5#include "cpu.h"
6#include "exec/exec-all.h"
7#include "exec/gdbstub.h"
8#include "helper.h"
9#include "qemu-common.h"
10#include "qemu/host-utils.h"
11#if !defined(CONFIG_USER_ONLY)
12//#include "hw/loader.h"
13#endif
14
15static uint32_t cortexa9_cp15_c0_c1[8] =
16{ 0x1031, 0x11, 0x000, 0, 0x00100103, 0x20000000, 0x01230000, 0x00002111 };
17
18static uint32_t cortexa9_cp15_c0_c2[8] =
19{ 0x00101111, 0x13112111, 0x21232041, 0x11112131, 0x00111142, 0, 0, 0 };
20
21static uint32_t cortexa8_cp15_c0_c1[8] =
22{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };
23
24static uint32_t cortexa8_cp15_c0_c2[8] =
25{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };
26
27static uint32_t cortexa8r2_cp15_c0_c2[8] =
28{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00011142, 0, 0, 0 };
29
30static uint32_t mpcore_cp15_c0_c1[8] =
31{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };
32
33static uint32_t mpcore_cp15_c0_c2[8] =
34{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };
35
36static uint32_t arm1136_cp15_c0_c1[8] =
37{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };
38
39static uint32_t arm1136_cp15_c0_c2[8] =
40{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };
41
42static uint32_t arm1176_cp15_c0_c1[8] =
43{ 0x111, 0x11, 0x33, 0, 0x01130003, 0x10030302, 0x01222100, 0 };
44
45static uint32_t arm1176_cp15_c0_c2[8] =
46{ 0x0140011, 0x12002111, 0x11231121, 0x01102131, 0x01141, 0, 0, 0 };
47
48static uint32_t cpu_arm_find_by_name(const char *name);
49
50static inline void set_feature(CPUARMState *env, int feature)
51{
52    env->features |= 1u << feature;
53}
54
55static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
56{
57    env->cp15.c0_cpuid = id;
58    switch (id) {
59    case ARM_CPUID_ARM926:
60        set_feature(env, ARM_FEATURE_V4T);
61        set_feature(env, ARM_FEATURE_V5);
62        set_feature(env, ARM_FEATURE_VFP);
63        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
64        env->cp15.c0_cachetype = 0x1dd20d2;
65        env->cp15.c1_sys = 0x00090078;
66        break;
67    case ARM_CPUID_ARM946:
68        set_feature(env, ARM_FEATURE_V4T);
69        set_feature(env, ARM_FEATURE_V5);
70        set_feature(env, ARM_FEATURE_MPU);
71        env->cp15.c0_cachetype = 0x0f004006;
72        env->cp15.c1_sys = 0x00000078;
73        break;
74    case ARM_CPUID_ARM1026:
75        set_feature(env, ARM_FEATURE_V4T);
76        set_feature(env, ARM_FEATURE_V5);
77        set_feature(env, ARM_FEATURE_VFP);
78        set_feature(env, ARM_FEATURE_AUXCR);
79        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
80        env->cp15.c0_cachetype = 0x1dd20d2;
81        env->cp15.c1_sys = 0x00090078;
82        break;
83    case ARM_CPUID_ARM1136:
84        /* This is the 1136 r1, which is a v6K core */
85        set_feature(env, ARM_FEATURE_V6K);
86        /* Fall through */
87    case ARM_CPUID_ARM1136_R2:
88        /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
89         * older core than plain "arm1136". In particular this does not
90         * have the v6K features.
91         */
92        set_feature(env, ARM_FEATURE_V4T);
93        set_feature(env, ARM_FEATURE_V5);
94        set_feature(env, ARM_FEATURE_V6);
95        set_feature(env, ARM_FEATURE_VFP);
96        set_feature(env, ARM_FEATURE_AUXCR);
97        /* These ID register values are correct for 1136 but may be wrong
98         * for 1136_r2 (in particular r0p2 does not actually implement most
99         * of the ID registers).
100         */
101        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
102        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
103        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
104        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
105        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
106        env->cp15.c0_cachetype = 0x1dd20d2;
107        env->cp15.c1_sys = 0x00050078;
108        break;
109    case ARM_CPUID_ARM1176:
110        set_feature(env, ARM_FEATURE_V4T);
111        set_feature(env, ARM_FEATURE_V5);
112        set_feature(env, ARM_FEATURE_V6);
113        set_feature(env, ARM_FEATURE_V6K);
114        set_feature(env, ARM_FEATURE_VFP);
115        set_feature(env, ARM_FEATURE_AUXCR);
116        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b5;
117        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
118        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
119        memcpy(env->cp15.c0_c1, arm1176_cp15_c0_c1, 8 * sizeof(uint32_t));
120        memcpy(env->cp15.c0_c2, arm1176_cp15_c0_c2, 8 * sizeof(uint32_t));
121        env->cp15.c0_cachetype = 0x1dd20d2;
122        env->cp15.c1_sys = 0x00050078;
123        break;
124    case ARM_CPUID_ARM11MPCORE:
125        set_feature(env, ARM_FEATURE_V4T);
126        set_feature(env, ARM_FEATURE_V5);
127        set_feature(env, ARM_FEATURE_V6);
128        set_feature(env, ARM_FEATURE_V6K);
129        set_feature(env, ARM_FEATURE_VFP);
130        set_feature(env, ARM_FEATURE_AUXCR);
131        set_feature(env, ARM_FEATURE_VAPA);
132        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
133        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
134        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
135        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
136        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
137        env->cp15.c0_cachetype = 0x1dd20d2;
138        break;
139    case ARM_CPUID_CORTEXA8:
140        set_feature(env, ARM_FEATURE_V4T);
141        set_feature(env, ARM_FEATURE_V5);
142        set_feature(env, ARM_FEATURE_V6);
143        set_feature(env, ARM_FEATURE_V6K);
144        set_feature(env, ARM_FEATURE_V7);
145        set_feature(env, ARM_FEATURE_AUXCR);
146        set_feature(env, ARM_FEATURE_THUMB2);
147        set_feature(env, ARM_FEATURE_VFP);
148        set_feature(env, ARM_FEATURE_VFP3);
149        set_feature(env, ARM_FEATURE_NEON);
150        set_feature(env, ARM_FEATURE_THUMB2EE);
151        set_feature(env, ARM_FEATURE_TRUSTZONE);
152        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
153        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
154        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
155        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
156        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
157        env->cp15.c0_cachetype = 0x82048004;
158        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
159        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
160        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
161        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
162        env->cp15.c1_sys = 0x00c50078;
163        break;
164    case ARM_CPUID_CORTEXA8_R2:
165        set_feature(env, ARM_FEATURE_V4T);
166        set_feature(env, ARM_FEATURE_V5);
167        set_feature(env, ARM_FEATURE_V6);
168        set_feature(env, ARM_FEATURE_V6K);
169        set_feature(env, ARM_FEATURE_V7);
170        set_feature(env, ARM_FEATURE_AUXCR);
171        set_feature(env, ARM_FEATURE_THUMB2);
172        set_feature(env, ARM_FEATURE_VFP);
173        set_feature(env, ARM_FEATURE_VFP3);
174        set_feature(env, ARM_FEATURE_NEON);
175        set_feature(env, ARM_FEATURE_THUMB2EE);
176        set_feature(env, ARM_FEATURE_TRUSTZONE);
177        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c2;
178        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
179        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011111;
180        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
181        memcpy(env->cp15.c0_c2, cortexa8r2_cp15_c0_c2, 8 * sizeof(uint32_t));
182        env->cp15.c0_cachetype = 0x82048004;
183        env->cp15.c0_clid = (1 << 27) | (2 << 24) | (4 << 3) | 3;
184        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
185        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
186        env->cp15.c0_ccsid[2] = 0xf03fe03a; /* 256k L2 cache. */
187        env->cp15.c1_sys = 0x00c50078;
188        break;
189    case ARM_CPUID_CORTEXA9:
190        set_feature(env, ARM_FEATURE_V4T);
191        set_feature(env, ARM_FEATURE_V5);
192        set_feature(env, ARM_FEATURE_V6);
193        set_feature(env, ARM_FEATURE_V6K);
194        set_feature(env, ARM_FEATURE_V7);
195        set_feature(env, ARM_FEATURE_AUXCR);
196        set_feature(env, ARM_FEATURE_THUMB2);
197        set_feature(env, ARM_FEATURE_VFP);
198        set_feature(env, ARM_FEATURE_VFP3);
199        set_feature(env, ARM_FEATURE_VFP_FP16);
200        set_feature(env, ARM_FEATURE_NEON);
201        set_feature(env, ARM_FEATURE_THUMB2EE);
202        /* Note that A9 supports the MP extensions even for
203         * A9UP and single-core A9MP (which are both different
204         * and valid configurations; we don't model A9UP).
205         */
206        set_feature(env, ARM_FEATURE_V7MP);
207        set_feature(env, ARM_FEATURE_TRUSTZONE);
208        env->vfp.xregs[ARM_VFP_FPSID] = 0x41034000; /* Guess */
209        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
210        env->vfp.xregs[ARM_VFP_MVFR1] = 0x01111111;
211        memcpy(env->cp15.c0_c1, cortexa9_cp15_c0_c1, 8 * sizeof(uint32_t));
212        memcpy(env->cp15.c0_c2, cortexa9_cp15_c0_c2, 8 * sizeof(uint32_t));
213        env->cp15.c0_cachetype = 0x80038003;
214        env->cp15.c0_clid = (1 << 27) | (1 << 24) | 3;
215        env->cp15.c0_ccsid[0] = 0xe00fe015; /* 16k L1 dcache. */
216        env->cp15.c0_ccsid[1] = 0x200fe015; /* 16k L1 icache. */
217        env->cp15.c1_sys = 0x00c50078;
218        break;
219    case ARM_CPUID_CORTEXM3:
220        set_feature(env, ARM_FEATURE_V4T);
221        set_feature(env, ARM_FEATURE_V5);
222        set_feature(env, ARM_FEATURE_V6);
223        set_feature(env, ARM_FEATURE_THUMB2);
224        set_feature(env, ARM_FEATURE_V7);
225        set_feature(env, ARM_FEATURE_M);
226        set_feature(env, ARM_FEATURE_DIV);
227        break;
228    case ARM_CPUID_ANY: /* For userspace emulation.  */
229        set_feature(env, ARM_FEATURE_V4T);
230        set_feature(env, ARM_FEATURE_V5);
231        set_feature(env, ARM_FEATURE_V6);
232        set_feature(env, ARM_FEATURE_V6K);
233        set_feature(env, ARM_FEATURE_V7);
234        set_feature(env, ARM_FEATURE_THUMB2);
235        set_feature(env, ARM_FEATURE_VFP);
236        set_feature(env, ARM_FEATURE_VFP3);
237        set_feature(env, ARM_FEATURE_VFP_FP16);
238        set_feature(env, ARM_FEATURE_NEON);
239        set_feature(env, ARM_FEATURE_THUMB2EE);
240        set_feature(env, ARM_FEATURE_DIV);
241        set_feature(env, ARM_FEATURE_V7MP);
242        break;
243    case ARM_CPUID_TI915T:
244    case ARM_CPUID_TI925T:
245        set_feature(env, ARM_FEATURE_V4T);
246        set_feature(env, ARM_FEATURE_OMAPCP);
247        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
248        env->cp15.c0_cachetype = 0x5109149;
249        env->cp15.c1_sys = 0x00000070;
250        env->cp15.c15_i_max = 0x000;
251        env->cp15.c15_i_min = 0xff0;
252        break;
253    case ARM_CPUID_PXA250:
254    case ARM_CPUID_PXA255:
255    case ARM_CPUID_PXA260:
256    case ARM_CPUID_PXA261:
257    case ARM_CPUID_PXA262:
258        set_feature(env, ARM_FEATURE_V4T);
259        set_feature(env, ARM_FEATURE_V5);
260        set_feature(env, ARM_FEATURE_XSCALE);
261        /* JTAG_ID is ((id << 28) | 0x09265013) */
262        env->cp15.c0_cachetype = 0xd172172;
263        env->cp15.c1_sys = 0x00000078;
264        break;
265    case ARM_CPUID_PXA270_A0:
266    case ARM_CPUID_PXA270_A1:
267    case ARM_CPUID_PXA270_B0:
268    case ARM_CPUID_PXA270_B1:
269    case ARM_CPUID_PXA270_C0:
270    case ARM_CPUID_PXA270_C5:
271        set_feature(env, ARM_FEATURE_V4T);
272        set_feature(env, ARM_FEATURE_V5);
273        set_feature(env, ARM_FEATURE_XSCALE);
274        /* JTAG_ID is ((id << 28) | 0x09265013) */
275        set_feature(env, ARM_FEATURE_IWMMXT);
276        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
277        env->cp15.c0_cachetype = 0xd172172;
278        env->cp15.c1_sys = 0x00000078;
279        break;
280    case ARM_CPUID_SA1100:
281    case ARM_CPUID_SA1110:
282        set_feature(env, ARM_FEATURE_STRONGARM);
283        env->cp15.c1_sys = 0x00000070;
284        break;
285    default:
286        cpu_abort(env, "Bad CPU ID: %x\n", id);
287        break;
288    }
289
290    /* Some features automatically imply others: */
291    if (arm_feature(env, ARM_FEATURE_V7)) {
292        set_feature(env, ARM_FEATURE_VAPA);
293    }
294}
295
296void cpu_reset(CPUState *cpu)
297{
298    CPUARMState *env = cpu->env_ptr;
299    uint32_t id;
300
301    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
302        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
303        log_cpu_state(cpu, 0);
304    }
305
306    id = env->cp15.c0_cpuid;
307    memset(env, 0, offsetof(CPUARMState, breakpoints));
308    if (id)
309        cpu_reset_model_id(env, id);
310    /* DBGDIDR : we implement nothing, and just mirror the main ID
311     * register's Variant and Revision fields.
312     */
313    env->cp14_dbgdidr = (id >> 16 & 0xf0) | 0xf;
314#if defined (CONFIG_USER_ONLY)
315    env->uncached_cpsr = ARM_CPU_MODE_USR;
316    /* For user mode we must enable access to coprocessors */
317    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
318    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
319        env->cp15.c15_cpar = 3;
320    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
321        env->cp15.c15_cpar = 1;
322    }
323#else
324    /* SVC mode with interrupts disabled.  */
325    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
326    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
327       clear at reset.  Initial SP and PC are loaded from ROM.  */
328    if (IS_M(env)) {
329        env->uncached_cpsr &= ~CPSR_I;
330#ifndef CONFIG_ANDROID  /* No hw/loader.h and no ROM support for now on Android */
331        uint32_t pc;
332        uint8_t *rom;
333        rom = rom_ptr(0);
334        if (rom) {
335            /* We should really use ldl_phys here, in case the guest
336               modified flash and reset itself.  However images
337               loaded via -kenrel have not been copied yet, so load the
338               values directly from there.  */
339            env->regs[13] = ldl_p(rom);
340            pc = ldl_p(rom + 4);
341            env->thumb = pc & 1;
342            env->regs[15] = pc & ~1;
343        }
344#endif
345    }
346    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
347    env->cp15.c2_base_mask = 0xffffc000u;
348    /* v7 performance monitor control register: same implementor
349     * field as main ID register, and we implement no event counters.
350     */
351    env->cp15.c9_pmcr = (id & 0xff000000);
352#endif
353    set_flush_to_zero(1, &env->vfp.standard_fp_status);
354    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
355    set_default_nan_mode(1, &env->vfp.standard_fp_status);
356    set_float_detect_tininess(float_tininess_before_rounding,
357                              &env->vfp.fp_status);
358    set_float_detect_tininess(float_tininess_before_rounding,
359                              &env->vfp.standard_fp_status);
360    tlb_flush(env, 1);
361}
362
363static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
364{
365    int nregs;
366
367    /* VFP data registers are always little-endian.  */
368    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
369    if (reg < nregs) {
370        stfq_le_p(buf, env->vfp.regs[reg]);
371        return 8;
372    }
373    if (arm_feature(env, ARM_FEATURE_NEON)) {
374        /* Aliases for Q regs.  */
375        nregs += 16;
376        if (reg < nregs) {
377            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
378            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
379            return 16;
380        }
381    }
382    switch (reg - nregs) {
383    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
384    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
385    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
386    }
387    return 0;
388}
389
390static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
391{
392    int nregs;
393
394    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
395    if (reg < nregs) {
396        env->vfp.regs[reg] = ldfq_le_p(buf);
397        return 8;
398    }
399    if (arm_feature(env, ARM_FEATURE_NEON)) {
400        nregs += 16;
401        if (reg < nregs) {
402            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
403            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
404            return 16;
405        }
406    }
407    switch (reg - nregs) {
408    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
409    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
410    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
411    }
412    return 0;
413}
414
415CPUARMState *cpu_arm_init(const char *cpu_model)
416{
417    ARMCPU *arm_cpu;
418    CPUARMState *env;
419    uint32_t id;
420    static int inited = 0;
421
422    id = cpu_arm_find_by_name(cpu_model);
423    if (id == 0)
424        return NULL;
425    arm_cpu = g_malloc0(sizeof(ARMCPU));
426    env = &arm_cpu->env;
427    ENV_GET_CPU(env)->env_ptr = env;
428
429    CPUState *cpu = ENV_GET_CPU(env);
430    cpu_exec_init(env);
431    if (!inited) {
432        inited = 1;
433        arm_translate_init();
434    }
435
436    cpu->cpu_model_str = cpu_model;
437    env->cp15.c0_cpuid = id;
438    cpu_reset(cpu);
439    if (arm_feature(env, ARM_FEATURE_NEON)) {
440        gdb_register_coprocessor(cpu, vfp_gdb_get_reg, vfp_gdb_set_reg,
441                                 51, "arm-neon.xml", 0);
442    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
443        gdb_register_coprocessor(cpu, vfp_gdb_get_reg, vfp_gdb_set_reg,
444                                 35, "arm-vfp3.xml", 0);
445    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
446        gdb_register_coprocessor(cpu, vfp_gdb_get_reg, vfp_gdb_set_reg,
447                                 19, "arm-vfp.xml", 0);
448    }
449    qemu_init_vcpu(cpu);
450    return env;
451}
452
453struct arm_cpu_t {
454    uint32_t id;
455    const char *name;
456};
457
458static const struct arm_cpu_t arm_cpu_names[] = {
459    { ARM_CPUID_ARM926, "arm926"},
460    { ARM_CPUID_ARM946, "arm946"},
461    { ARM_CPUID_ARM1026, "arm1026"},
462    { ARM_CPUID_ARM1136, "arm1136"},
463    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
464    { ARM_CPUID_ARM1176, "arm1176"},
465    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
466    { ARM_CPUID_CORTEXM3, "cortex-m3"},
467    { ARM_CPUID_CORTEXA8, "cortex-a8"},
468    { ARM_CPUID_CORTEXA8_R2, "cortex-a8-r2"},
469    { ARM_CPUID_CORTEXA9, "cortex-a9"},
470    { ARM_CPUID_TI925T, "ti925t" },
471    { ARM_CPUID_PXA250, "pxa250" },
472    { ARM_CPUID_SA1100,    "sa1100" },
473    { ARM_CPUID_SA1110,    "sa1110" },
474    { ARM_CPUID_PXA255, "pxa255" },
475    { ARM_CPUID_PXA260, "pxa260" },
476    { ARM_CPUID_PXA261, "pxa261" },
477    { ARM_CPUID_PXA262, "pxa262" },
478    { ARM_CPUID_PXA270, "pxa270" },
479    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
480    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
481    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
482    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
483    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
484    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
485    { ARM_CPUID_ANY, "any"},
486    { 0, NULL}
487};
488
489void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
490{
491    int i;
492
493    (*cpu_fprintf)(f, "Available CPUs:\n");
494    for (i = 0; arm_cpu_names[i].name; i++) {
495        (*cpu_fprintf)(f, "  %s\n", arm_cpu_names[i].name);
496    }
497}
498
499/* return 0 if not found */
500static uint32_t cpu_arm_find_by_name(const char *name)
501{
502    int i;
503    uint32_t id;
504
505    id = 0;
506    for (i = 0; arm_cpu_names[i].name; i++) {
507        if (strcmp(name, arm_cpu_names[i].name) == 0) {
508            id = arm_cpu_names[i].id;
509            break;
510        }
511    }
512    return id;
513}
514
515void cpu_arm_close(CPUARMState *env)
516{
517    free(env);
518}
519
520uint32_t cpsr_read(CPUARMState *env)
521{
522    int ZF;
523    ZF = (env->ZF == 0);
524    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
525        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
526        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
527        | ((env->condexec_bits & 0xfc) << 8)
528        | (env->GE << 16);
529}
530
531void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
532{
533    if (mask & CPSR_NZCV) {
534        env->ZF = (~val) & CPSR_Z;
535        env->NF = val;
536        env->CF = (val >> 29) & 1;
537        env->VF = (val << 3) & 0x80000000;
538    }
539    if (mask & CPSR_Q)
540        env->QF = ((val & CPSR_Q) != 0);
541    if (mask & CPSR_T)
542        env->thumb = ((val & CPSR_T) != 0);
543    if (mask & CPSR_IT_0_1) {
544        env->condexec_bits &= ~3;
545        env->condexec_bits |= (val >> 25) & 3;
546    }
547    if (mask & CPSR_IT_2_7) {
548        env->condexec_bits &= 3;
549        env->condexec_bits |= (val >> 8) & 0xfc;
550    }
551    if (mask & CPSR_GE) {
552        env->GE = (val >> 16) & 0xf;
553    }
554
555    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
556        switch_mode(env, val & CPSR_M);
557    }
558    mask &= ~CACHED_CPSR_BITS;
559    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
560}
561
562/* Sign/zero extend */
563uint32_t HELPER(sxtb16)(uint32_t x)
564{
565    uint32_t res;
566    res = (uint16_t)(int8_t)x;
567    res |= (uint32_t)(int8_t)(x >> 16) << 16;
568    return res;
569}
570
571uint32_t HELPER(uxtb16)(uint32_t x)
572{
573    uint32_t res;
574    res = (uint16_t)(uint8_t)x;
575    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
576    return res;
577}
578
579uint32_t HELPER(clz)(uint32_t x)
580{
581    return clz32(x);
582}
583
584int32_t HELPER(sdiv)(int32_t num, int32_t den)
585{
586    if (den == 0)
587      return 0;
588    if (num == INT_MIN && den == -1)
589      return INT_MIN;
590    return num / den;
591}
592
593uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
594{
595    if (den == 0)
596      return 0;
597    return num / den;
598}
599
600uint32_t HELPER(rbit)(uint32_t x)
601{
602    x =  ((x & 0xff000000) >> 24)
603       | ((x & 0x00ff0000) >> 8)
604       | ((x & 0x0000ff00) << 8)
605       | ((x & 0x000000ff) << 24);
606    x =  ((x & 0xf0f0f0f0) >> 4)
607       | ((x & 0x0f0f0f0f) << 4);
608    x =  ((x & 0x88888888) >> 3)
609       | ((x & 0x44444444) >> 1)
610       | ((x & 0x22222222) << 1)
611       | ((x & 0x11111111) << 3);
612    return x;
613}
614
615uint32_t HELPER(abs)(uint32_t x)
616{
617    return ((int32_t)x < 0) ? -x : x;
618}
619
620#if defined(CONFIG_USER_ONLY)
621
622void do_interrupt (CPUARMState *env)
623{
624    env->exception_index = -1;
625}
626
627int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
628                              int mmu_idx)
629{
630    if (rw == 2) {
631        env->exception_index = EXCP_PREFETCH_ABORT;
632        env->cp15.c6_insn = address;
633    } else {
634        env->exception_index = EXCP_DATA_ABORT;
635        env->cp15.c6_data = address;
636    }
637    return 1;
638}
639
640/* These should probably raise undefined insn exceptions.  */
641void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
642{
643    cpu_abort(env, "cp15 insn %08x\n", insn);
644}
645
646uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
647{
648    cpu_abort(env, "cp15 insn %08x\n", insn);
649    return 0;
650}
651
652/* These should probably raise undefined insn exceptions.  */
653void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
654{
655    cpu_abort(env, "v7m_mrs %d\n", reg);
656}
657
658uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
659{
660    cpu_abort(env, "v7m_mrs %d\n", reg);
661    return 0;
662}
663
664void switch_mode(CPUARMState *env, int mode)
665{
666    if (mode != ARM_CPU_MODE_USR)
667        cpu_abort(env, "Tried to switch out of user mode\n");
668}
669
670void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
671{
672    cpu_abort(env, "banked r13 write\n");
673}
674
675uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
676{
677    cpu_abort(env, "banked r13 read\n");
678    return 0;
679}
680
681#else
682
683extern int semihosting_enabled;
684
685/* Map CPU modes onto saved register banks.  */
686static inline int bank_number (CPUARMState *env, int mode)
687{
688    switch (mode) {
689    case ARM_CPU_MODE_USR:
690    case ARM_CPU_MODE_SYS:
691        return 0;
692    case ARM_CPU_MODE_SVC:
693        return 1;
694    case ARM_CPU_MODE_ABT:
695        return 2;
696    case ARM_CPU_MODE_UND:
697        return 3;
698    case ARM_CPU_MODE_IRQ:
699        return 4;
700    case ARM_CPU_MODE_FIQ:
701        return 5;
702    case ARM_CPU_MODE_SMC:
703        return 6;
704    }
705    cpu_abort(env, "Bad mode %x\n", mode);
706    return -1;
707}
708
709void switch_mode(CPUARMState *env, int mode)
710{
711    int old_mode;
712    int i;
713
714    old_mode = env->uncached_cpsr & CPSR_M;
715    if (mode == old_mode)
716        return;
717
718    if (old_mode == ARM_CPU_MODE_FIQ) {
719        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
720        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
721    } else if (mode == ARM_CPU_MODE_FIQ) {
722        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
723        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
724    }
725
726    i = bank_number(env, old_mode);
727    env->banked_r13[i] = env->regs[13];
728    env->banked_r14[i] = env->regs[14];
729    env->banked_spsr[i] = env->spsr;
730
731    i = bank_number(env, mode);
732    env->regs[13] = env->banked_r13[i];
733    env->regs[14] = env->banked_r14[i];
734    env->spsr = env->banked_spsr[i];
735}
736
737static void v7m_push(CPUARMState *env, uint32_t val)
738{
739    env->regs[13] -= 4;
740    stl_phys(env->regs[13], val);
741}
742
743static uint32_t v7m_pop(CPUARMState *env)
744{
745    uint32_t val;
746    val = ldl_phys(env->regs[13]);
747    env->regs[13] += 4;
748    return val;
749}
750
751/* Switch to V7M main or process stack pointer.  */
752static void switch_v7m_sp(CPUARMState *env, int process)
753{
754    uint32_t tmp;
755    if (env->v7m.current_sp != process) {
756        tmp = env->v7m.other_sp;
757        env->v7m.other_sp = env->regs[13];
758        env->regs[13] = tmp;
759        env->v7m.current_sp = process;
760    }
761}
762
763static void do_v7m_exception_exit(CPUARMState *env)
764{
765    uint32_t type;
766    uint32_t xpsr;
767
768    type = env->regs[15];
769    if (env->v7m.exception != 0)
770        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
771
772    /* Switch to the target stack.  */
773    switch_v7m_sp(env, (type & 4) != 0);
774    /* Pop registers.  */
775    env->regs[0] = v7m_pop(env);
776    env->regs[1] = v7m_pop(env);
777    env->regs[2] = v7m_pop(env);
778    env->regs[3] = v7m_pop(env);
779    env->regs[12] = v7m_pop(env);
780    env->regs[14] = v7m_pop(env);
781    env->regs[15] = v7m_pop(env);
782    xpsr = v7m_pop(env);
783    xpsr_write(env, xpsr, 0xfffffdff);
784    /* Undo stack alignment.  */
785    if (xpsr & 0x200)
786        env->regs[13] |= 4;
787    /* ??? The exception return type specifies Thread/Handler mode.  However
788       this is also implied by the xPSR value. Not sure what to do
789       if there is a mismatch.  */
790    /* ??? Likewise for mismatches between the CONTROL register and the stack
791       pointer.  */
792}
793
794static void do_interrupt_v7m(CPUARMState *env)
795{
796    uint32_t xpsr = xpsr_read(env);
797    uint32_t lr;
798    uint32_t addr;
799
800    lr = 0xfffffff1;
801    if (env->v7m.current_sp)
802        lr |= 4;
803    if (env->v7m.exception == 0)
804        lr |= 8;
805
806    /* For exceptions we just mark as pending on the NVIC, and let that
807       handle it.  */
808    /* TODO: Need to escalate if the current priority is higher than the
809       one we're raising.  */
810    switch (env->exception_index) {
811    case EXCP_UDEF:
812        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
813        return;
814    case EXCP_SWI:
815        env->regs[15] += 2;
816        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
817        return;
818    case EXCP_PREFETCH_ABORT:
819    case EXCP_DATA_ABORT:
820        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
821        return;
822    case EXCP_BKPT:
823        if (semihosting_enabled) {
824            int nr;
825            nr = cpu_lduw_code(env, env->regs[15]) & 0xff;
826            if (nr == 0xab) {
827                env->regs[15] += 2;
828                env->regs[0] = do_arm_semihosting(env);
829                return;
830            }
831        }
832        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
833        return;
834    case EXCP_IRQ:
835        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
836        break;
837    case EXCP_EXCEPTION_EXIT:
838        do_v7m_exception_exit(env);
839        return;
840    default:
841        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
842        return; /* Never happens.  Keep compiler happy.  */
843    }
844
845    /* Align stack pointer.  */
846    /* ??? Should only do this if Configuration Control Register
847       STACKALIGN bit is set.  */
848    if (env->regs[13] & 4) {
849        env->regs[13] -= 4;
850        xpsr |= 0x200;
851    }
852    /* Switch to the handler mode.  */
853    v7m_push(env, xpsr);
854    v7m_push(env, env->regs[15]);
855    v7m_push(env, env->regs[14]);
856    v7m_push(env, env->regs[12]);
857    v7m_push(env, env->regs[3]);
858    v7m_push(env, env->regs[2]);
859    v7m_push(env, env->regs[1]);
860    v7m_push(env, env->regs[0]);
861    switch_v7m_sp(env, 0);
862    env->uncached_cpsr &= ~CPSR_IT;
863    env->regs[14] = lr;
864    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
865    env->regs[15] = addr & 0xfffffffe;
866    env->thumb = addr & 1;
867}
868
869/* Handle a CPU exception.  */
870void do_interrupt(CPUARMState *env)
871{
872    uint32_t addr;
873    uint32_t mask;
874    int new_mode;
875    uint32_t offset;
876
877    if (IS_M(env)) {
878        do_interrupt_v7m(env);
879        return;
880    }
881    /* TODO: Vectored interrupt controller.  */
882    switch (env->exception_index) {
883    case EXCP_UDEF:
884        new_mode = ARM_CPU_MODE_UND;
885        addr = 0x04;
886        mask = CPSR_I;
887        if (env->thumb)
888            offset = 2;
889        else
890            offset = 4;
891        break;
892    case EXCP_SWI:
893        if (semihosting_enabled) {
894            /* Check for semihosting interrupt.  */
895            if (env->thumb) {
896                mask = cpu_lduw_code(env, env->regs[15] - 2) & 0xff;
897            } else {
898                mask = cpu_ldl_code(env, env->regs[15] - 4) & 0xffffff;
899            }
900            /* Only intercept calls from privileged modes, to provide some
901               semblance of security.  */
902            if (((mask == 0x123456 && !env->thumb)
903                    || (mask == 0xab && env->thumb))
904                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
905                env->regs[0] = do_arm_semihosting(env);
906                return;
907            }
908        }
909        new_mode = ARM_CPU_MODE_SVC;
910        addr = 0x08;
911        mask = CPSR_I;
912        /* The PC already points to the next instruction.  */
913        offset = 0;
914        break;
915    case EXCP_BKPT:
916        /* See if this is a semihosting syscall.  */
917        if (env->thumb && semihosting_enabled) {
918            mask = cpu_lduw_code(env, env->regs[15]) & 0xff;
919            if (mask == 0xab
920                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
921                env->regs[15] += 2;
922                env->regs[0] = do_arm_semihosting(env);
923                return;
924            }
925        }
926        /* Fall through to prefetch abort.  */
927    case EXCP_PREFETCH_ABORT:
928        new_mode = ARM_CPU_MODE_ABT;
929        addr = 0x0c;
930        mask = CPSR_A | CPSR_I;
931        offset = 4;
932        break;
933    case EXCP_DATA_ABORT:
934        new_mode = ARM_CPU_MODE_ABT;
935        addr = 0x10;
936        mask = CPSR_A | CPSR_I;
937        offset = 8;
938        break;
939    case EXCP_IRQ:
940        new_mode = ARM_CPU_MODE_IRQ;
941        addr = 0x18;
942        /* Disable IRQ and imprecise data aborts.  */
943        mask = CPSR_A | CPSR_I;
944        offset = 4;
945        break;
946    case EXCP_FIQ:
947        new_mode = ARM_CPU_MODE_FIQ;
948        addr = 0x1c;
949        /* Disable FIQ, IRQ and imprecise data aborts.  */
950        mask = CPSR_A | CPSR_I | CPSR_F;
951        offset = 4;
952        break;
953    case EXCP_SMC:
954        if (semihosting_enabled) {
955            cpu_abort(env, "SMC handling under semihosting not implemented\n");
956            return;
957        }
958        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SMC) {
959            env->cp15.c1_secfg &= ~1;
960        }
961        offset = env->thumb ? 2 : 0;
962        new_mode = ARM_CPU_MODE_SMC;
963        addr = 0x08;
964        mask = CPSR_A | CPSR_I | CPSR_F;
965        break;
966    default:
967        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
968        return; /* Never happens.  Keep compiler happy.  */
969    }
970    if (arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
971        if (new_mode == ARM_CPU_MODE_SMC ||
972            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SMC) {
973            addr += env->cp15.c12_mvbar;
974        } else {
975            if (env->cp15.c1_sys & (1 << 13)) {
976                addr += 0xffff0000;
977            } else {
978                addr += env->cp15.c12_vbar;
979            }
980        }
981    } else {
982    /* High vectors.  */
983    if (env->cp15.c1_sys & (1 << 13)) {
984        addr += 0xffff0000;
985        }
986    }
987    switch_mode (env, new_mode);
988    env->spsr = cpsr_read(env);
989    /* Clear IT bits.  */
990    env->condexec_bits = 0;
991    /* Switch to the new mode, and to the correct instruction set.  */
992    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
993    env->uncached_cpsr |= mask;
994    /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
995     * and we should just guard the thumb mode on V4 */
996    if (arm_feature(env, ARM_FEATURE_V4T)) {
997        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
998    }
999    env->regs[14] = env->regs[15] + offset;
1000    env->regs[15] = addr;
1001    ENV_GET_CPU(env)->interrupt_request |= CPU_INTERRUPT_EXITTB;
1002}
1003
1004/* Check section/page access permissions.
1005   Returns the page protection flags, or zero if the access is not
1006   permitted.  */
1007static inline int check_ap(CPUARMState *env, int ap, int domain, int access_type,
1008                           int is_user)
1009{
1010  int prot_ro;
1011
1012  if (domain == 3)
1013    return PAGE_READ | PAGE_WRITE;
1014
1015  if (access_type == 1)
1016      prot_ro = 0;
1017  else
1018      prot_ro = PAGE_READ;
1019
1020  switch (ap) {
1021  case 0:
1022      if (access_type == 1)
1023          return 0;
1024      switch ((env->cp15.c1_sys >> 8) & 3) {
1025      case 1:
1026          return is_user ? 0 : PAGE_READ;
1027      case 2:
1028          return PAGE_READ;
1029      default:
1030          return 0;
1031      }
1032  case 1:
1033      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
1034  case 2:
1035      if (is_user)
1036          return prot_ro;
1037      else
1038          return PAGE_READ | PAGE_WRITE;
1039  case 3:
1040      return PAGE_READ | PAGE_WRITE;
1041  case 4: /* Reserved.  */
1042      return 0;
1043  case 5:
1044      return is_user ? 0 : prot_ro;
1045  case 6:
1046      return prot_ro;
1047  case 7:
1048      if (!arm_feature (env, ARM_FEATURE_V6K))
1049          return 0;
1050      return prot_ro;
1051  default:
1052      abort();
1053  }
1054}
1055
1056static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
1057{
1058    uint32_t table;
1059
1060    if (address & env->cp15.c2_mask)
1061        table = env->cp15.c2_base1 & 0xffffc000;
1062    else
1063        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;
1064
1065    table |= (address >> 18) & 0x3ffc;
1066    return table;
1067}
1068
1069static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
1070			    int is_user, uint32_t *phys_ptr, int *prot,
1071                            target_ulong *page_size)
1072{
1073    int code;
1074    uint32_t table;
1075    uint32_t desc;
1076    int type;
1077    int ap;
1078    int domain;
1079    uint32_t phys_addr;
1080
1081    /* Pagetable walk.  */
1082    /* Lookup l1 descriptor.  */
1083    table = get_level1_table_address(env, address);
1084    desc = ldl_phys(table);
1085    type = (desc & 3);
1086    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
1087    if (type == 0) {
1088        /* Section translation fault.  */
1089        code = 5;
1090        goto do_fault;
1091    }
1092    if (domain == 0 || domain == 2) {
1093        if (type == 2)
1094            code = 9; /* Section domain fault.  */
1095        else
1096            code = 11; /* Page domain fault.  */
1097        goto do_fault;
1098    }
1099    if (type == 2) {
1100        /* 1Mb section.  */
1101        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1102        ap = (desc >> 10) & 3;
1103        code = 13;
1104        *page_size = 1024 * 1024;
1105    } else {
1106        /* Lookup l2 entry.  */
1107	if (type == 1) {
1108	    /* Coarse pagetable.  */
1109	    table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1110	} else {
1111	    /* Fine pagetable.  */
1112	    table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
1113	}
1114        desc = ldl_phys(table);
1115        switch (desc & 3) {
1116        case 0: /* Page translation fault.  */
1117            code = 7;
1118            goto do_fault;
1119        case 1: /* 64k page.  */
1120            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1121            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1122            *page_size = 0x10000;
1123            break;
1124        case 2: /* 4k page.  */
1125            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1126            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1127            *page_size = 0x1000;
1128            break;
1129        case 3: /* 1k page.  */
1130	    if (type == 1) {
1131		if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1132		    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1133		} else {
1134		    /* Page translation fault.  */
1135		    code = 7;
1136		    goto do_fault;
1137		}
1138	    } else {
1139		phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
1140	    }
1141            ap = (desc >> 4) & 3;
1142            *page_size = 0x400;
1143            break;
1144        default:
1145            /* Never happens, but compiler isn't smart enough to tell.  */
1146            abort();
1147        }
1148        code = 15;
1149    }
1150    *prot = check_ap(env, ap, domain, access_type, is_user);
1151    if (!*prot) {
1152        /* Access permission fault.  */
1153        goto do_fault;
1154    }
1155    *prot |= PAGE_EXEC;
1156    *phys_ptr = phys_addr;
1157    return 0;
1158do_fault:
1159    return code | (domain << 4);
1160}
1161
1162static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
1163			    int is_user, uint32_t *phys_ptr, int *prot,
1164                            target_ulong *page_size)
1165{
1166    int code;
1167    uint32_t table;
1168    uint32_t desc;
1169    uint32_t xn;
1170    int type;
1171    int ap;
1172    int domain;
1173    uint32_t phys_addr;
1174
1175    /* Pagetable walk.  */
1176    /* Lookup l1 descriptor.  */
1177    table = get_level1_table_address(env, address);
1178    desc = ldl_phys(table);
1179    type = (desc & 3);
1180    if (type == 0 || type == 3) {
1181        /* Section translation fault.  */
1182        code = 5;
1183        domain = 0;
1184        goto do_fault;
1185    } else if (type == 2 && (desc & (1 << 18))) {
1186        /* Supersection.  */
1187        domain = 0;
1188    } else {
1189        /* Section or page.  */
1190        domain = (desc >> 4) & 0x1e;
1191    }
1192    domain = (env->cp15.c3 >> domain) & 3;
1193    if (domain == 0 || domain == 2) {
1194        if (type == 2)
1195            code = 9; /* Section domain fault.  */
1196        else
1197            code = 11; /* Page domain fault.  */
1198        goto do_fault;
1199    }
1200    if (type == 2) {
1201        if (desc & (1 << 18)) {
1202            /* Supersection.  */
1203            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
1204            *page_size = 0x1000000;
1205        } else {
1206            /* Section.  */
1207            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1208            *page_size = 0x100000;
1209        }
1210        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
1211        xn = desc & (1 << 4);
1212        code = 13;
1213    } else {
1214        /* Lookup l2 entry.  */
1215        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1216        desc = ldl_phys(table);
1217        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
1218        switch (desc & 3) {
1219        case 0: /* Page translation fault.  */
1220            code = 7;
1221            goto do_fault;
1222        case 1: /* 64k page.  */
1223            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1224            xn = desc & (1 << 15);
1225            *page_size = 0x10000;
1226            break;
1227        case 2: case 3: /* 4k page.  */
1228            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1229            xn = desc & 1;
1230            *page_size = 0x1000;
1231            break;
1232        default:
1233            /* Never happens, but compiler isn't smart enough to tell.  */
1234            abort();
1235        }
1236        code = 15;
1237    }
1238    if (domain == 3) {
1239        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1240    } else {
1241    if (xn && access_type == 2)
1242        goto do_fault;
1243
1244    /* The simplified model uses AP[0] as an access control bit.  */
1245    if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
1246        /* Access flag fault.  */
1247        code = (code == 15) ? 6 : 3;
1248        goto do_fault;
1249    }
1250    *prot = check_ap(env, ap, domain, access_type, is_user);
1251    if (!*prot) {
1252        /* Access permission fault.  */
1253        goto do_fault;
1254        }
1255        if (!xn) {
1256            *prot |= PAGE_EXEC;
1257        }
1258    }
1259    *phys_ptr = phys_addr;
1260    return 0;
1261do_fault:
1262    return code | (domain << 4);
1263}
1264
1265static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, int access_type,
1266			     int is_user, uint32_t *phys_ptr, int *prot)
1267{
1268    int n;
1269    uint32_t mask;
1270    uint32_t base;
1271
1272    *phys_ptr = address;
1273    for (n = 7; n >= 0; n--) {
1274	base = env->cp15.c6_region[n];
1275	if ((base & 1) == 0)
1276	    continue;
1277	mask = 1 << ((base >> 1) & 0x1f);
1278	/* Keep this shift separate from the above to avoid an
1279	   (undefined) << 32.  */
1280	mask = (mask << 1) - 1;
1281	if (((base ^ address) & ~mask) == 0)
1282	    break;
1283    }
1284    if (n < 0)
1285	return 2;
1286
1287    if (access_type == 2) {
1288	mask = env->cp15.c5_insn;
1289    } else {
1290	mask = env->cp15.c5_data;
1291    }
1292    mask = (mask >> (n * 4)) & 0xf;
1293    switch (mask) {
1294    case 0:
1295	return 1;
1296    case 1:
1297	if (is_user)
1298	  return 1;
1299	*prot = PAGE_READ | PAGE_WRITE;
1300	break;
1301    case 2:
1302	*prot = PAGE_READ;
1303	if (!is_user)
1304	    *prot |= PAGE_WRITE;
1305	break;
1306    case 3:
1307	*prot = PAGE_READ | PAGE_WRITE;
1308	break;
1309    case 5:
1310	if (is_user)
1311	    return 1;
1312	*prot = PAGE_READ;
1313	break;
1314    case 6:
1315	*prot = PAGE_READ;
1316	break;
1317    default:
1318	/* Bad permission.  */
1319	return 1;
1320    }
1321    *prot |= PAGE_EXEC;
1322    return 0;
1323}
1324
1325#ifdef CONFIG_GLES2
1326int get_phys_addr(CPUARMState *env, uint32_t address,
1327                  int access_type, int is_user,
1328                  uint32_t *phys_ptr, int *prot,
1329                  target_ulong *page_size);
1330#else
1331static
1332#endif
1333int get_phys_addr(CPUARMState *env, uint32_t address,
1334                  int access_type, int is_user,
1335                  uint32_t *phys_ptr, int *prot,
1336                  target_ulong *page_size)
1337{
1338    /* Fast Context Switch Extension.  */
1339    if (address < 0x02000000)
1340        address += env->cp15.c13_fcse;
1341
1342    if ((env->cp15.c1_sys & 1) == 0) {
1343        /* MMU/MPU disabled.  */
1344        *phys_ptr = address;
1345        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1346        *page_size = TARGET_PAGE_SIZE;
1347        return 0;
1348    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
1349        *page_size = TARGET_PAGE_SIZE;
1350    return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
1351                             prot);
1352    } else if (env->cp15.c1_sys & (1 << 23)) {
1353        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
1354                                prot, page_size);
1355    } else {
1356        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
1357                                prot, page_size);
1358    }
1359}
1360
1361int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
1362                              int access_type, int mmu_idx)
1363{
1364    uint32_t phys_addr;
1365    target_ulong page_size;
1366    int prot;
1367    int ret, is_user;
1368
1369    is_user = mmu_idx == MMU_USER_IDX;
1370    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
1371                        &page_size);
1372    if (ret == 0) {
1373        /* Map a single [sub]page.  */
1374        phys_addr &= ~(uint32_t)0x3ff;
1375        address &= ~(uint32_t)0x3ff;
1376        tlb_set_page (env, address, phys_addr, prot | PAGE_EXEC, mmu_idx,
1377                      page_size);
1378        return 0;
1379    }
1380
1381    if (access_type == 2) {
1382        env->cp15.c5_insn = ret;
1383        env->cp15.c6_insn = address;
1384        env->exception_index = EXCP_PREFETCH_ABORT;
1385    } else {
1386        env->cp15.c5_data = ret;
1387        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
1388            env->cp15.c5_data |= (1 << 11);
1389        env->cp15.c6_data = address;
1390        env->exception_index = EXCP_DATA_ABORT;
1391    }
1392    return 1;
1393}
1394
1395hwaddr cpu_get_phys_page_debug(CPUARMState *env, target_ulong addr)
1396{
1397    uint32_t phys_addr;
1398    target_ulong page_size;
1399    int prot;
1400    int ret;
1401
1402    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);
1403
1404    if (ret != 0)
1405        return -1;
1406
1407    return phys_addr;
1408}
1409
1410/* Return basic MPU access permission bits.  */
1411static uint32_t simple_mpu_ap_bits(uint32_t val)
1412{
1413    uint32_t ret;
1414    uint32_t mask;
1415    int i;
1416    ret = 0;
1417    mask = 3;
1418    for (i = 0; i < 16; i += 2) {
1419        ret |= (val >> i) & mask;
1420        mask <<= 2;
1421    }
1422    return ret;
1423}
1424
1425/* Pad basic MPU access permission bits to extended format.  */
1426static uint32_t extended_mpu_ap_bits(uint32_t val)
1427{
1428    uint32_t ret;
1429    uint32_t mask;
1430    int i;
1431    ret = 0;
1432    mask = 3;
1433    for (i = 0; i < 16; i += 2) {
1434        ret |= (val & mask) << i;
1435        mask <<= 2;
1436    }
1437    return ret;
1438}
1439
1440void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
1441{
1442    int op1;
1443    int op2;
1444    int crm;
1445
1446    op1 = (insn >> 21) & 7;
1447    op2 = (insn >> 5) & 7;
1448    crm = insn & 0xf;
1449    switch ((insn >> 16) & 0xf) {
1450    case 0:
1451        /* ID codes.  */
1452        if (arm_feature(env, ARM_FEATURE_XSCALE))
1453            break;
1454        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1455            break;
1456        if (arm_feature(env, ARM_FEATURE_V7)
1457                && op1 == 2 && crm == 0 && op2 == 0) {
1458            env->cp15.c0_cssel = val & 0xf;
1459            break;
1460        }
1461        goto bad_reg;
1462    case 1: /* System configuration.  */
1463        switch (crm) {
1464        case 0:
1465        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1466            op2 = 0;
1467        switch (op2) {
1468        case 0:
1469                if (!arm_feature(env, ARM_FEATURE_XSCALE))
1470                env->cp15.c1_sys = val;
1471            /* ??? Lots of these bits are not implemented.  */
1472            /* This may enable/disable the MMU, so do a TLB flush.  */
1473            tlb_flush(env, 1);
1474            break;
1475            case 1: /* Auxiliary control register.  */
1476            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1477                env->cp15.c1_xscaleauxcr = val;
1478                break;
1479            }
1480            /* Not implemented.  */
1481            break;
1482        case 2:
1483            if (arm_feature(env, ARM_FEATURE_XSCALE))
1484                goto bad_reg;
1485            if (env->cp15.c1_coproc != val) {
1486                env->cp15.c1_coproc = val;
1487                /* ??? Is this safe when called from within a TB?  */
1488                tb_flush(env);
1489                }
1490                break;
1491            default:
1492                goto bad_reg;
1493            }
1494            break;
1495        case 1:
1496            if (!arm_feature(env, ARM_FEATURE_TRUSTZONE)
1497                || (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
1498                goto bad_reg;
1499            switch (op2) {
1500            case 0: /* Secure configuration register. */
1501                if (env->cp15.c1_secfg & 1)
1502                    goto bad_reg;
1503                env->cp15.c1_secfg = val;
1504                break;
1505            case 1: /* Secure debug enable register. */
1506                if (env->cp15.c1_secfg & 1)
1507                    goto bad_reg;
1508                env->cp15.c1_sedbg = val;
1509                break;
1510            case 2: /* Nonsecure access control register. */
1511                if (env->cp15.c1_secfg & 1)
1512                    goto bad_reg;
1513                env->cp15.c1_nseac = val;
1514                break;
1515            default:
1516                goto bad_reg;
1517            }
1518            break;
1519        default:
1520            goto bad_reg;
1521        }
1522        break;
1523    case 2: /* MMU Page table control / MPU cache control.  */
1524        if (arm_feature(env, ARM_FEATURE_MPU)) {
1525            switch (op2) {
1526            case 0:
1527                env->cp15.c2_data = val;
1528                break;
1529            case 1:
1530                env->cp15.c2_insn = val;
1531                break;
1532            default:
1533                goto bad_reg;
1534            }
1535        } else {
1536	    switch (op2) {
1537	    case 0:
1538		env->cp15.c2_base0 = val;
1539		break;
1540	    case 1:
1541		env->cp15.c2_base1 = val;
1542		break;
1543	    case 2:
1544                val &= 7;
1545                env->cp15.c2_control = val;
1546		env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
1547                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
1548		break;
1549	    default:
1550		goto bad_reg;
1551	    }
1552        }
1553        break;
1554    case 3: /* MMU Domain access control / MPU write buffer control.  */
1555        env->cp15.c3 = val;
1556        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
1557        break;
1558    case 4: /* Reserved.  */
1559        goto bad_reg;
1560    case 5: /* MMU Fault status / MPU access permission.  */
1561        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1562            op2 = 0;
1563        switch (op2) {
1564        case 0:
1565            if (arm_feature(env, ARM_FEATURE_MPU))
1566                val = extended_mpu_ap_bits(val);
1567            env->cp15.c5_data = val;
1568            break;
1569        case 1:
1570            if (arm_feature(env, ARM_FEATURE_MPU))
1571                val = extended_mpu_ap_bits(val);
1572            env->cp15.c5_insn = val;
1573            break;
1574        case 2:
1575            if (!arm_feature(env, ARM_FEATURE_MPU))
1576                goto bad_reg;
1577            env->cp15.c5_data = val;
1578            break;
1579        case 3:
1580            if (!arm_feature(env, ARM_FEATURE_MPU))
1581                goto bad_reg;
1582            env->cp15.c5_insn = val;
1583            break;
1584        default:
1585            goto bad_reg;
1586        }
1587        break;
1588    case 6: /* MMU Fault address / MPU base/size.  */
1589        if (arm_feature(env, ARM_FEATURE_MPU)) {
1590            if (crm >= 8)
1591                goto bad_reg;
1592            env->cp15.c6_region[crm] = val;
1593        } else {
1594            if (arm_feature(env, ARM_FEATURE_OMAPCP))
1595                op2 = 0;
1596            switch (op2) {
1597            case 0:
1598                env->cp15.c6_data = val;
1599                break;
1600            case 1: /* ??? This is WFAR on armv6 */
1601            case 2:
1602                env->cp15.c6_insn = val;
1603                break;
1604            default:
1605                goto bad_reg;
1606            }
1607        }
1608        break;
1609    case 7: /* Cache control.  */
1610        env->cp15.c15_i_max = 0x000;
1611        env->cp15.c15_i_min = 0xff0;
1612        if (op1 != 0) {
1613            goto bad_reg;
1614        }
1615        /* No cache, so nothing to do except VA->PA translations. */
1616        if (arm_feature(env, ARM_FEATURE_VAPA)) {
1617            switch (crm) {
1618            case 4:
1619                if (arm_feature(env, ARM_FEATURE_V7)) {
1620                    env->cp15.c7_par = val & 0xfffff6ff;
1621                } else {
1622                    env->cp15.c7_par = val & 0xfffff1ff;
1623                }
1624                break;
1625            case 8: {
1626                uint32_t phys_addr;
1627                target_ulong page_size;
1628                int prot;
1629                int ret, is_user = op2 & 2;
1630                int access_type = op2 & 1;
1631
1632                if (op2 & 4) {
1633                    /* Other states are only available with TrustZone */
1634                    goto bad_reg;
1635                }
1636                ret = get_phys_addr(env, val, access_type, is_user,
1637                                    &phys_addr, &prot, &page_size);
1638                if (ret == 0) {
1639                    /* We do not set any attribute bits in the PAR */
1640                    if (page_size == (1 << 24)
1641                        && arm_feature(env, ARM_FEATURE_V7)) {
1642                        env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
1643                    } else {
1644                        env->cp15.c7_par = phys_addr & 0xfffff000;
1645                    }
1646                } else {
1647                    env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
1648                                       ((ret & (12 << 1)) >> 6) |
1649                                       ((ret & 0xf) << 1) | 1;
1650                }
1651                break;
1652            }
1653            }
1654        }
1655        break;
1656    case 8: /* MMU TLB control.  */
1657        switch (op2) {
1658        case 0: /* Invalidate all.  */
1659            tlb_flush(env, 0);
1660            break;
1661        case 1: /* Invalidate single TLB entry.  */
1662            tlb_flush_page(env, val & TARGET_PAGE_MASK);
1663            break;
1664        case 2: /* Invalidate on ASID.  */
1665            tlb_flush(env, val == 0);
1666            break;
1667        case 3: /* Invalidate single entry on MVA.  */
1668            /* ??? This is like case 1, but ignores ASID.  */
1669            tlb_flush(env, 1);
1670            break;
1671        default:
1672            goto bad_reg;
1673        }
1674        break;
1675    case 9:
1676        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1677            break;
1678        if (arm_feature(env, ARM_FEATURE_STRONGARM))
1679            break; /* Ignore ReadBuffer access */
1680        switch (crm) {
1681        case 0: /* Cache lockdown.  */
1682	    switch (op1) {
1683	    case 0: /* L1 cache.  */
1684		switch (op2) {
1685		case 0:
1686		    env->cp15.c9_data = val;
1687		    break;
1688		case 1:
1689		    env->cp15.c9_insn = val;
1690		    break;
1691		default:
1692		    goto bad_reg;
1693		}
1694		break;
1695	    case 1: /* L2 cache.  */
1696                switch (op2) {
1697                case 0: /* L2 cache lockdown */
1698                case 2: /* L2 cache auxiliary control */
1699                    /* ignore */
1700                    break;
1701                default:
1702                    goto bad_reg;
1703                }
1704		break;
1705	    default:
1706		goto bad_reg;
1707	    }
1708	    break;
1709        case 1: /* TCM memory region registers.  */
1710        case 2:
1711            /* Not implemented.  */
1712            goto bad_reg;
1713        case 12: /* Performance monitor control */
1714            /* Performance monitors are implementation defined in v7,
1715             * but with an ARM recommended set of registers, which we
1716             * follow (although we don't actually implement any counters)
1717             */
1718            if (!arm_feature(env, ARM_FEATURE_V7)) {
1719                goto bad_reg;
1720            }
1721            switch (op2) {
1722            case 0: /* performance monitor control register */
1723                /* only the DP, X, D and E bits are writable */
1724                env->cp15.c9_pmcr &= ~0x39;
1725                env->cp15.c9_pmcr |= (val & 0x39);
1726                break;
1727            case 1: /* Count enable set register */
1728                val &= (1 << 31);
1729                env->cp15.c9_pmcnten |= val;
1730                break;
1731            case 2: /* Count enable clear */
1732                val &= (1 << 31);
1733                env->cp15.c9_pmcnten &= ~val;
1734                break;
1735            case 3: /* Overflow flag status */
1736                env->cp15.c9_pmovsr &= ~val;
1737                break;
1738            case 4: /* Software increment */
1739                /* RAZ/WI since we don't implement the software-count event */
1740                break;
1741            case 5: /* Event counter selection register */
1742                /* Since we don't implement any events, writing to this register
1743                 * is actually UNPREDICTABLE. So we choose to RAZ/WI.
1744                 */
1745                break;
1746            default:
1747                goto bad_reg;
1748            }
1749            break;
1750        case 13: /* Performance counters */
1751            if (!arm_feature(env, ARM_FEATURE_V7)) {
1752                goto bad_reg;
1753            }
1754            switch (op2) {
1755            case 0: /* Cycle count register: not implemented, so RAZ/WI */
1756                break;
1757            case 1: /* Event type select */
1758                env->cp15.c9_pmxevtyper = val & 0xff;
1759                break;
1760            case 2: /* Event count register */
1761                /* Unimplemented (we have no events), RAZ/WI */
1762                break;
1763            default:
1764                goto bad_reg;
1765            }
1766            break;
1767        case 14: /* Performance monitor control */
1768            if (!arm_feature(env, ARM_FEATURE_V7)) {
1769                goto bad_reg;
1770            }
1771            switch (op2) {
1772            case 0: /* user enable */
1773                env->cp15.c9_pmuserenr = val & 1;
1774                /* changes access rights for cp registers, so flush tbs */
1775                tb_flush(env);
1776                break;
1777            case 1: /* interrupt enable set */
1778                /* We have no event counters so only the C bit can be changed */
1779                val &= (1 << 31);
1780                env->cp15.c9_pminten |= val;
1781                break;
1782            case 2: /* interrupt enable clear */
1783                val &= (1 << 31);
1784                env->cp15.c9_pminten &= ~val;
1785                break;
1786            }
1787            break;
1788        default:
1789            goto bad_reg;
1790        }
1791        break;
1792    case 10: /* MMU TLB lockdown.  */
1793        /* ??? TLB lockdown not implemented.  */
1794        break;
1795    case 12: /* Reserved.  */
1796        if (!op1 && !crm) {
1797            switch (op2) {
1798            case 0:
1799                if (!arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
1800                    goto bad_reg;
1801                }
1802                env->cp15.c12_vbar = val & ~0x1f;
1803                break;
1804            case 1:
1805                if (!arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
1806                    goto bad_reg;
1807                }
1808                if (!(env->cp15.c1_secfg & 1)) {
1809                    env->cp15.c12_mvbar = val & ~0x1f;
1810                }
1811                break;
1812            default:
1813                goto bad_reg;
1814            }
1815            break;
1816        }
1817        goto bad_reg;
1818    case 13: /* Process ID.  */
1819        switch (op2) {
1820        case 0:
1821            /* Unlike real hardware the qemu TLB uses virtual addresses,
1822               not modified virtual addresses, so this causes a TLB flush.
1823             */
1824            if (env->cp15.c13_fcse != val)
1825              tlb_flush(env, 1);
1826            env->cp15.c13_fcse = val;
1827            break;
1828        case 1:
1829            /* This changes the ASID, so do a TLB flush.  */
1830            if (env->cp15.c13_context != val
1831                && !arm_feature(env, ARM_FEATURE_MPU))
1832              tlb_flush(env, 0);
1833            env->cp15.c13_context = val;
1834            break;
1835        default:
1836            goto bad_reg;
1837        }
1838        break;
1839    case 14: /* Reserved.  */
1840        goto bad_reg;
1841    case 15: /* Implementation specific.  */
1842        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1843            if (op2 == 0 && crm == 1) {
1844                if (env->cp15.c15_cpar != (val & 0x3fff)) {
1845                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
1846                    tb_flush(env);
1847                    env->cp15.c15_cpar = val & 0x3fff;
1848                }
1849                break;
1850            }
1851            goto bad_reg;
1852        }
1853        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1854            switch (crm) {
1855            case 0:
1856                break;
1857            case 1: /* Set TI925T configuration.  */
1858                env->cp15.c15_ticonfig = val & 0xe7;
1859                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
1860                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1861                break;
1862            case 2: /* Set I_max.  */
1863                env->cp15.c15_i_max = val;
1864                break;
1865            case 3: /* Set I_min.  */
1866                env->cp15.c15_i_min = val;
1867                break;
1868            case 4: /* Set thread-ID.  */
1869                env->cp15.c15_threadid = val & 0xffff;
1870                break;
1871            case 8: /* Wait-for-interrupt (deprecated).  */
1872                cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_HALT);
1873                break;
1874            default:
1875                goto bad_reg;
1876            }
1877        }
1878        break;
1879    }
1880    return;
1881bad_reg:
1882    /* ??? For debugging only.  Should raise illegal instruction exception.  */
1883    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1884              (insn >> 16) & 0xf, crm, op1, op2);
1885}
1886
1887uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
1888{
1889    int op1;
1890    int op2;
1891    int crm;
1892
1893    op1 = (insn >> 21) & 7;
1894    op2 = (insn >> 5) & 7;
1895    crm = insn & 0xf;
1896    switch ((insn >> 16) & 0xf) {
1897    case 0: /* ID codes.  */
1898        switch (op1) {
1899        case 0:
1900            switch (crm) {
1901            case 0:
1902                switch (op2) {
1903                case 0: /* Device ID.  */
1904                    return env->cp15.c0_cpuid;
1905                case 1: /* Cache Type.  */
1906		    return env->cp15.c0_cachetype;
1907                case 2: /* TCM status.  */
1908                    return 0;
1909                case 3: /* TLB type register.  */
1910                    return 0; /* No lockable TLB entries.  */
1911                case 5: /* MPIDR */
1912                    /* The MPIDR was standardised in v7; prior to
1913                     * this it was implemented only in the 11MPCore.
1914                     * For all other pre-v7 cores it does not exist.
1915                     */
1916                    if (arm_feature(env, ARM_FEATURE_V7) ||
1917                        ARM_CPUID(env) == ARM_CPUID_ARM11MPCORE) {
1918                        int mpidr = ENV_GET_CPU(env)->cpu_index;
1919                        /* We don't support setting cluster ID ([8..11])
1920                         * so these bits always RAZ.
1921                         */
1922                        if (arm_feature(env, ARM_FEATURE_V7MP)) {
1923                            mpidr |= (1 << 31);
1924                            /* Cores which are uniprocessor (non-coherent)
1925                             * but still implement the MP extensions set
1926                             * bit 30. (For instance, A9UP.) However we do
1927                             * not currently model any of those cores.
1928                             */
1929                        }
1930                        return mpidr;
1931                    }
1932                    /* otherwise fall through to the unimplemented-reg case */
1933                default:
1934                    goto bad_reg;
1935                }
1936            case 1:
1937                if (!arm_feature(env, ARM_FEATURE_V6))
1938                    goto bad_reg;
1939                return env->cp15.c0_c1[op2];
1940            case 2:
1941                if (!arm_feature(env, ARM_FEATURE_V6))
1942                    goto bad_reg;
1943                return env->cp15.c0_c2[op2];
1944            case 3: case 4: case 5: case 6: case 7:
1945                return 0;
1946            default:
1947                goto bad_reg;
1948            }
1949            break;
1950        case 1:
1951            /* These registers aren't documented on arm11 cores.  However
1952               Linux looks at them anyway.  */
1953            if (!arm_feature(env, ARM_FEATURE_V6))
1954                goto bad_reg;
1955            if (crm != 0)
1956                goto bad_reg;
1957            if (!arm_feature(env, ARM_FEATURE_V7))
1958                return 0;
1959
1960            switch (op2) {
1961            case 0:
1962                return env->cp15.c0_ccsid[env->cp15.c0_cssel];
1963            case 1:
1964                return env->cp15.c0_clid;
1965            case 7:
1966                return 0;
1967            }
1968            goto bad_reg;
1969        case 2:
1970            if (op2 != 0 || crm != 0)
1971                goto bad_reg;
1972            return env->cp15.c0_cssel;
1973        default:
1974            goto bad_reg;
1975        }
1976        break;
1977    case 1: /* System configuration.  */
1978        switch (crm) {
1979        case 0:
1980        if (arm_feature(env, ARM_FEATURE_OMAPCP))
1981            op2 = 0;
1982        switch (op2) {
1983        case 0: /* Control register.  */
1984            return env->cp15.c1_sys;
1985        case 1: /* Auxiliary control register.  */
1986            if (arm_feature(env, ARM_FEATURE_XSCALE))
1987                return env->cp15.c1_xscaleauxcr;
1988            if (!arm_feature(env, ARM_FEATURE_AUXCR))
1989                goto bad_reg;
1990            switch (ARM_CPUID(env)) {
1991            case ARM_CPUID_ARM1026:
1992                return 1;
1993            case ARM_CPUID_ARM1136:
1994            case ARM_CPUID_ARM1136_R2:
1995                return 7;
1996            case ARM_CPUID_ARM11MPCORE:
1997                return 1;
1998            case ARM_CPUID_CORTEXA8:
1999                case ARM_CPUID_CORTEXA8_R2:
2000                return 2;
2001                case ARM_CPUID_CORTEXA9:
2002                    return 0;
2003            default:
2004                goto bad_reg;
2005            }
2006                break;
2007        case 2: /* Coprocessor access register.  */
2008            if (arm_feature(env, ARM_FEATURE_XSCALE))
2009                goto bad_reg;
2010            return env->cp15.c1_coproc;
2011        default:
2012            goto bad_reg;
2013        }
2014            break;
2015        case 1:
2016            if (!arm_feature(env, ARM_FEATURE_TRUSTZONE)
2017                || (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
2018                goto bad_reg;
2019            switch (op2) {
2020            case 0: /* Secure configuration register. */
2021                if (env->cp15.c1_secfg & 1)
2022                    goto bad_reg;
2023                return env->cp15.c1_secfg;
2024            case 1: /* Secure debug enable register. */
2025                if (env->cp15.c1_secfg & 1)
2026                    goto bad_reg;
2027                return env->cp15.c1_sedbg;
2028            case 2: /* Nonsecure access control register. */
2029                return env->cp15.c1_nseac;
2030            default:
2031                goto bad_reg;
2032            }
2033            break;
2034        default:
2035            goto bad_reg;
2036        }
2037        break;
2038    case 2: /* MMU Page table control / MPU cache control.  */
2039        if (arm_feature(env, ARM_FEATURE_MPU)) {
2040            switch (op2) {
2041            case 0:
2042                return env->cp15.c2_data;
2043                break;
2044            case 1:
2045                return env->cp15.c2_insn;
2046                break;
2047            default:
2048                goto bad_reg;
2049            }
2050        } else {
2051	    switch (op2) {
2052	    case 0:
2053		return env->cp15.c2_base0;
2054	    case 1:
2055		return env->cp15.c2_base1;
2056	    case 2:
2057                return env->cp15.c2_control;
2058	    default:
2059		goto bad_reg;
2060	    }
2061	}
2062    case 3: /* MMU Domain access control / MPU write buffer control.  */
2063        return env->cp15.c3;
2064    case 4: /* Reserved.  */
2065        goto bad_reg;
2066    case 5: /* MMU Fault status / MPU access permission.  */
2067        if (arm_feature(env, ARM_FEATURE_OMAPCP))
2068            op2 = 0;
2069        switch (op2) {
2070        case 0:
2071            if (arm_feature(env, ARM_FEATURE_MPU))
2072                return simple_mpu_ap_bits(env->cp15.c5_data);
2073            return env->cp15.c5_data;
2074        case 1:
2075            if (arm_feature(env, ARM_FEATURE_MPU))
2076                return simple_mpu_ap_bits(env->cp15.c5_data);
2077            return env->cp15.c5_insn;
2078        case 2:
2079            if (!arm_feature(env, ARM_FEATURE_MPU))
2080                goto bad_reg;
2081            return env->cp15.c5_data;
2082        case 3:
2083            if (!arm_feature(env, ARM_FEATURE_MPU))
2084                goto bad_reg;
2085            return env->cp15.c5_insn;
2086        default:
2087            goto bad_reg;
2088        }
2089    case 6: /* MMU Fault address.  */
2090        if (arm_feature(env, ARM_FEATURE_MPU)) {
2091            if (crm >= 8)
2092                goto bad_reg;
2093            return env->cp15.c6_region[crm];
2094        } else {
2095            if (arm_feature(env, ARM_FEATURE_OMAPCP))
2096                op2 = 0;
2097	    switch (op2) {
2098	    case 0:
2099		return env->cp15.c6_data;
2100	    case 1:
2101		if (arm_feature(env, ARM_FEATURE_V6)) {
2102		    /* Watchpoint Fault Adrress.  */
2103		    return 0; /* Not implemented.  */
2104                }
2105		    /* Instruction Fault Adrress.  */
2106		    /* Arm9 doesn't have an IFAR, but implementing it anyway
2107		       shouldn't do any harm.  */
2108		    return env->cp15.c6_insn;
2109	    case 2:
2110		if (arm_feature(env, ARM_FEATURE_V6)) {
2111		    /* Instruction Fault Adrress.  */
2112		    return env->cp15.c6_insn;
2113		}
2114                goto bad_reg;
2115	    default:
2116		goto bad_reg;
2117	    }
2118        }
2119    case 7: /* Cache control.  */
2120        if (crm == 4 && op1 == 0 && op2 == 0) {
2121            return env->cp15.c7_par;
2122        }
2123        if (((insn >> 12) & 0xf) == 0xf) /* clear ZF only if destination is r15 */
2124        env->ZF = 0;
2125        return 0;
2126    case 8: /* MMU TLB control.  */
2127        goto bad_reg;
2128    case 9:
2129        switch (crm) {
2130        case 0: /* Cache lockdown */
2131            switch (op1) {
2132            case 0: /* L1 cache.  */
2133                if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2134                    return 0;
2135                }
2136                switch (op2) {
2137                case 0:
2138                    return env->cp15.c9_data;
2139                case 1:
2140                    return env->cp15.c9_insn;
2141                default:
2142                    goto bad_reg;
2143                }
2144            case 1: /* L2 cache */
2145                if (crm != 0) {
2146                    goto bad_reg;
2147                }
2148                /* L2 Lockdown and Auxiliary control.  */
2149                return 0;
2150            default:
2151                goto bad_reg;
2152            }
2153            break;
2154        case 12: /* Performance monitor control */
2155            if (!arm_feature(env, ARM_FEATURE_V7)) {
2156                goto bad_reg;
2157            }
2158            switch (op2) {
2159            case 0: /* performance monitor control register */
2160                return env->cp15.c9_pmcr;
2161            case 1: /* count enable set */
2162            case 2: /* count enable clear */
2163                return env->cp15.c9_pmcnten;
2164            case 3: /* overflow flag status */
2165                return env->cp15.c9_pmovsr;
2166            case 4: /* software increment */
2167            case 5: /* event counter selection register */
2168                return 0; /* Unimplemented, RAZ/WI */
2169            default:
2170                goto bad_reg;
2171            }
2172        case 13: /* Performance counters */
2173            if (!arm_feature(env, ARM_FEATURE_V7)) {
2174                goto bad_reg;
2175            }
2176            switch (op2) {
2177            case 1: /* Event type select */
2178                return env->cp15.c9_pmxevtyper;
2179            case 0: /* Cycle count register */
2180            case 2: /* Event count register */
2181                /* Unimplemented, so RAZ/WI */
2182                return 0;
2183            default:
2184                goto bad_reg;
2185            }
2186        case 14: /* Performance monitor control */
2187            if (!arm_feature(env, ARM_FEATURE_V7)) {
2188                goto bad_reg;
2189            }
2190            switch (op2) {
2191            case 0: /* user enable */
2192                return env->cp15.c9_pmuserenr;
2193            case 1: /* interrupt enable set */
2194            case 2: /* interrupt enable clear */
2195                return env->cp15.c9_pminten;
2196            default:
2197                goto bad_reg;
2198            }
2199        default:
2200            goto bad_reg;
2201        }
2202        break;
2203    case 10: /* MMU TLB lockdown.  */
2204        /* ??? TLB lockdown not implemented.  */
2205        return 0;
2206    case 11: /* TCM DMA control.  */
2207    case 12: /* Reserved.  */
2208        if (!op1 && !crm) {
2209            switch (op2) {
2210            case 0: /* secure or nonsecure vector base address */
2211                if (arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
2212                    return env->cp15.c12_vbar;
2213                }
2214                break;
2215            case 1: /* monitor vector base address */
2216                if (arm_feature(env, ARM_FEATURE_TRUSTZONE)) {
2217                    return env->cp15.c12_mvbar;
2218                }
2219                break;
2220            default:
2221                break;
2222            }
2223        }
2224        goto bad_reg;
2225    case 13: /* Process ID.  */
2226        switch (op2) {
2227        case 0:
2228            return env->cp15.c13_fcse;
2229        case 1:
2230            return env->cp15.c13_context;
2231        default:
2232            goto bad_reg;
2233        }
2234    case 14: /* Reserved.  */
2235        goto bad_reg;
2236    case 15: /* Implementation specific.  */
2237        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2238            if (op2 == 0 && crm == 1)
2239                return env->cp15.c15_cpar;
2240
2241            goto bad_reg;
2242        }
2243        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2244            switch (crm) {
2245            case 0:
2246                return 0;
2247            case 1: /* Read TI925T configuration.  */
2248                return env->cp15.c15_ticonfig;
2249            case 2: /* Read I_max.  */
2250                return env->cp15.c15_i_max;
2251            case 3: /* Read I_min.  */
2252                return env->cp15.c15_i_min;
2253            case 4: /* Read thread-ID.  */
2254                return env->cp15.c15_threadid;
2255            case 8: /* TI925T_status */
2256                return 0;
2257            }
2258            /* TODO: Peripheral port remap register:
2259             * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
2260             * controller base address at $rn & ~0xfff and map size of
2261             * 0x200 << ($rn & 0xfff), when MMU is off.  */
2262            goto bad_reg;
2263        }
2264        return 0;
2265    }
2266bad_reg:
2267    /* ??? For debugging only.  Should raise illegal instruction exception.  */
2268    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
2269              (insn >> 16) & 0xf, crm, op1, op2);
2270    return 0;
2271}
2272
2273void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
2274{
2275    if ((env->uncached_cpsr & CPSR_M) == mode) {
2276        env->regs[13] = val;
2277    } else {
2278    env->banked_r13[bank_number(env, mode)] = val;
2279}
2280}
2281
2282uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
2283{
2284    if ((env->uncached_cpsr & CPSR_M) == mode) {
2285        return env->regs[13];
2286    } else {
2287    return env->banked_r13[bank_number(env, mode)];
2288    }
2289}
2290
2291uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2292{
2293    switch (reg) {
2294    case 0: /* APSR */
2295        return xpsr_read(env) & 0xf8000000;
2296    case 1: /* IAPSR */
2297        return xpsr_read(env) & 0xf80001ff;
2298    case 2: /* EAPSR */
2299        return xpsr_read(env) & 0xff00fc00;
2300    case 3: /* xPSR */
2301        return xpsr_read(env) & 0xff00fdff;
2302    case 5: /* IPSR */
2303        return xpsr_read(env) & 0x000001ff;
2304    case 6: /* EPSR */
2305        return xpsr_read(env) & 0x0700fc00;
2306    case 7: /* IEPSR */
2307        return xpsr_read(env) & 0x0700edff;
2308    case 8: /* MSP */
2309        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
2310    case 9: /* PSP */
2311        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
2312    case 16: /* PRIMASK */
2313        return (env->uncached_cpsr & CPSR_I) != 0;
2314    case 17: /* BASEPRI */
2315    case 18: /* BASEPRI_MAX */
2316        return env->v7m.basepri;
2317    case 19: /* FAULTMASK */
2318        return (env->uncached_cpsr & CPSR_F) != 0;
2319    case 20: /* CONTROL */
2320        return env->v7m.control;
2321    default:
2322        /* ??? For debugging only.  */
2323        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
2324        return 0;
2325    }
2326}
2327
2328void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
2329{
2330    switch (reg) {
2331    case 0: /* APSR */
2332        xpsr_write(env, val, 0xf8000000);
2333        break;
2334    case 1: /* IAPSR */
2335        xpsr_write(env, val, 0xf8000000);
2336        break;
2337    case 2: /* EAPSR */
2338        xpsr_write(env, val, 0xfe00fc00);
2339        break;
2340    case 3: /* xPSR */
2341        xpsr_write(env, val, 0xfe00fc00);
2342        break;
2343    case 5: /* IPSR */
2344        /* IPSR bits are readonly.  */
2345        break;
2346    case 6: /* EPSR */
2347        xpsr_write(env, val, 0x0600fc00);
2348        break;
2349    case 7: /* IEPSR */
2350        xpsr_write(env, val, 0x0600fc00);
2351        break;
2352    case 8: /* MSP */
2353        if (env->v7m.current_sp)
2354            env->v7m.other_sp = val;
2355        else
2356            env->regs[13] = val;
2357        break;
2358    case 9: /* PSP */
2359        if (env->v7m.current_sp)
2360            env->regs[13] = val;
2361        else
2362            env->v7m.other_sp = val;
2363        break;
2364    case 16: /* PRIMASK */
2365        if (val & 1)
2366            env->uncached_cpsr |= CPSR_I;
2367        else
2368            env->uncached_cpsr &= ~CPSR_I;
2369        break;
2370    case 17: /* BASEPRI */
2371        env->v7m.basepri = val & 0xff;
2372        break;
2373    case 18: /* BASEPRI_MAX */
2374        val &= 0xff;
2375        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
2376            env->v7m.basepri = val;
2377        break;
2378    case 19: /* FAULTMASK */
2379        if (val & 1)
2380            env->uncached_cpsr |= CPSR_F;
2381        else
2382            env->uncached_cpsr &= ~CPSR_F;
2383        break;
2384    case 20: /* CONTROL */
2385        env->v7m.control = val & 3;
2386        switch_v7m_sp(env, (val & 2) != 0);
2387        break;
2388    default:
2389        /* ??? For debugging only.  */
2390        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
2391        return;
2392    }
2393}
2394
2395void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
2396                ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
2397                void *opaque)
2398{
2399    if (cpnum < 0 || cpnum > 14) {
2400        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
2401        return;
2402    }
2403
2404    env->cp[cpnum].cp_read = cp_read;
2405    env->cp[cpnum].cp_write = cp_write;
2406    env->cp[cpnum].opaque = opaque;
2407}
2408
2409#endif
2410
2411/* Note that signed overflow is undefined in C.  The following routines are
2412   careful to use unsigned types where modulo arithmetic is required.
2413   Failure to do so _will_ break on newer gcc.  */
2414
2415/* Signed saturating arithmetic.  */
2416
2417/* Perform 16-bit signed saturating addition.  */
2418static inline uint16_t add16_sat(uint16_t a, uint16_t b)
2419{
2420    uint16_t res;
2421
2422    res = a + b;
2423    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
2424        if (a & 0x8000)
2425            res = 0x8000;
2426        else
2427            res = 0x7fff;
2428    }
2429    return res;
2430}
2431
2432/* Perform 8-bit signed saturating addition.  */
2433static inline uint8_t add8_sat(uint8_t a, uint8_t b)
2434{
2435    uint8_t res;
2436
2437    res = a + b;
2438    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
2439        if (a & 0x80)
2440            res = 0x80;
2441        else
2442            res = 0x7f;
2443    }
2444    return res;
2445}
2446
2447/* Perform 16-bit signed saturating subtraction.  */
2448static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
2449{
2450    uint16_t res;
2451
2452    res = a - b;
2453    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
2454        if (a & 0x8000)
2455            res = 0x8000;
2456        else
2457            res = 0x7fff;
2458    }
2459    return res;
2460}
2461
2462/* Perform 8-bit signed saturating subtraction.  */
2463static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
2464{
2465    uint8_t res;
2466
2467    res = a - b;
2468    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
2469        if (a & 0x80)
2470            res = 0x80;
2471        else
2472            res = 0x7f;
2473    }
2474    return res;
2475}
2476
2477#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2478#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2479#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
2480#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
2481#define PFX q
2482
2483#include "op_addsub.h"
2484
2485/* Unsigned saturating arithmetic.  */
2486static inline uint16_t add16_usat(uint16_t a, uint16_t b)
2487{
2488    uint16_t res;
2489    res = a + b;
2490    if (res < a)
2491        res = 0xffff;
2492    return res;
2493}
2494
2495static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
2496{
2497    if (a > b)
2498        return a - b;
2499    else
2500        return 0;
2501}
2502
2503static inline uint8_t add8_usat(uint8_t a, uint8_t b)
2504{
2505    uint8_t res;
2506    res = a + b;
2507    if (res < a)
2508        res = 0xff;
2509    return res;
2510}
2511
2512static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
2513{
2514    if (a > b)
2515        return a - b;
2516    else
2517        return 0;
2518}
2519
2520#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2521#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2522#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
2523#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
2524#define PFX uq
2525
2526#include "op_addsub.h"
2527
2528/* Signed modulo arithmetic.  */
2529#define SARITH16(a, b, n, op) do { \
2530    int32_t sum; \
2531    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
2532    RESULT(sum, n, 16); \
2533    if (sum >= 0) \
2534        ge |= 3 << (n * 2); \
2535    } while(0)
2536
2537#define SARITH8(a, b, n, op) do { \
2538    int32_t sum; \
2539    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
2540    RESULT(sum, n, 8); \
2541    if (sum >= 0) \
2542        ge |= 1 << n; \
2543    } while(0)
2544
2545
2546#define ADD16(a, b, n) SARITH16(a, b, n, +)
2547#define SUB16(a, b, n) SARITH16(a, b, n, -)
2548#define ADD8(a, b, n)  SARITH8(a, b, n, +)
2549#define SUB8(a, b, n)  SARITH8(a, b, n, -)
2550#define PFX s
2551#define ARITH_GE
2552
2553#include "op_addsub.h"
2554
2555/* Unsigned modulo arithmetic.  */
2556#define ADD16(a, b, n) do { \
2557    uint32_t sum; \
2558    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
2559    RESULT(sum, n, 16); \
2560    if ((sum >> 16) == 1) \
2561        ge |= 3 << (n * 2); \
2562    } while(0)
2563
2564#define ADD8(a, b, n) do { \
2565    uint32_t sum; \
2566    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
2567    RESULT(sum, n, 8); \
2568    if ((sum >> 8) == 1) \
2569        ge |= 1 << n; \
2570    } while(0)
2571
2572#define SUB16(a, b, n) do { \
2573    uint32_t sum; \
2574    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
2575    RESULT(sum, n, 16); \
2576    if ((sum >> 16) == 0) \
2577        ge |= 3 << (n * 2); \
2578    } while(0)
2579
2580#define SUB8(a, b, n) do { \
2581    uint32_t sum; \
2582    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
2583    RESULT(sum, n, 8); \
2584    if ((sum >> 8) == 0) \
2585        ge |= 1 << n; \
2586    } while(0)
2587
2588#define PFX u
2589#define ARITH_GE
2590
2591#include "op_addsub.h"
2592
2593/* Halved signed arithmetic.  */
2594#define ADD16(a, b, n) \
2595  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
2596#define SUB16(a, b, n) \
2597  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
2598#define ADD8(a, b, n) \
2599  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
2600#define SUB8(a, b, n) \
2601  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
2602#define PFX sh
2603
2604#include "op_addsub.h"
2605
2606/* Halved unsigned arithmetic.  */
2607#define ADD16(a, b, n) \
2608  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2609#define SUB16(a, b, n) \
2610  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2611#define ADD8(a, b, n) \
2612  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2613#define SUB8(a, b, n) \
2614  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2615#define PFX uh
2616
2617#include "op_addsub.h"
2618
2619static inline uint8_t do_usad(uint8_t a, uint8_t b)
2620{
2621    if (a > b)
2622        return a - b;
2623    else
2624        return b - a;
2625}
2626
2627/* Unsigned sum of absolute byte differences.  */
2628uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2629{
2630    uint32_t sum;
2631    sum = do_usad(a, b);
2632    sum += do_usad(a >> 8, b >> 8);
2633    sum += do_usad(a >> 16, b >>16);
2634    sum += do_usad(a >> 24, b >> 24);
2635    return sum;
2636}
2637
2638/* For ARMv6 SEL instruction.  */
2639uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2640{
2641    uint32_t mask;
2642
2643    mask = 0;
2644    if (flags & 1)
2645        mask |= 0xff;
2646    if (flags & 2)
2647        mask |= 0xff00;
2648    if (flags & 4)
2649        mask |= 0xff0000;
2650    if (flags & 8)
2651        mask |= 0xff000000;
2652    return (a & mask) | (b & ~mask);
2653}
2654
2655uint32_t HELPER(logicq_cc)(uint64_t val)
2656{
2657    return (val >> 32) | (val != 0);
2658}
2659
2660/* VFP support.  We follow the convention used for VFP instrunctions:
2661   Single precition routines have a "s" suffix, double precision a
2662   "d" suffix.  */
2663
2664/* Convert host exception flags to vfp form.  */
2665static inline int vfp_exceptbits_from_host(int host_bits)
2666{
2667    int target_bits = 0;
2668
2669    if (host_bits & float_flag_invalid)
2670        target_bits |= 1;
2671    if (host_bits & float_flag_divbyzero)
2672        target_bits |= 2;
2673    if (host_bits & float_flag_overflow)
2674        target_bits |= 4;
2675    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
2676        target_bits |= 8;
2677    if (host_bits & float_flag_inexact)
2678        target_bits |= 0x10;
2679    if (host_bits & float_flag_input_denormal)
2680        target_bits |= 0x80;
2681    return target_bits;
2682}
2683
2684uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
2685{
2686    int i;
2687    uint32_t fpscr;
2688
2689    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
2690            | (env->vfp.vec_len << 16)
2691            | (env->vfp.vec_stride << 20);
2692    i = get_float_exception_flags(&env->vfp.fp_status);
2693    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
2694    fpscr |= vfp_exceptbits_from_host(i);
2695    return fpscr;
2696}
2697
2698uint32_t vfp_get_fpscr(CPUARMState *env)
2699{
2700    return HELPER(vfp_get_fpscr)(env);
2701}
2702
2703/* Convert vfp exception flags to target form.  */
2704static inline int vfp_exceptbits_to_host(int target_bits)
2705{
2706    int host_bits = 0;
2707
2708    if (target_bits & 1)
2709        host_bits |= float_flag_invalid;
2710    if (target_bits & 2)
2711        host_bits |= float_flag_divbyzero;
2712    if (target_bits & 4)
2713        host_bits |= float_flag_overflow;
2714    if (target_bits & 8)
2715        host_bits |= float_flag_underflow;
2716    if (target_bits & 0x10)
2717        host_bits |= float_flag_inexact;
2718    if (target_bits & 0x80)
2719        host_bits |= float_flag_input_denormal;
2720    return host_bits;
2721}
2722
2723void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
2724{
2725    int i;
2726    uint32_t changed;
2727
2728    changed = env->vfp.xregs[ARM_VFP_FPSCR];
2729    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
2730    env->vfp.vec_len = (val >> 16) & 7;
2731    env->vfp.vec_stride = (val >> 20) & 3;
2732
2733    changed ^= val;
2734    if (changed & (3 << 22)) {
2735        i = (val >> 22) & 3;
2736        switch (i) {
2737        case 0:
2738            i = float_round_nearest_even;
2739            break;
2740        case 1:
2741            i = float_round_up;
2742            break;
2743        case 2:
2744            i = float_round_down;
2745            break;
2746        case 3:
2747            i = float_round_to_zero;
2748            break;
2749        }
2750        set_float_rounding_mode(i, &env->vfp.fp_status);
2751    }
2752    if (changed & (1 << 24)) {
2753        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2754        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2755    }
2756    if (changed & (1 << 25))
2757        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
2758
2759    i = vfp_exceptbits_to_host(val);
2760    set_float_exception_flags(i, &env->vfp.fp_status);
2761    set_float_exception_flags(0, &env->vfp.standard_fp_status);
2762}
2763
2764void vfp_set_fpscr(CPUARMState *env, uint32_t val)
2765{
2766    HELPER(vfp_set_fpscr)(env, val);
2767}
2768
2769#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
2770
2771#define VFP_BINOP(name) \
2772float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
2773{ \
2774    float_status *fpst = fpstp; \
2775    return float32_ ## name (a, b, fpst); \
2776} \
2777float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
2778{ \
2779    float_status *fpst = fpstp; \
2780    return float64_ ## name (a, b, fpst); \
2781}
2782VFP_BINOP(add)
2783VFP_BINOP(sub)
2784VFP_BINOP(mul)
2785VFP_BINOP(div)
2786#undef VFP_BINOP
2787
2788float32 VFP_HELPER(neg, s)(float32 a)
2789{
2790    return float32_chs(a);
2791}
2792
2793float64 VFP_HELPER(neg, d)(float64 a)
2794{
2795    return float64_chs(a);
2796}
2797
2798float32 VFP_HELPER(abs, s)(float32 a)
2799{
2800    return float32_abs(a);
2801}
2802
2803float64 VFP_HELPER(abs, d)(float64 a)
2804{
2805    return float64_abs(a);
2806}
2807
2808float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
2809{
2810    return float32_sqrt(a, &env->vfp.fp_status);
2811}
2812
2813float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
2814{
2815    return float64_sqrt(a, &env->vfp.fp_status);
2816}
2817
2818/* XXX: check quiet/signaling case */
2819#define DO_VFP_cmp(p, type) \
2820void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
2821{ \
2822    uint32_t flags; \
2823    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
2824    case 0: flags = 0x6; break; \
2825    case -1: flags = 0x8; break; \
2826    case 1: flags = 0x2; break; \
2827    default: case 2: flags = 0x3; break; \
2828    } \
2829    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2830        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2831} \
2832void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
2833{ \
2834    uint32_t flags; \
2835    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
2836    case 0: flags = 0x6; break; \
2837    case -1: flags = 0x8; break; \
2838    case 1: flags = 0x2; break; \
2839    default: case 2: flags = 0x3; break; \
2840    } \
2841    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2842        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2843}
2844DO_VFP_cmp(s, float32)
2845DO_VFP_cmp(d, float64)
2846#undef DO_VFP_cmp
2847
2848/* Integer to float and float to integer conversions */
2849
2850#define CONV_ITOF(name, fsz, sign) \
2851    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
2852{ \
2853    float_status *fpst = fpstp; \
2854    return sign##int32_to_##float##fsz(x, fpst); \
2855}
2856
2857#define CONV_FTOI(name, fsz, sign, round) \
2858uint32_t HELPER(name)(float##fsz x, void *fpstp) \
2859{ \
2860    float_status *fpst = fpstp; \
2861    if (float##fsz##_is_any_nan(x)) { \
2862        float_raise(float_flag_invalid, fpst); \
2863        return 0; \
2864    } \
2865    return float##fsz##_to_##sign##int32##round(x, fpst); \
2866}
2867
2868#define FLOAT_CONVS(name, p, fsz, sign) \
2869CONV_ITOF(vfp_##name##to##p, fsz, sign) \
2870CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
2871CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
2872
2873FLOAT_CONVS(si, s, 32, )
2874FLOAT_CONVS(si, d, 64, )
2875FLOAT_CONVS(ui, s, 32, u)
2876FLOAT_CONVS(ui, d, 64, u)
2877
2878#undef CONV_ITOF
2879#undef CONV_FTOI
2880#undef FLOAT_CONVS
2881
2882/* floating point conversion */
2883float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
2884{
2885    float64 r = float32_to_float64(x, &env->vfp.fp_status);
2886    /* ARM requires that S<->D conversion of any kind of NaN generates
2887     * a quiet NaN by forcing the most significant frac bit to 1.
2888     */
2889    return float64_maybe_silence_nan(r);
2890}
2891
2892float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
2893{
2894    float32 r =  float64_to_float32(x, &env->vfp.fp_status);
2895    /* ARM requires that S<->D conversion of any kind of NaN generates
2896     * a quiet NaN by forcing the most significant frac bit to 1.
2897     */
2898    return float32_maybe_silence_nan(r);
2899}
2900
2901/* VFP3 fixed point conversion.  */
2902#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
2903float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t  x, uint32_t shift, \
2904                                    void *fpstp) \
2905{ \
2906    float_status *fpst = fpstp; \
2907    float##fsz tmp; \
2908    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
2909    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
2910} \
2911uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
2912                                       void *fpstp) \
2913{ \
2914    float_status *fpst = fpstp; \
2915    float##fsz tmp; \
2916    if (float##fsz##_is_any_nan(x)) { \
2917        float_raise(float_flag_invalid, fpst); \
2918        return 0; \
2919    } \
2920    tmp = float##fsz##_scalbn(x, shift, fpst); \
2921    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
2922}
2923
2924VFP_CONV_FIX(sh, d, 64, int16, )
2925VFP_CONV_FIX(sl, d, 64, int32, )
2926VFP_CONV_FIX(uh, d, 64, uint16, u)
2927VFP_CONV_FIX(ul, d, 64, uint32, u)
2928VFP_CONV_FIX(sh, s, 32, int16, )
2929VFP_CONV_FIX(sl, s, 32, int32, )
2930VFP_CONV_FIX(uh, s, 32, uint16, u)
2931VFP_CONV_FIX(ul, s, 32, uint32, u)
2932#undef VFP_CONV_FIX
2933
2934/* Half precision conversions.  */
2935static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
2936{
2937    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2938    float32 r = float16_to_float32(make_float16(a), ieee, s);
2939    if (ieee) {
2940        return float32_maybe_silence_nan(r);
2941    }
2942    return r;
2943}
2944
2945static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
2946{
2947    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
2948    float16 r = float32_to_float16(a, ieee, s);
2949    if (ieee) {
2950        r = float16_maybe_silence_nan(r);
2951    }
2952    return float16_val(r);
2953}
2954
2955float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
2956{
2957    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
2958}
2959
2960uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
2961{
2962    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
2963}
2964
2965float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
2966{
2967    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
2968}
2969
2970uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
2971{
2972    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
2973}
2974
2975#define float32_two make_float32(0x40000000)
2976#define float32_three make_float32(0x40400000)
2977#define float32_one_point_five make_float32(0x3fc00000)
2978
2979float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
2980{
2981    float_status *s = &env->vfp.standard_fp_status;
2982    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2983        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2984        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2985            float_raise(float_flag_input_denormal, s);
2986        }
2987        return float32_two;
2988    }
2989    return float32_sub(float32_two, float32_mul(a, b, s), s);
2990}
2991
2992float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
2993{
2994    float_status *s = &env->vfp.standard_fp_status;
2995    float32 product;
2996    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
2997        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
2998        if (!(float32_is_zero(a) || float32_is_zero(b))) {
2999            float_raise(float_flag_input_denormal, s);
3000        }
3001        return float32_one_point_five;
3002    }
3003    product = float32_mul(a, b, s);
3004    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
3005}
3006
3007/* NEON helpers.  */
3008
3009/* Constants 256 and 512 are used in some helpers; we avoid relying on
3010 * int->float conversions at run-time.  */
3011#define float64_256 make_float64(0x4070000000000000LL)
3012#define float64_512 make_float64(0x4080000000000000LL)
3013
3014/* The algorithm that must be used to calculate the estimate
3015 * is specified by the ARM ARM.
3016 */
3017static float64 recip_estimate(float64 a, CPUARMState *env)
3018{
3019    /* These calculations mustn't set any fp exception flags,
3020     * so we use a local copy of the fp_status.
3021     */
3022    float_status dummy_status = env->vfp.standard_fp_status;
3023    float_status *s = &dummy_status;
3024    /* q = (int)(a * 512.0) */
3025    float64 q = float64_mul(float64_512, a, s);
3026    int64_t q_int = float64_to_int64_round_to_zero(q, s);
3027
3028    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
3029    q = int64_to_float64(q_int, s);
3030    q = float64_add(q, float64_half, s);
3031    q = float64_div(q, float64_512, s);
3032    q = float64_div(float64_one, q, s);
3033
3034    /* s = (int)(256.0 * r + 0.5) */
3035    q = float64_mul(q, float64_256, s);
3036    q = float64_add(q, float64_half, s);
3037    q_int = float64_to_int64_round_to_zero(q, s);
3038
3039    /* return (double)s / 256.0 */
3040    return float64_div(int64_to_float64(q_int, s), float64_256, s);
3041}
3042
3043float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
3044{
3045    float_status *s = &env->vfp.standard_fp_status;
3046    float64 f64;
3047    uint32_t val32 = float32_val(a);
3048
3049    int result_exp;
3050    int a_exp = (val32  & 0x7f800000) >> 23;
3051    int sign = val32 & 0x80000000;
3052
3053    if (float32_is_any_nan(a)) {
3054        if (float32_is_signaling_nan(a)) {
3055            float_raise(float_flag_invalid, s);
3056        }
3057        return float32_default_nan;
3058    } else if (float32_is_infinity(a)) {
3059        return float32_set_sign(float32_zero, float32_is_neg(a));
3060    } else if (float32_is_zero_or_denormal(a)) {
3061        if (!float32_is_zero(a)) {
3062            float_raise(float_flag_input_denormal, s);
3063        }
3064        float_raise(float_flag_divbyzero, s);
3065        return float32_set_sign(float32_infinity, float32_is_neg(a));
3066    } else if (a_exp >= 253) {
3067        float_raise(float_flag_underflow, s);
3068        return float32_set_sign(float32_zero, float32_is_neg(a));
3069    }
3070
3071    f64 = make_float64((0x3feULL << 52)
3072                       | ((int64_t)(val32 & 0x7fffff) << 29));
3073
3074    result_exp = 253 - a_exp;
3075
3076    f64 = recip_estimate(f64, env);
3077
3078    val32 = sign
3079        | ((result_exp & 0xff) << 23)
3080        | ((float64_val(f64) >> 29) & 0x7fffff);
3081    return make_float32(val32);
3082}
3083
3084/* The algorithm that must be used to calculate the estimate
3085 * is specified by the ARM ARM.
3086 */
3087static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
3088{
3089    /* These calculations mustn't set any fp exception flags,
3090     * so we use a local copy of the fp_status.
3091     */
3092    float_status dummy_status = env->vfp.standard_fp_status;
3093    float_status *s = &dummy_status;
3094    float64 q;
3095    int64_t q_int;
3096
3097    if (float64_lt(a, float64_half, s)) {
3098        /* range 0.25 <= a < 0.5 */
3099
3100        /* a in units of 1/512 rounded down */
3101        /* q0 = (int)(a * 512.0);  */
3102        q = float64_mul(float64_512, a, s);
3103        q_int = float64_to_int64_round_to_zero(q, s);
3104
3105        /* reciprocal root r */
3106        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
3107        q = int64_to_float64(q_int, s);
3108        q = float64_add(q, float64_half, s);
3109        q = float64_div(q, float64_512, s);
3110        q = float64_sqrt(q, s);
3111        q = float64_div(float64_one, q, s);
3112    } else {
3113        /* range 0.5 <= a < 1.0 */
3114
3115        /* a in units of 1/256 rounded down */
3116        /* q1 = (int)(a * 256.0); */
3117        q = float64_mul(float64_256, a, s);
3118        int64_t q_int = float64_to_int64_round_to_zero(q, s);
3119
3120        /* reciprocal root r */
3121        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
3122        q = int64_to_float64(q_int, s);
3123        q = float64_add(q, float64_half, s);
3124        q = float64_div(q, float64_256, s);
3125        q = float64_sqrt(q, s);
3126        q = float64_div(float64_one, q, s);
3127    }
3128    /* r in units of 1/256 rounded to nearest */
3129    /* s = (int)(256.0 * r + 0.5); */
3130
3131    q = float64_mul(q, float64_256,s );
3132    q = float64_add(q, float64_half, s);
3133    q_int = float64_to_int64_round_to_zero(q, s);
3134
3135    /* return (double)s / 256.0;*/
3136    return float64_div(int64_to_float64(q_int, s), float64_256, s);
3137}
3138
3139float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
3140{
3141    float_status *s = &env->vfp.standard_fp_status;
3142    int result_exp;
3143    float64 f64;
3144    uint32_t val;
3145    uint64_t val64;
3146
3147    val = float32_val(a);
3148
3149    if (float32_is_any_nan(a)) {
3150        if (float32_is_signaling_nan(a)) {
3151            float_raise(float_flag_invalid, s);
3152        }
3153        return float32_default_nan;
3154    } else if (float32_is_zero_or_denormal(a)) {
3155        if (!float32_is_zero(a)) {
3156            float_raise(float_flag_input_denormal, s);
3157        }
3158        float_raise(float_flag_divbyzero, s);
3159        return float32_set_sign(float32_infinity, float32_is_neg(a));
3160    } else if (float32_is_neg(a)) {
3161        float_raise(float_flag_invalid, s);
3162        return float32_default_nan;
3163    } else if (float32_is_infinity(a)) {
3164        return float32_zero;
3165    }
3166
3167    /* Normalize to a double-precision value between 0.25 and 1.0,
3168     * preserving the parity of the exponent.  */
3169    if ((val & 0x800000) == 0) {
3170        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3171                           | (0x3feULL << 52)
3172                           | ((uint64_t)(val & 0x7fffff) << 29));
3173    } else {
3174        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
3175                           | (0x3fdULL << 52)
3176                           | ((uint64_t)(val & 0x7fffff) << 29));
3177    }
3178
3179    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;
3180
3181    f64 = recip_sqrt_estimate(f64, env);
3182
3183    val64 = float64_val(f64);
3184
3185    val = ((val64 >> 63)  & 0x80000000)
3186        | ((result_exp & 0xff) << 23)
3187        | ((val64 >> 29)  & 0x7fffff);
3188    return make_float32(val);
3189}
3190
3191uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
3192{
3193    float64 f64;
3194
3195    if ((a & 0x80000000) == 0) {
3196        return 0xffffffff;
3197    }
3198
3199    f64 = make_float64((0x3feULL << 52)
3200                       | ((int64_t)(a & 0x7fffffff) << 21));
3201
3202    f64 = recip_estimate (f64, env);
3203
3204    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3205}
3206
3207uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
3208{
3209    float64 f64;
3210
3211    if ((a & 0xc0000000) == 0) {
3212        return 0xffffffff;
3213    }
3214
3215    if (a & 0x80000000) {
3216        f64 = make_float64((0x3feULL << 52)
3217                           | ((uint64_t)(a & 0x7fffffff) << 21));
3218    } else { /* bits 31-30 == '01' */
3219        f64 = make_float64((0x3fdULL << 52)
3220                           | ((uint64_t)(a & 0x3fffffff) << 22));
3221    }
3222
3223    f64 = recip_sqrt_estimate(f64, env);
3224
3225    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
3226}
3227
3228void HELPER(set_teecr)(CPUARMState *env, uint32_t val)
3229{
3230    val &= 1;
3231    if (env->teecr != val) {
3232        env->teecr = val;
3233        tb_flush(env);
3234    }
3235}
3236