/* helper.c — QEMU ARM CPU helper routines.
   Revision 288208c386810fef725aa448a9f46bd2772bec8c. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cpu.h"
#include "exec-all.h"
#include "gdbstub.h"
#include "helpers.h"
#include "qemu-common.h"
#ifdef CONFIG_TRACE
#include "trace.h"
#endif

/* Per-model cp15 c0 ID register blocks (CRn=0, CRm=1 feature registers and
   CRm=2 instruction-set attribute registers).  Copied verbatim into
   env->cp15.c0_c1 / c0_c2 by cpu_reset_model_id(). */
static uint32_t cortexa8_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };

static uint32_t cortexa8_cp15_c0_c2[8] =
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t mpcore_cp15_c0_c1[8] =
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };

static uint32_t mpcore_cp15_c0_c2[8] =
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t arm1136_cp15_c0_c1[8] =
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };

static uint32_t arm1136_cp15_c0_c2[8] =
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };

/* Map a CPU model name to its ARM_CPUID_* value; returns 0 if unknown. */
static uint32_t cpu_arm_find_by_name(const char *name);

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1u << feature;
}

/* Set the CPUID and all model-derived state: feature bits, VFP ID
   registers, cache type register, and cp15 reset defaults. */
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
{
    env->cp15.c0_cpuid = id;
    switch (id) {
    case ARM_CPUID_ARM926:
        set_feature(env, ARM_FEATURE_VFP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM946:
        set_feature(env, ARM_FEATURE_MPU);
        env->cp15.c0_cachetype = 0x0f004006;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_ARM1026:
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM1136_R2:
    case ARM_CPUID_ARM1136:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_ARM11MPCORE:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_CORTEXA8:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_AUXCR);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x82048004;
        /* CLIDR: two cache levels, L1 split I/D, L2 unified. */
        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
        break;
    case ARM_CPUID_CORTEXM3:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_M);
        set_feature(env, ARM_FEATURE_DIV);
        break;
    case ARM_CPUID_ANY: /* For userspace emulation.  */
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        set_feature(env, ARM_FEATURE_DIV);
        break;
    case ARM_CPUID_TI915T:
    case ARM_CPUID_TI925T:
        set_feature(env, ARM_FEATURE_OMAPCP);
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
        env->cp15.c0_cachetype = 0x5109149;
        env->cp15.c1_sys = 0x00000070;
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        break;
    case ARM_CPUID_PXA250:
    case ARM_CPUID_PXA255:
    case ARM_CPUID_PXA260:
    case ARM_CPUID_PXA261:
    case ARM_CPUID_PXA262:
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_PXA270_A0:
    case ARM_CPUID_PXA270_A1:
    case ARM_CPUID_PXA270_B0:
    case ARM_CPUID_PXA270_B1:
    case ARM_CPUID_PXA270_C0:
    case ARM_CPUID_PXA270_C5:
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        set_feature(env, ARM_FEATURE_IWMMXT);
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    default:
        cpu_abort(env, "Bad CPU ID: %x\n", id);
        break;
    }
}

/* Reset env to its post-reset architectural state, preserving the model
   identity (c0_cpuid) and everything from 'breakpoints' onward. */
void cpu_reset(CPUARMState *env)
{
    uint32_t id;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    /* Save the CPUID across the memset, then re-derive model state. */
    id = env->cp15.c0_cpuid;
    memset(env, 0, offsetof(CPUARMState, breakpoints));
    if (id)
        cpu_reset_model_id(env, id);
#if defined (CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* FPEXC.EN set so user code can use VFP without trapping. */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
#else
    /* SVC mode with interrupts disabled.  */
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
       clear at reset.  */
    if (IS_M(env))
        env->uncached_cpsr &= ~CPSR_I;
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
    env->cp15.c2_base_mask = 0xffffc000u;
#endif
    env->regs[15] = 0;
    tlb_flush(env, 1);
}

/* gdbstub read accessor for the VFP/Neon register set.  Returns the
   number of bytes written to buf, or 0 for an out-of-range reg. */
static int vfp_gdb_get_reg(CPUState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    /* Status/ID registers follow the data registers. */
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

/* gdbstub write accessor; mirror of vfp_gdb_get_reg. */
static int vfp_gdb_set_reg(CPUState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ?
                                                 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf); return 4;
    }
    return 0;
}

/* Allocate, initialize and reset a CPU state for the named model.
   Returns NULL if cpu_model is not a recognized model name. */
CPUARMState *cpu_arm_init(const char *cpu_model)
{
    CPUARMState *env;
    uint32_t id;
    static int inited = 0;  /* Translator is initialized once per process. */

    id = cpu_arm_find_by_name(cpu_model);
    if (id == 0)
        return NULL;
    env = qemu_mallocz(sizeof(CPUARMState));
    cpu_exec_init(env);
    if (!inited) {
        inited = 1;
        arm_translate_init();
    }

    env->cpu_model_str = cpu_model;
    env->cp15.c0_cpuid = id;
    cpu_reset(env);
    /* Expose the richest FP register set the model supports to gdb. */
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    qemu_init_vcpu(env);
    return env;
}

/* Model-name to CPUID mapping entry. */
struct arm_cpu_t {
    uint32_t id;
    const char *name;
};

/* Supported models; terminated by a {0, NULL} sentinel. */
static const struct arm_cpu_t arm_cpu_names[] = {
    { ARM_CPUID_ARM926, "arm926"},
    { ARM_CPUID_ARM946, "arm946"},
    { ARM_CPUID_ARM1026, "arm1026"},
    { ARM_CPUID_ARM1136, "arm1136"},
    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
    { ARM_CPUID_TI925T, "ti925t" },
    { ARM_CPUID_PXA250, "pxa250"
}, 293 { ARM_CPUID_PXA255, "pxa255" }, 294 { ARM_CPUID_PXA260, "pxa260" }, 295 { ARM_CPUID_PXA261, "pxa261" }, 296 { ARM_CPUID_PXA262, "pxa262" }, 297 { ARM_CPUID_PXA270, "pxa270" }, 298 { ARM_CPUID_PXA270_A0, "pxa270-a0" }, 299 { ARM_CPUID_PXA270_A1, "pxa270-a1" }, 300 { ARM_CPUID_PXA270_B0, "pxa270-b0" }, 301 { ARM_CPUID_PXA270_B1, "pxa270-b1" }, 302 { ARM_CPUID_PXA270_C0, "pxa270-c0" }, 303 { ARM_CPUID_PXA270_C5, "pxa270-c5" }, 304 { ARM_CPUID_ANY, "any"}, 305 { 0, NULL} 306}; 307 308void arm_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) 309{ 310 int i; 311 312 (*cpu_fprintf)(f, "Available CPUs:\n"); 313 for (i = 0; arm_cpu_names[i].name; i++) { 314 (*cpu_fprintf)(f, " %s\n", arm_cpu_names[i].name); 315 } 316} 317 318/* return 0 if not found */ 319static uint32_t cpu_arm_find_by_name(const char *name) 320{ 321 int i; 322 uint32_t id; 323 324 id = 0; 325 for (i = 0; arm_cpu_names[i].name; i++) { 326 if (strcmp(name, arm_cpu_names[i].name) == 0) { 327 id = arm_cpu_names[i].id; 328 break; 329 } 330 } 331 return id; 332} 333 334void cpu_arm_close(CPUARMState *env) 335{ 336 free(env); 337} 338 339uint32_t cpsr_read(CPUARMState *env) 340{ 341 int ZF; 342 ZF = (env->ZF == 0); 343 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 344 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 345 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 346 | ((env->condexec_bits & 0xfc) << 8) 347 | (env->GE << 16); 348} 349 350void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) 351{ 352 if (mask & CPSR_NZCV) { 353 env->ZF = (~val) & CPSR_Z; 354 env->NF = val; 355 env->CF = (val >> 29) & 1; 356 env->VF = (val << 3) & 0x80000000; 357 } 358 if (mask & CPSR_Q) 359 env->QF = ((val & CPSR_Q) != 0); 360 if (mask & CPSR_T) 361 env->thumb = ((val & CPSR_T) != 0); 362 if (mask & CPSR_IT_0_1) { 363 env->condexec_bits &= ~3; 364 env->condexec_bits |= (val >> 25) & 3; 365 } 366 if (mask & CPSR_IT_2_7) { 367 env->condexec_bits &= 
            3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* Mode change: swap in the banked registers for the new mode. */
    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        switch_mode(env, val & CPSR_M);
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

/* Count leading zeros; yields 32 for x == 0, matching the ARM CLZ insn. */
uint32_t HELPER(clz)(uint32_t x)
{
    int count;
    for (count = 32; x; count--)
        x >>= 1;
    return count;
}

/* ARM SDIV: division by zero returns 0 rather than trapping. */
int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

/* ARM UDIV: division by zero returns 0 rather than trapping. */
uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

/* Reverse the bit order of a 32-bit word (byte swap, then nibble swap,
   then bit swap within nibbles). */
uint32_t HELPER(rbit)(uint32_t x)
{
    x = ((x & 0xff000000) >> 24)
      | ((x & 0x00ff0000) >> 8)
      | ((x & 0x0000ff00) << 8)
      | ((x & 0x000000ff) << 24);
    x = ((x & 0xf0f0f0f0) >> 4)
      | ((x & 0x0f0f0f0f) << 4);
    x = ((x & 0x88888888) >> 3)
      | ((x & 0x44444444) >> 1)
      | ((x & 0x22222222) << 1)
      | ((x & 0x11111111) << 3);
    return x;
}

uint32_t HELPER(abs)(uint32_t x)
{
    return ((int32_t)x < 0) ? -x : x;
}

#if defined(CONFIG_USER_ONLY)

/* User mode: exceptions are handled by the host; nothing to do here. */
void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
}

/* Structure used to record exclusive memory locations.  */
typedef struct mmon_state {
    struct mmon_state *next;
    CPUARMState *cpu_env;
    uint32_t addr;
} mmon_state;

/* Chain of current locks.
 */
static mmon_state* mmon_head = NULL;

/* User mode has no MMU: record the faulting address and raise the
   matching abort exception (rw == 2 means instruction fetch). */
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                              int mmu_idx, int is_softmmu)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

/* Lazily create this CPU's exclusive-monitor record and link it at the
   head of the global chain. */
static void allocate_mmon_state(CPUState *env)
{
    env->mmon_entry = malloc(sizeof (mmon_state));
    memset (env->mmon_entry, 0, sizeof (mmon_state));
    env->mmon_entry->cpu_env = env;
    mmon_head = env->mmon_entry;
}

/* Flush any monitor locks for the specified address.  */
static void flush_mmon(uint32_t addr)
{
    mmon_state *mon;

    for (mon = mmon_head; mon; mon = mon->next)
      {
        if (mon->addr != addr)
          continue;

        mon->addr = 0;
        break;
      }
}

/* Mark an address for exclusive access.  */
void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
{
    if (!env->mmon_entry)
        allocate_mmon_state(env);
    /* Clear any previous locks.  */
    flush_mmon(addr);
    env->mmon_entry->addr = addr;
}

/* Test if an exclusive address is still exclusive.  Returns zero
   if the address is still exclusive.  */
uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
{
    int res;

    if (!env->mmon_entry)
        return 1;
    if (env->mmon_entry->addr == addr)
        res = 0;
    else
        res = 1;
    /* The monitor is always cleared by the check, pass or fail. */
    flush_mmon(addr);
    return res;
}

void HELPER(clrex)(CPUState *env)
{
    if (!(env->mmon_entry && env->mmon_entry->addr))
        return;
    flush_mmon(env->mmon_entry->addr);
}

/* User mode: virtual and physical addresses are identical. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

/* These should probably raise undefined insn exceptions.
*/ 532void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val) 533{ 534 int op1 = (insn >> 8) & 0xf; 535 cpu_abort(env, "cp%i insn %08x\n", op1, insn); 536 return; 537} 538 539uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn) 540{ 541 int op1 = (insn >> 8) & 0xf; 542 cpu_abort(env, "cp%i insn %08x\n", op1, insn); 543 return 0; 544} 545 546void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val) 547{ 548 cpu_abort(env, "cp15 insn %08x\n", insn); 549} 550 551uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn) 552{ 553 cpu_abort(env, "cp15 insn %08x\n", insn); 554 return 0; 555} 556 557/* These should probably raise undefined insn exceptions. */ 558void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val) 559{ 560 cpu_abort(env, "v7m_mrs %d\n", reg); 561} 562 563uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg) 564{ 565 cpu_abort(env, "v7m_mrs %d\n", reg); 566 return 0; 567} 568 569void switch_mode(CPUState *env, int mode) 570{ 571 if (mode != ARM_CPU_MODE_USR) 572 cpu_abort(env, "Tried to switch out of user mode\n"); 573} 574 575void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val) 576{ 577 cpu_abort(env, "banked r13 write\n"); 578} 579 580uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode) 581{ 582 cpu_abort(env, "banked r13 read\n"); 583 return 0; 584} 585 586#else 587 588extern int semihosting_enabled; 589 590/* Map CPU modes onto saved register banks. 
 */
static inline int bank_number (int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
    return -1;
}

/* Swap banked registers (r13, r14, spsr, and r8-r12 for FIQ) when the
   processor mode changes.  No-op if the mode is unchanged. */
void switch_mode(CPUState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    /* FIQ additionally banks r8-r12. */
    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    /* Save the outgoing mode's banked registers... */
    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    /* ...and load the incoming mode's. */
    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

/* Push val onto the v7M full-descending stack via a physical access. */
static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}

/* Pop and return the top word of the v7M stack. */
static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}

/* Switch to V7M main or process stack pointer.
 */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        /* Exchange r13 with the inactive stack pointer. */
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}

/* Unwind a v7M exception: complete the IRQ at the NVIC, select the
   stack named by the EXC_RETURN value (left in r15), and pop the
   stacked register frame. */
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->v7m.nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}

/* Take a v7M exception: either mark it pending on the NVIC, or stack a
   frame and vector to the handler for IRQs. */
static void do_interrupt_v7m(CPUARMState *env)
{
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    /* Build the EXC_RETURN magic value for r14: bit 2 = process stack
       in use, bit 3 = came from thread mode. */
    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        env->regs[15] += 2;
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            /* BKPT 0xab is the Thumb semihosting trap. */
            nr = lduw_code(env->regs[15]) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->v7m.nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    env->uncached_cpsr &= ~CPSR_IT;
    env->regs[14] = lr;
    /* Fetch the handler address from the vector table. */
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}

/* Handle a CPU exception.
 */
void do_interrupt(CPUARMState *env)
{
    uint32_t addr;      /* exception vector offset */
    uint32_t mask;      /* CPSR interrupt-disable bits to set */
    int new_mode;       /* mode to enter */
    uint32_t offset;    /* return-address adjustment for r14 */

#ifdef CONFIG_TRACE
    if (tracing) {
        trace_exception(env->regs[15]);
    }
#endif

    if (IS_M(env)) {
        do_interrupt_v7m(env);
        return;
    }
    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = lduw_code(env->regs[15] - 2) & 0xff;
            } else {
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                || (mask == 0xab && env->thumb))
                && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = lduw_code(env->regs[15]) & 0xff;
            if (mask == 0xab
                && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & (1 << 13)) {
        addr += 0xffff0000;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and switch to Arm mode.  */
    /* ??? Thumb interrupt handlers not implemented.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    env->thumb = 0;
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  */
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
                           int is_user)
{
    int prot_ro;

    /* Manager domain: no permission checks. */
    if (domain == 3)
        return PAGE_READ | PAGE_WRITE;

    /* access_type == 1 means a write; read-only is useless then. */
    if (access_type == 1)
        prot_ro = 0;
    else
        prot_ro = PAGE_READ;

    switch (ap) {
    case 0:
        if (access_type == 1)
            return 0;
        /* S and R bits in the system control register. */
        switch ((env->cp15.c1_sys >> 8) & 3) {
        case 1:
            return is_user ? 0 : PAGE_READ;
        case 2:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return prot_ro;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ?
                         0 : prot_ro;
    case 6:
        return prot_ro;
    case 7:
        /* AP 7 is only valid from ARMv7 on. */
        if (!arm_feature (env, ARM_FEATURE_V7))
            return 0;
        return prot_ro;
    default:
        abort();
    }
}

/* Compute the physical address of the level-1 descriptor for a virtual
   address, selecting TTBR0 or TTBR1 via the c2 mask. */
static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1 & 0xffffc000;
    else
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;

    table |= (address >> 18) & 0x3ffc;
    return table;
}

/* ARMv5-format translation table walk.  On success stores the physical
   address and protection flags and returns 0; otherwise returns the
   fault status code with the domain in bits [7:4]. */
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain == 0 || domain == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            break;
        case 2: /* 4k page.
 */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    /* XScale coarse tables treat type 3 as a 4k page. */
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

/* ARMv6-format translation table walk.  Same contract as
   get_phys_addr_v5, with execute-never and access-flag handling. */
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        domain = 0;
        goto do_fault;
    } else if (type == 2 && (desc & (1 << 18))) {
        /* Supersection.  */
        domain = 0;
    } else {
        /* Section or page.  */
        domain = (desc >> 4) & 0x1e;
    }
    domain = (env->cp15.c3 >> domain) & 3;
    if (domain == 0 || domain == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
        } else {
            /* Section.
 */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    /* Execute-never: refuse instruction fetches (access_type == 2). */
    if (xn && access_type == 2)
        goto do_fault;

    /* The simplified model uses AP[0] as an access control bit.  */
    if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
        /* Access flag fault.  */
        code = (code == 15) ? 6 : 3;
        goto do_fault;
    }
    *prot = check_ap(env, ap, domain, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

/* MPU (no-MMU) translation: identity address mapping with per-region
   access permission checks.  Returns 0 on success, else a fault code. */
static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
                             int is_user, uint32_t *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    /* Regions are searched highest-numbered (highest priority) first. */
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.
 */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    /* Pick the instruction or data permission register. */
    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    /* Each region owns a 4-bit permission field. */
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    return 0;
}

/* Top-level translation dispatch: apply FCSE remapping, then use the
   MPU, v6 or v5 walker depending on the CPU configuration. */
static inline int get_phys_addr(CPUState *env, uint32_t address,
                                int access_type, int is_user,
                                uint32_t *phys_ptr, int *prot)
{
    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU/MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (env->cp15.c1_sys & (1 << 23)) {
        /* XP bit set: ARMv6 page table format. */
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot);
    }
}

/* Resolve a softmmu fault: on successful translation install a TLB
   entry and return its result, otherwise record the fault status and
   address in cp15 and return 1 to raise the abort. */
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
                              int access_type, int mmu_idx, int is_softmmu)
{
    uint32_t phys_addr = 0;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
    if (ret == 0) {
        /* Map a single [sub]page.
*/
        /* Align down to 1KB — presumably matches ARM's TARGET_PAGE_SIZE
           (smallest subpage); TODO confirm against exec-all.h.  */
        phys_addr &= ~(uint32_t)0x3ff;
        address &= ~(uint32_t)0x3ff;
        return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
                             is_softmmu);
    }

    /* Translation failed: record the fault status/address in c5/c6 and
       raise the appropriate abort exception.  */
    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        /* NOTE(review): bit 11 set for writes on v6 — looks like the DFSR
           write/read (WnR) bit; confirm against the ARMv6 ARM.  */
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}

/* Debugger address translation: always performs a privileged read lookup
   and returns -1 on any fault.  */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t phys_addr = 0;
    int prot;
    int ret;

    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);

    if (ret != 0)
        return -1;

    return phys_addr;
}

/* Not really implemented.  Need to figure out a sane way of doing this.
   Maybe add generic watchpoint support and use that.  */

/* Coarse-grained exclusive monitor: only the marked address is tracked,
   not the data, so an intervening store by another CPU is not detected.  */
void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
{
    env->mmon_addr = addr;
}

/* Returns 0 if the exclusive monitor still covers addr, nonzero if the
   store-exclusive must fail.  */
uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
{
    return (env->mmon_addr != addr);
}

/* Clear the exclusive monitor (CLREX).  */
void HELPER(clrex)(CPUState *env)
{
    env->mmon_addr = -1;
}

/* Generic coprocessor write: decode the fields of the MCR instruction and
   dispatch to the board-registered hook (see cpu_arm_set_cp_io); a write
   to a coprocessor with no hook is silently ignored.  */
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int src = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_write)
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
                                 cp_info, src, operand, val);
}

/* Generic coprocessor read: counterpart of HELPER(set_cp) for MRC.  */
uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int dest = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_read)
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
                                       cp_info, dest, operand);
    /* No read hook registered: reads as zero.  */
    return 0;
}

/* Return basic MPU access permission bits: compress the eight 4-bit
   extended AP fields down to eight 2-bit fields (drop the high half of
   each nibble).  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format: inverse of
   simple_mpu_ap_bits — spread eight 2-bit fields into eight nibbles.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

/* MCR to cp15: decode CRn (bits [19:16]), op1, op2 and CRm from the
   instruction word and emulate the register write.  Unimplemented
   registers abort via bad_reg.  */
void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0:
        /* ID codes.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            break;
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        /* v7: cache size selection register is the only writable ID reg.  */
        if (arm_feature(env, ARM_FEATURE_V7)
            && op1 == 2 && crm == 0 && op2 == 0) {
            env->cp15.c0_cssel = val & 0xf;
            break;
        }
        goto bad_reg;
    case 1: /* System configuration.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
                env->cp15.c1_sys = val;
            /* ??? Lots of these bits are not implemented.  */
            /* This may enable/disable the MMU, so do a TLB flush.  */
            tlb_flush(env, 1);
            break;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                env->cp15.c1_xscaleauxcr = val;
                break;
            }
            /* Not implemented.  */
            break;
        case 2:
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            if (env->cp15.c1_coproc != val) {
                env->cp15.c1_coproc = val;
                /* ??? Is this safe when called from within a TB?
*/
                tb_flush(env);
            }
            break;
        default:
            goto bad_reg;
        }
        break;
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                env->cp15.c2_data = val;
                break;
            case 1:
                env->cp15.c2_insn = val;
                break;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0:
                env->cp15.c2_base0 = val;
                break;
            case 1:
                env->cp15.c2_base1 = val;
                break;
            case 2:
                /* TTBCR: low 3 bits select the TTBR0/TTBR1 split; derive
                   the address masks used by the table walk.  */
                val &= 7;
                env->cp15.c2_control = val;
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        env->cp15.c3 = val;
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
        break;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            /* On MPU cores the "simple" AP format is widened to the
               extended format for internal storage.  */
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_data = val;
            break;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_insn = val;
            break;
        case 2:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_data = val;
            break;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 6: /* MMU Fault address / MPU base/size.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            env->cp15.c6_region[crm] = val;
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                env->cp15.c6_data = val;
                break;
            case 1: /* ??? This is WFAR on armv6 */
            case 2:
                env->cp15.c6_insn = val;
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 7: /* Cache control.  */
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        /* No cache, so nothing to do.  */
        /* ??? MPCore has VA to PA translation functions.  */
        break;
    case 8: /* MMU TLB control.  */
        switch (op2) {
        case 0: /* Invalidate all.  */
            tlb_flush(env, 0);
            break;
        case 1: /* Invalidate single TLB entry.  */
#if 0
            /* ??? This is wrong for large pages and sections.  */
            /* As an ugly hack to make linux work we always flush a 4K
               pages.  */
            val &= 0xfffff000;
            tlb_flush_page(env, val);
            tlb_flush_page(env, val + 0x400);
            tlb_flush_page(env, val + 0x800);
            tlb_flush_page(env, val + 0xc00);
#else
            tlb_flush(env, 1);
#endif
            break;
        case 2: /* Invalidate on ASID.  */
            tlb_flush(env, val == 0);
            break;
        case 3: /* Invalidate single entry on MVA.  */
            /* ??? This is like case 1, but ignores ASID.  */
            tlb_flush(env, 1);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 9:
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        switch (crm) {
        case 0: /* Cache lockdown.  */
            switch (op1) {
            case 0: /* L1 cache.  */
                switch (op2) {
                case 0:
                    env->cp15.c9_data = val;
                    break;
                case 1:
                    env->cp15.c9_insn = val;
                    break;
                default:
                    goto bad_reg;
                }
                break;
            case 1: /* L2 cache.  */
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 1: /* TCM memory region registers.  */
            /* Not implemented.  */
            goto bad_reg;
        default:
            goto bad_reg;
        }
        break;
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        break;
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            /* Unlike real hardware the qemu TLB uses virtual addresses,
               not modified virtual addresses, so this causes a TLB flush.
             */
            if (env->cp15.c13_fcse != val)
                tlb_flush(env, 1);
            env->cp15.c13_fcse = val;
            break;
        case 1:
            /* This changes the ASID, so do a TLB flush.  */
            if (env->cp15.c13_context != val
                && !arm_feature(env, ARM_FEATURE_MPU))
                tlb_flush(env, 0);
            env->cp15.c13_context = val;
            break;
        case 2:
            env->cp15.c13_tls1 = val;
            break;
        case 3:
            env->cp15.c13_tls2 = val;
            break;
        case 4:
            env->cp15.c13_tls3 = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1) {
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
                    tb_flush(env);
                    env->cp15.c15_cpar = val & 0x3fff;
                }
                break;
            }
            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                break;
            case 1: /* Set TI925T configuration.  */
                env->cp15.c15_ticonfig = val & 0xe7;
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
                    ARM_CPUID_TI915T : ARM_CPUID_TI925T;
                break;
            case 2: /* Set I_max.  */
                env->cp15.c15_i_max = val;
                break;
            case 3: /* Set I_min.  */
                env->cp15.c15_i_min = val;
                break;
            case 4: /* Set thread-ID.  */
                env->cp15.c15_threadid = val & 0xffff;
                break;
            case 8: /* Wait-for-interrupt (deprecated).  */
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    }
    return;
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
}

/* MRC from cp15: counterpart of HELPER(set_cp15).  */
uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0: /* ID codes.  */
        switch (op1) {
        case 0:
            switch (crm) {
            case 0:
                switch (op2) {
                case 0: /* Device ID.  */
                    return env->cp15.c0_cpuid;
                case 1: /* Cache Type.  */
                    return env->cp15.c0_cachetype;
                case 2: /* TCM status.  */
                    return 0;
                case 3: /* TLB type register.  */
                    return 0; /* No lockable TLB entries.  */
                case 5: /* CPU ID */
                    return env->cpu_index;
                default:
                    goto bad_reg;
                }
            case 1:
                if (!arm_feature(env, ARM_FEATURE_V6))
                    goto bad_reg;
                return env->cp15.c0_c1[op2];
            case 2:
                if (!arm_feature(env, ARM_FEATURE_V6))
                    goto bad_reg;
                return env->cp15.c0_c2[op2];
            case 3: case 4: case 5: case 6: case 7:
                return 0;
            default:
                goto bad_reg;
            }
        case 1:
            /* These registers aren't documented on arm11 cores.  However
               Linux looks at them anyway.
*/ 1664 if (!arm_feature(env, ARM_FEATURE_V6)) 1665 goto bad_reg; 1666 if (crm != 0) 1667 goto bad_reg; 1668 if (!arm_feature(env, ARM_FEATURE_V7)) 1669 return 0; 1670 1671 switch (op2) { 1672 case 0: 1673 return env->cp15.c0_ccsid[env->cp15.c0_cssel]; 1674 case 1: 1675 return env->cp15.c0_clid; 1676 case 7: 1677 return 0; 1678 } 1679 goto bad_reg; 1680 case 2: 1681 if (op2 != 0 || crm != 0) 1682 goto bad_reg; 1683 return env->cp15.c0_cssel; 1684 default: 1685 goto bad_reg; 1686 } 1687 case 1: /* System configuration. */ 1688 if (arm_feature(env, ARM_FEATURE_OMAPCP)) 1689 op2 = 0; 1690 switch (op2) { 1691 case 0: /* Control register. */ 1692 return env->cp15.c1_sys; 1693 case 1: /* Auxiliary control register. */ 1694 if (arm_feature(env, ARM_FEATURE_XSCALE)) 1695 return env->cp15.c1_xscaleauxcr; 1696 if (!arm_feature(env, ARM_FEATURE_AUXCR)) 1697 goto bad_reg; 1698 switch (ARM_CPUID(env)) { 1699 case ARM_CPUID_ARM1026: 1700 return 1; 1701 case ARM_CPUID_ARM1136: 1702 case ARM_CPUID_ARM1136_R2: 1703 return 7; 1704 case ARM_CPUID_ARM11MPCORE: 1705 return 1; 1706 case ARM_CPUID_CORTEXA8: 1707 return 2; 1708 default: 1709 goto bad_reg; 1710 } 1711 case 2: /* Coprocessor access register. */ 1712 if (arm_feature(env, ARM_FEATURE_XSCALE)) 1713 goto bad_reg; 1714 return env->cp15.c1_coproc; 1715 default: 1716 goto bad_reg; 1717 } 1718 case 2: /* MMU Page table control / MPU cache control. */ 1719 if (arm_feature(env, ARM_FEATURE_MPU)) { 1720 switch (op2) { 1721 case 0: 1722 return env->cp15.c2_data; 1723 break; 1724 case 1: 1725 return env->cp15.c2_insn; 1726 break; 1727 default: 1728 goto bad_reg; 1729 } 1730 } else { 1731 switch (op2) { 1732 case 0: 1733 return env->cp15.c2_base0; 1734 case 1: 1735 return env->cp15.c2_base1; 1736 case 2: 1737 return env->cp15.c2_control; 1738 default: 1739 goto bad_reg; 1740 } 1741 } 1742 case 3: /* MMU Domain access control / MPU write buffer control. */ 1743 return env->cp15.c3; 1744 case 4: /* Reserved. 
*/ 1745 goto bad_reg; 1746 case 5: /* MMU Fault status / MPU access permission. */ 1747 if (arm_feature(env, ARM_FEATURE_OMAPCP)) 1748 op2 = 0; 1749 switch (op2) { 1750 case 0: 1751 if (arm_feature(env, ARM_FEATURE_MPU)) 1752 return simple_mpu_ap_bits(env->cp15.c5_data); 1753 return env->cp15.c5_data; 1754 case 1: 1755 if (arm_feature(env, ARM_FEATURE_MPU)) 1756 return simple_mpu_ap_bits(env->cp15.c5_data); 1757 return env->cp15.c5_insn; 1758 case 2: 1759 if (!arm_feature(env, ARM_FEATURE_MPU)) 1760 goto bad_reg; 1761 return env->cp15.c5_data; 1762 case 3: 1763 if (!arm_feature(env, ARM_FEATURE_MPU)) 1764 goto bad_reg; 1765 return env->cp15.c5_insn; 1766 default: 1767 goto bad_reg; 1768 } 1769 case 6: /* MMU Fault address. */ 1770 if (arm_feature(env, ARM_FEATURE_MPU)) { 1771 if (crm >= 8) 1772 goto bad_reg; 1773 return env->cp15.c6_region[crm]; 1774 } else { 1775 if (arm_feature(env, ARM_FEATURE_OMAPCP)) 1776 op2 = 0; 1777 switch (op2) { 1778 case 0: 1779 return env->cp15.c6_data; 1780 case 1: 1781 if (arm_feature(env, ARM_FEATURE_V6)) { 1782 /* Watchpoint Fault Adrress. */ 1783 return 0; /* Not implemented. */ 1784 } else { 1785 /* Instruction Fault Adrress. */ 1786 /* Arm9 doesn't have an IFAR, but implementing it anyway 1787 shouldn't do any harm. */ 1788 return env->cp15.c6_insn; 1789 } 1790 case 2: 1791 if (arm_feature(env, ARM_FEATURE_V6)) { 1792 /* Instruction Fault Adrress. */ 1793 return env->cp15.c6_insn; 1794 } else { 1795 goto bad_reg; 1796 } 1797 default: 1798 goto bad_reg; 1799 } 1800 } 1801 case 7: /* Cache control. */ 1802 /* FIXME: Should only clear Z flag if destination is r15. */ 1803 env->ZF = 0; 1804 return 0; 1805 case 8: /* MMU TLB control. */ 1806 goto bad_reg; 1807 case 9: /* Cache lockdown. */ 1808 switch (op1) { 1809 case 0: /* L1 cache. 
*/ 1810 if (arm_feature(env, ARM_FEATURE_OMAPCP)) 1811 return 0; 1812 switch (op2) { 1813 case 0: 1814 return env->cp15.c9_data; 1815 case 1: 1816 return env->cp15.c9_insn; 1817 default: 1818 goto bad_reg; 1819 } 1820 case 1: /* L2 cache */ 1821 if (crm != 0) 1822 goto bad_reg; 1823 /* L2 Lockdown and Auxiliary control. */ 1824 return 0; 1825 default: 1826 goto bad_reg; 1827 } 1828 case 10: /* MMU TLB lockdown. */ 1829 /* ??? TLB lockdown not implemented. */ 1830 return 0; 1831 case 11: /* TCM DMA control. */ 1832 case 12: /* Reserved. */ 1833 goto bad_reg; 1834 case 13: /* Process ID. */ 1835 switch (op2) { 1836 case 0: 1837 return env->cp15.c13_fcse; 1838 case 1: 1839 return env->cp15.c13_context; 1840 case 2: 1841 return env->cp15.c13_tls1; 1842 case 3: 1843 return env->cp15.c13_tls2; 1844 case 4: 1845 return env->cp15.c13_tls3; 1846 default: 1847 goto bad_reg; 1848 } 1849 case 14: /* Reserved. */ 1850 goto bad_reg; 1851 case 15: /* Implementation specific. */ 1852 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 1853 if (op2 == 0 && crm == 1) 1854 return env->cp15.c15_cpar; 1855 1856 goto bad_reg; 1857 } 1858 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 1859 switch (crm) { 1860 case 0: 1861 return 0; 1862 case 1: /* Read TI925T configuration. */ 1863 return env->cp15.c15_ticonfig; 1864 case 2: /* Read I_max. */ 1865 return env->cp15.c15_i_max; 1866 case 3: /* Read I_min. */ 1867 return env->cp15.c15_i_min; 1868 case 4: /* Read thread-ID. */ 1869 return env->cp15.c15_threadid; 1870 case 8: /* TI925T_status */ 1871 return 0; 1872 } 1873 /* TODO: Peripheral port remap register: 1874 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt 1875 * controller base address at $rn & ~0xfff and map size of 1876 * 0x200 << ($rn & 0xfff), when MMU is off. */ 1877 goto bad_reg; 1878 } 1879 return 0; 1880 } 1881bad_reg: 1882 /* ??? For debugging only. Should raise illegal instruction exception. 
*/
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
    return 0;
}

/* Write the banked copy of r13 (SP) for the given processor mode.  */
void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
{
    env->banked_r13[bank_number(mode)] = val;
}

/* Read the banked copy of r13 (SP) for the given processor mode.  */
uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
{
    return env->banked_r13[bank_number(mode)];
}

/* v7M MRS: read a special register.  Each case masks the xPSR view to the
   fields the register exposes.  */
uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        /* current_sp selects which stack pointer lives in regs[13].  */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->uncached_cpsr & CPSR_I) != 0;
    case 17: /* FAULTMASK */
        return (env->uncached_cpsr & CPSR_F) != 0;
    case 18: /* BASEPRI */
    case 19: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}

/* v7M MSR: write a special register; counterpart of HELPER(v7m_mrs).  */
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly.  */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_I;
        else
            env->uncached_cpsr &= ~CPSR_I;
        break;
    case 17: /* FAULTMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_F;
        else
            env->uncached_cpsr &= ~CPSR_F;
        break;
    case 18: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 19: /* BASEPRI_MAX */
        /* Only raise the priority mask (lower value = higher priority),
           never lower it; 0 disables masking.  */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        /* Bit 1 selects the active stack pointer (MSP/PSP).  */
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only.
*/
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}

/* Register board-supplied read/write hooks for coprocessor cpnum
   (0..14; cp15 is handled internally and cannot be overridden).  */
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
                       ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
                       void *opaque)
{
    if (cpnum < 0 || cpnum > 14) {
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
        return;
    }

    env->cp[cpnum].cp_read = cp_read;
    env->cp[cpnum].cp_write = cp_write;
    env->cp[cpnum].opaque = opaque;
}

#endif

/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.
   Overflow occurred iff the result's sign differs from a's while a and b
   had the same sign; saturate towards the sign of a.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Instantiate the parallel add/sub helpers (op_addsub.h template) for the
   signed saturating ("q"-prefixed) variants.  */
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"

/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    /* Unsigned wraparound means overflow: clamp to the maximum.  */
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a < b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a < b)
        return a - b;
    else
        return 0;
}

/* Unsigned saturating ("uq"-prefixed) variants.  */
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.
*/
/* These variants also compute the CPSR GE flags; the `ge` variable is
   declared by the including op_addsub.h template (enabled via ARITH_GE).  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int16_t)((uint16_t)(a) op (uint16_t)(b)); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int8_t)((uint8_t)(a) op (uint8_t)(b)); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  GE is set from the carry/borrow out of
   each lane.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic: result is (a op b) >> 1 with sign preserved
   (SHADD/SHSUB family).  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic (UHADD/UHSUB family).  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

/* Absolute difference of two unsigned bytes.  */
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >>16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction: pick each byte of a or b according to the
   corresponding GE flag bit.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}

/* Compute NZ flag inputs for a 64-bit logical result: N from bit 63,
   Z from the whole value being nonzero.  */
uint32_t HELPER(logicq_cc)(uint64_t val)
{
    return (val >> 32) | (val != 0);
}

/* VFP support.
We follow the convention used for VFP instrunctions:
   Single precition routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & float_flag_underflow)
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    return target_bits;
}

/* Reassemble the FPSCR value: static bits from the stored register,
   vector length/stride from their cached fields, and the cumulative
   exception bits from the softfloat status.  */
uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    return host_bits;
}

/* Write FPSCR: cache the vector fields, and push the rounding mode,
   flush-to-zero, default-NaN and exception bits into the softfloat
   status so later FP ops observe them.  */
void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    /* Only touch the (comparatively expensive) softfloat settings for
       bits that actually changed.  */
    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case 0:
            i = float_round_nearest_even;
            break;
        case 1:
            i = float_round_up;
            break;
        case 2:
            i = float_round_down;
            break;
        case 3:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24))
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host((val >> 8) & 0x1f);
    set_float_exception_flags(i, &env->vfp.fp_status);
}

#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

/* Generate single- and double-precision binary-op helpers that forward
   to the corresponding softfloat routine.  */
#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUState *env) \
{ \
    return float32_ ## name (a, b, &env->vfp.fp_status); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUState *env) \
{ \
    return float64_ ## name (a, b, &env->vfp.fp_status); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP

float32 VFP_HELPER(neg, s)(float32
a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

/* XXX: check quiet/signaling case */
/* Generate VFP compare helpers: map the softfloat comparison result
   (-1 less, 0 equal, 1 greater, 2 unordered) onto the NZCV bits in the
   top nibble of FPSCR.  cmp uses the quiet compare, cmpe the signaling
   one.  */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp

/* Helper routines to perform bitwise copies between float and int.
   Type punning via a union is well-defined under GCC (and avoids
   strict-aliasing problems a pointer cast would have).  */
static inline float32 vfp_itos(uint32_t i)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.i = i;
    return v.s;
}

static inline uint32_t vfp_stoi(float32 s)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.s = s;
    return v.i;
}

static inline float64 vfp_itod(uint64_t i)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.i = i;
    return v.d;
}

static inline uint64_t vfp_dtoi(float64 d)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.d = d;
    return v.i;
}

/* Integer to float conversion.  The integer operand arrives as the raw
   bit pattern of a float32 register, hence the vfp_stoi unpacking.  */
float32 VFP_HELPER(uito, s)(float32 x, CPUState *env)
{
    return uint32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
}

float64 VFP_HELPER(uito, d)(float32 x, CPUState *env)
{
    return uint32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
}

float32 VFP_HELPER(sito, s)(float32 x, CPUState *env)
{
    return int32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
}

float64 VFP_HELPER(sito, d)(float32 x, CPUState *env)
{
    return int32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
}

/* Float to integer conversion.  The integer result is returned as the
   raw bit pattern of a float32 register (vfp_itos).  The plain forms
   use the current rounding mode, the *z forms round toward zero.  */
float32 VFP_HELPER(toui, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_uint32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(toui, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_uint32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosi, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_int32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosi, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_int32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(touiz, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_uint32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(touiz, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_uint32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosiz, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_int32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosiz, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_int32_round_to_zero(x, &env->vfp.fp_status));
}

/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}

/* VFP3 fixed point conversion.
*/
/* Expand to a pair of VFP3 fixed-point conversion helpers:
 *   <name>to,p : fixed -> float.  The register bits are reinterpreted
 *                as an integer of type "itype" (signed or unsigned
 *                according to the "sign" prefix), converted to float
 *                and then scaled down by 2^shift.
 *   to<name>,p : float -> fixed.  The value is scaled up by 2^shift
 *                and converted to integer with round-to-zero; the
 *                result is truncated to "itype" before being stored
 *                back as raw register bits.
 */
#define VFP_CONV_FIX(name, p, ftype, itype, sign) \
ftype VFP_HELPER(name##to, p)(ftype x, uint32_t shift, CPUState *env) \
{ \
    ftype tmp; \
    tmp = sign##int32_to_##ftype ((itype)vfp_##p##toi(x), \
                                  &env->vfp.fp_status); \
    return ftype##_scalbn(tmp, -(int)shift, &env->vfp.fp_status); \
} \
ftype VFP_HELPER(to##name, p)(ftype x, uint32_t shift, CPUState *env) \
{ \
    ftype tmp; \
    tmp = ftype##_scalbn(x, shift, &env->vfp.fp_status); \
    return vfp_ito##p((itype)ftype##_to_##sign##int32_round_to_zero(tmp, \
        &env->vfp.fp_status)); \
}

VFP_CONV_FIX(sh, d, float64, int16, )
VFP_CONV_FIX(sl, d, float64, int32, )
VFP_CONV_FIX(uh, d, float64, uint16, u)
VFP_CONV_FIX(ul, d, float64, uint32, u)
VFP_CONV_FIX(sh, s, float32, int16, )
VFP_CONV_FIX(sl, s, float32, int32, )
VFP_CONV_FIX(uh, s, float32, uint16, u)
VFP_CONV_FIX(ul, s, float32, uint32, u)
#undef VFP_CONV_FIX

/* Newton-Raphson reciprocal step: returns 2.0 - a * b. */
float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 two = int32_to_float32(2, s);
    return float32_sub(two, float32_mul(a, b, s), s);
}

/* Reciprocal square root step: returns 3.0 - a * b.
 * NOTE(review): the architectural VRSQRTS result is (3 - a*b)/2; the
 * halving is not applied here -- confirm it is handled by the caller. */
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 three = int32_to_float32(3, s);
    return float32_sub(three, float32_mul(a, b, s), s);
}

/* NEON helpers.  */

/* TODO: The architecture specifies the value that the estimate functions
   should return. We return the exact reciprocal/root instead.
*/ 2598float32 HELPER(recpe_f32)(float32 a, CPUState *env) 2599{ 2600 float_status *s = &env->vfp.fp_status; 2601 float32 one = int32_to_float32(1, s); 2602 return float32_div(one, a, s); 2603} 2604 2605float32 HELPER(rsqrte_f32)(float32 a, CPUState *env) 2606{ 2607 float_status *s = &env->vfp.fp_status; 2608 float32 one = int32_to_float32(1, s); 2609 return float32_div(one, float32_sqrt(a, s), s); 2610} 2611 2612uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env) 2613{ 2614 float_status *s = &env->vfp.fp_status; 2615 float32 tmp; 2616 tmp = int32_to_float32(a, s); 2617 tmp = float32_scalbn(tmp, -32, s); 2618 tmp = helper_recpe_f32(tmp, env); 2619 tmp = float32_scalbn(tmp, 31, s); 2620 return float32_to_int32(tmp, s); 2621} 2622 2623uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env) 2624{ 2625 float_status *s = &env->vfp.fp_status; 2626 float32 tmp; 2627 tmp = int32_to_float32(a, s); 2628 tmp = float32_scalbn(tmp, -32, s); 2629 tmp = helper_rsqrte_f32(tmp, env); 2630 tmp = float32_scalbn(tmp, 31, s); 2631 return float32_to_int32(tmp, s); 2632} 2633 2634void HELPER(set_teecr)(CPUState *env, uint32_t val) 2635{ 2636 val &= 1; 2637 if (env->teecr != val) { 2638 env->teecr = val; 2639 tb_flush(env); 2640 } 2641} 2642