/external/vixl/src/aarch64/
assembler-aarch64.h
    1255  void clrex(int imm4 = 0xf);
    2816  static Instr CRm(int imm4) {
    2817    VIXL_ASSERT(IsUint4(imm4));
    2818    return imm4 << CRm_offset;
    2821  static Instr CRn(int imm4) {
    2822    VIXL_ASSERT(IsUint4(imm4));
    2823    return imm4 << CRn_offset;
    3010  static Instr ImmNEONExt(int imm4) {
    3011    VIXL_ASSERT(IsUint4(imm4));
    3012    return imm4 << ImmNEONExt_offset;
    3025  int imm4 = index << s;
    [all...]
simulator-aarch64.cc
    3881  int imm4 = instr->GetImmNEON4();
    3882  int rn_index = imm4 >> tz;
assembler-aarch64.cc
    2061  void Assembler::clrex(int imm4) { Emit(CLREX | CRm(imm4)); }
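All three VIXL sites follow the same pattern: assert that the value fits in four bits, shift it to its field offset, and OR it into the base encoding (clrex, for instance, places its operand in the CRm field, defaulting to 0xf). A minimal self-contained sketch of that pattern, using placeholder offset and base-opcode constants rather than VIXL's real ones:

    #include <cassert>
    #include <cstdint>

    using Instr = uint32_t;

    // Placeholder constants; VIXL's CRm_offset and CLREX values are not reproduced here.
    constexpr int kCRmOffset = 8;
    constexpr Instr kClrexBase = 0xD503305F;  // assumed base encoding with CRm == 0

    constexpr bool IsUint4(int x) { return x >= 0 && x <= 0xF; }

    // Same shape as VIXL's CRm() helper: range-check, then shift into the field.
    Instr CRm(int imm4) {
      assert(IsUint4(imm4));
      return static_cast<Instr>(imm4) << kCRmOffset;
    }

    // clrex(imm4) ORs the CRm field into the base encoding; 0xF is the default operand.
    Instr EncodeClrex(int imm4 = 0xF) { return kClrexBase | CRm(imm4); }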
/external/valgrind/VEX/priv/
host_arm_isel.c
    3150  UInt imm4;
    3159  case Iop_VDup8x8: imm4 = (index << 1) + 1; break;
    3160  case Iop_VDup16x4: imm4 = (index << 2) + 2; break;
    3161  case Iop_VDup32x2: imm4 = (index << 3) + 4; break;
    3164  if (imm4 >= 16) {
    3169  res, argL, imm4, False));
    3325  UInt imm4;
    3329  imm4 = (index << 1) + 1;
    3337  imm4, False
    3344  UInt imm4;
    3363  UInt imm4;
    3730  UInt imm4;
    3958  UInt imm4;
    3977  UInt imm4;
    3996  UInt imm4;
    5314  UInt imm4;
    [all...]
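In the VDUP (scalar) cases above, imm4 packs both the element size and the lane index: the position of the lowest set bit encodes the size, the bits above it hold the index, and values of 16 or more are rejected as out of range. A small sketch of the same computation (the function name and the -1 out-of-range convention are mine, not Valgrind's):

    #include <cstdio>

    // Build the VDUP (scalar) imm4 field from element size and lane index,
    // mirroring the host_arm_isel.c cases above:
    //   8-bit:  imm4 = index:1   -> (index << 1) + 1
    //   16-bit: imm4 = index:10  -> (index << 2) + 2
    //   32-bit: imm4 = index:100 -> (index << 3) + 4
    // Returns -1 when the result does not fit in four bits.
    int VDupImm4(int elemBits, unsigned index) {
      unsigned imm4;
      switch (elemBits) {
        case 8:  imm4 = (index << 1) + 1; break;
        case 16: imm4 = (index << 2) + 2; break;
        case 32: imm4 = (index << 3) + 4; break;
        default: return -1;
      }
      return imm4 >= 16 ? -1 : static_cast<int>(imm4);
    }

    int main() {
      // Prints "15 14 12": the highest legal lane for each element size.
      std::printf("%d %d %d\n", VDupImm4(8, 7), VDupImm4(16, 3), VDupImm4(32, 1));
      return 0;
    }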
host_arm_defs.h
    236  ARMri84_I84=7, /* imm8 `ror` (2 * imm4) */
    247  UShort imm4;
    256  extern ARMRI84* ARMRI84_I84 ( UShort imm8, UShort imm4 );
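ARMri84_I84 describes ARM's standard modified immediate: an 8-bit value rotated right by twice the 4-bit rotation field. A short sketch of how such an operand expands to its 32-bit value (the helper name is hypothetical):

    #include <cassert>
    #include <cstdint>

    // Expand an "imm8 ror (2 * imm4)" operand to its 32-bit value.
    uint32_t ExpandImm84(uint32_t imm8, uint32_t imm4) {
      assert(imm8 <= 0xFF && imm4 <= 0xF);
      unsigned rot = 2 * imm4;                       // rotation is even, 0..30
      if (rot == 0) return imm8;                     // avoid the undefined 32-bit shift
      return (imm8 >> rot) | (imm8 << (32u - rot));  // rotate right by rot bits
    }
    // Example: ExpandImm84(0xFF, 4) yields 0xFF000000 (0xFF rotated right by 8).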
guest_arm64_toIR.c
    8201  0 q 101110 op2 0 m 0 imm4 0 n d
    8213  UInt imm4 = INSN(14,11);
    8225  if (imm4 == 0) {
    8228  vassert(imm4 >= 1 && imm4 <= 15);
    8230  mkexpr(sHi), mkexpr(sLo), mkU8(imm4)));
    8233  DIP("ext v%u.16b, v%u.16b, v%u.16b, #%u\n", dd, nn, mm, imm4);
    8235  if (imm4 >= 8) return False;
    8236  if (imm4 == 0) {
    8239  vassert(imm4 > ...
    8584  UInt imm4 = INSN(14,11);
    9022  UInt imm4 = INSN(14,11);
    [all...]
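The EXT handling above treats the two sources as a 32-byte concatenation and takes 16 result bytes starting at byte imm4, which is why imm4 == 0 degenerates to a plain copy, only 1..15 reach the shift path, and the 64-bit-vector form additionally requires imm4 < 8. A byte-level sketch of that semantics using plain arrays instead of IR temporaries:

    #include <array>
    #include <cassert>
    #include <cstdint>

    // EXT Vd.16b, Vn.16b, Vm.16b, #imm4:
    // result byte i is byte (i + imm4) of the 32-byte concatenation with Vn
    // as the low half and Vm as the high half.
    std::array<uint8_t, 16> Ext16B(const std::array<uint8_t, 16>& vn,
                                   const std::array<uint8_t, 16>& vm,
                                   unsigned imm4) {
      assert(imm4 <= 15);
      std::array<uint8_t, 16> vd{};
      for (unsigned i = 0; i < 16; ++i) {
        unsigned src = i + imm4;
        vd[i] = (src < 16) ? vn[src] : vm[src - 16];
      }
      return vd;
    }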
host_arm_defs.c
    423   ARMRI84* ARMRI84_I84 ( UShort imm8, UShort imm4 ) {
    427   ri84->ARMri84.I84.imm4 = imm4;
    429   vassert(imm4 >= 0 && imm4 <= 15);
    443   2 * ri84->ARMri84.I84.imm4));
    2767  vassert(0 == (ri->ARMri84.I84.imm4 & ~0x0F));
    2770  instr |= (ri->ARMri84.I84.imm4 << 8);
    4339  case ARMneon_VEXT: /* VEXT.8 reg, reg, #imm4 */
    4536  UInt imm4 ...
    [all...]
guest_arm_toIR.c
    2862   UInt imm4 = (theInstr >> 8) & 0xf;
    2868   /*loV128*/getQReg(nreg), mkU8(imm4)), condT);
    2871   /*loI64*/getDRegI64(nreg), mkU8(imm4)), condT);
    2874   reg_t, mreg, imm4);
    2990   UInt imm4 = (theInstr >> 16) & 0xF;
    2997   if ((imm4 == 0) || (imm4 == 8))
    3009   if ((imm4 & 1) == 1) {
    3012   index = imm4 >> 1;
    3014   } else if ((imm4 ...
    14334  UInt imm4 = INSN(3,0);
    [all...]
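Lines 2990-3014 decode the VDUP (scalar) imm4 field back into an element size and lane index, the inverse of the encoding built in host_arm_isel.c: the lowest set bit selects the size, the bits above it give the index, and 0b0000 / 0b1000 are rejected. A compact sketch of that decode (the function name and bool-return convention are mine):

    #include <cassert>

    // Decode the VDUP (scalar) imm4 field into element size and lane index.
    // Returns false for the two undefined patterns (0b0000 and 0b1000).
    bool DecodeVDupImm4(unsigned imm4, int* elemBits, unsigned* index) {
      assert(imm4 <= 15);
      if (imm4 == 0 || imm4 == 8) return false;
      if (imm4 & 1)      { *elemBits = 8;  *index = imm4 >> 1; }
      else if (imm4 & 2) { *elemBits = 16; *index = imm4 >> 2; }
      else               { *elemBits = 32; *index = imm4 >> 3; }
      return true;
    }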
host_arm64_defs.c
    5190  011 01110 000 m 0 imm4 0 n d   EXT Vd.16b, Vn.16b, Vm.16b, #imm4
    5191  where imm4 = the shift amount, in bytes,
    5197  UInt imm4 = i->ARM64in.VExtV.amtB;
    5198  vassert(imm4 >= 1 && imm4 <= 15);
    5200  X000000 | (imm4 << 1), vN, vD);
/external/v8/src/arm/
assembler-arm.cc
    3171  // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
    3179  int imm4 = (imm5 >> 1) & 0xf;
    3181  vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
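Here a 5-bit immediate is split across two encoding fields: its upper four bits become imm4 (bits 3-0) and, by implication, its low bit becomes i (bit 5); only the imm4 extraction is visible in the listed lines, so the handling of i below is an assumption. A tiny sketch of that split, omitting the surrounding opcode bits:

    #include <cassert>
    #include <cstdint>

    // Split a 5-bit immediate across the i (bit 5) and imm4 (bits 3-0) fields
    // of the encoding sketched in the comment above. Mapping the low bit of
    // imm5 to i is my assumption; only the imm4 part appears in the listed code.
    uint32_t EncodeSplitImm5(uint32_t imm5) {
      assert(imm5 <= 31);
      uint32_t i = imm5 & 1;              // low bit -> encoding bit 5
      uint32_t imm4 = (imm5 >> 1) & 0xF;  // upper four bits -> encoding bits 3-0
      return (i << 5) | imm4;
    }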
/external/vixl/src/aarch32/
assembler-aarch32.cc
    16329  uint32_t imm4 = imm / dt.GetSize();
    16331  rm.Encode(5, 0) | (imm4 << 8));
    16349  uint32_t imm4 = imm / dt.GetSize();
    16351  rm.Encode(5, 0) | (imm4 << 8));
    16386  uint32_t imm4 = imm / dt.GetSize();
    16388  rm.Encode(5, 0) | (imm4 << 8));
    16406  uint32_t imm4 = imm / dt.GetSize();
    16408  rm.Encode(5, 0) | (imm4 << 8));