// ARMv7Assembler.h revision cad810f21b803229eb11403f9209855525a25d57
/*
 * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
 * Copyright (C) 2010 University of Szeged
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */ 26 27#ifndef ARMAssembler_h 28#define ARMAssembler_h 29 30#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) 31 32#include "AssemblerBuffer.h" 33#include <wtf/Assertions.h> 34#include <wtf/Vector.h> 35#include <stdint.h> 36 37namespace JSC { 38 39namespace ARMRegisters { 40 typedef enum { 41 r0, 42 r1, 43 r2, 44 r3, 45 r4, 46 r5, 47 r6, 48 r7, wr = r7, // thumb work register 49 r8, 50 r9, sb = r9, // static base 51 r10, sl = r10, // stack limit 52 r11, fp = r11, // frame pointer 53 r12, ip = r12, 54 r13, sp = r13, 55 r14, lr = r14, 56 r15, pc = r15, 57 } RegisterID; 58 59 typedef enum { 60 s0, 61 s1, 62 s2, 63 s3, 64 s4, 65 s5, 66 s6, 67 s7, 68 s8, 69 s9, 70 s10, 71 s11, 72 s12, 73 s13, 74 s14, 75 s15, 76 s16, 77 s17, 78 s18, 79 s19, 80 s20, 81 s21, 82 s22, 83 s23, 84 s24, 85 s25, 86 s26, 87 s27, 88 s28, 89 s29, 90 s30, 91 s31, 92 } FPSingleRegisterID; 93 94 typedef enum { 95 d0, 96 d1, 97 d2, 98 d3, 99 d4, 100 d5, 101 d6, 102 d7, 103 d8, 104 d9, 105 d10, 106 d11, 107 d12, 108 d13, 109 d14, 110 d15, 111 d16, 112 d17, 113 d18, 114 d19, 115 d20, 116 d21, 117 d22, 118 d23, 119 d24, 120 d25, 121 d26, 122 d27, 123 d28, 124 d29, 125 d30, 126 d31, 127 } FPDoubleRegisterID; 128 129 typedef enum { 130 q0, 131 q1, 132 q2, 133 q3, 134 q4, 135 q5, 136 q6, 137 q7, 138 q8, 139 q9, 140 q10, 141 q11, 142 q12, 143 q13, 144 q14, 145 q15, 146 q16, 147 q17, 148 q18, 149 q19, 150 q20, 151 q21, 152 q22, 153 q23, 154 q24, 155 q25, 156 q26, 157 q27, 158 q28, 159 q29, 160 q30, 161 q31, 162 } FPQuadRegisterID; 163 164 inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg) 165 { 166 ASSERT(reg < d16); 167 return (FPSingleRegisterID)(reg << 1); 168 } 169 170 inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg) 171 { 172 ASSERT(!(reg & 1)); 173 return (FPDoubleRegisterID)(reg >> 1); 174 } 175} 176 177class ARMv7Assembler; 178class ARMThumbImmediate { 179 friend class ARMv7Assembler; 180 181 typedef uint8_t ThumbImmediateType; 182 static const ThumbImmediateType TypeInvalid = 0; 183 
static const ThumbImmediateType TypeEncoded = 1; 184 static const ThumbImmediateType TypeUInt16 = 2; 185 186 typedef union { 187 int16_t asInt; 188 struct { 189 unsigned imm8 : 8; 190 unsigned imm3 : 3; 191 unsigned i : 1; 192 unsigned imm4 : 4; 193 }; 194 // If this is an encoded immediate, then it may describe a shift, or a pattern. 195 struct { 196 unsigned shiftValue7 : 7; 197 unsigned shiftAmount : 5; 198 }; 199 struct { 200 unsigned immediate : 8; 201 unsigned pattern : 4; 202 }; 203 } ThumbImmediateValue; 204 205 // byte0 contains least significant bit; not using an array to make client code endian agnostic. 206 typedef union { 207 int32_t asInt; 208 struct { 209 uint8_t byte0; 210 uint8_t byte1; 211 uint8_t byte2; 212 uint8_t byte3; 213 }; 214 } PatternBytes; 215 216 ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N) 217 { 218 if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */ 219 value >>= N; /* if any were set, lose the bottom N */ 220 else /* if none of the top N bits are set, */ 221 zeros += N; /* then we have identified N leading zeros */ 222 } 223 224 static int32_t countLeadingZeros(uint32_t value) 225 { 226 if (!value) 227 return 32; 228 229 int32_t zeros = 0; 230 countLeadingZerosPartial(value, zeros, 16); 231 countLeadingZerosPartial(value, zeros, 8); 232 countLeadingZerosPartial(value, zeros, 4); 233 countLeadingZerosPartial(value, zeros, 2); 234 countLeadingZerosPartial(value, zeros, 1); 235 return zeros; 236 } 237 238 ARMThumbImmediate() 239 : m_type(TypeInvalid) 240 { 241 m_value.asInt = 0; 242 } 243 244 ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value) 245 : m_type(type) 246 , m_value(value) 247 { 248 } 249 250 ARMThumbImmediate(ThumbImmediateType type, uint16_t value) 251 : m_type(TypeUInt16) 252 { 253 // Make sure this constructor is only reached with type TypeUInt16; 254 // this extra parameter makes the code a little clearer by making it 
255 // explicit at call sites which type is being constructed 256 ASSERT_UNUSED(type, type == TypeUInt16); 257 258 m_value.asInt = value; 259 } 260 261public: 262 static ARMThumbImmediate makeEncodedImm(uint32_t value) 263 { 264 ThumbImmediateValue encoding; 265 encoding.asInt = 0; 266 267 // okay, these are easy. 268 if (value < 256) { 269 encoding.immediate = value; 270 encoding.pattern = 0; 271 return ARMThumbImmediate(TypeEncoded, encoding); 272 } 273 274 int32_t leadingZeros = countLeadingZeros(value); 275 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case. 276 ASSERT(leadingZeros < 24); 277 278 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32, 279 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for 280 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z). 281 int32_t rightShiftAmount = 24 - leadingZeros; 282 if (value == ((value >> rightShiftAmount) << rightShiftAmount)) { 283 // Shift the value down to the low byte position. The assign to 284 // shiftValue7 drops the implicit top bit. 285 encoding.shiftValue7 = value >> rightShiftAmount; 286 // The endoded shift amount is the magnitude of a right rotate. 
287 encoding.shiftAmount = 8 + leadingZeros; 288 return ARMThumbImmediate(TypeEncoded, encoding); 289 } 290 291 PatternBytes bytes; 292 bytes.asInt = value; 293 294 if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) { 295 encoding.immediate = bytes.byte0; 296 encoding.pattern = 3; 297 return ARMThumbImmediate(TypeEncoded, encoding); 298 } 299 300 if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) { 301 encoding.immediate = bytes.byte0; 302 encoding.pattern = 1; 303 return ARMThumbImmediate(TypeEncoded, encoding); 304 } 305 306 if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) { 307 encoding.immediate = bytes.byte1; 308 encoding.pattern = 2; 309 return ARMThumbImmediate(TypeEncoded, encoding); 310 } 311 312 return ARMThumbImmediate(); 313 } 314 315 static ARMThumbImmediate makeUInt12(int32_t value) 316 { 317 return (!(value & 0xfffff000)) 318 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value) 319 : ARMThumbImmediate(); 320 } 321 322 static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value) 323 { 324 // If this is not a 12-bit unsigned it, try making an encoded immediate. 325 return (!(value & 0xfffff000)) 326 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value) 327 : makeEncodedImm(value); 328 } 329 330 // The 'make' methods, above, return a !isValid() value if the argument 331 // cannot be represented as the requested type. This methods is called 332 // 'get' since the argument can always be represented. 333 static ARMThumbImmediate makeUInt16(uint16_t value) 334 { 335 return ARMThumbImmediate(TypeUInt16, value); 336 } 337 338 bool isValid() 339 { 340 return m_type != TypeInvalid; 341 } 342 343 // These methods rely on the format of encoded byte values. 
344 bool isUInt3() { return !(m_value.asInt & 0xfff8); } 345 bool isUInt4() { return !(m_value.asInt & 0xfff0); } 346 bool isUInt5() { return !(m_value.asInt & 0xffe0); } 347 bool isUInt6() { return !(m_value.asInt & 0xffc0); } 348 bool isUInt7() { return !(m_value.asInt & 0xff80); } 349 bool isUInt8() { return !(m_value.asInt & 0xff00); } 350 bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); } 351 bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); } 352 bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); } 353 bool isUInt16() { return m_type == TypeUInt16; } 354 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; } 355 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; } 356 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; } 357 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; } 358 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; } 359 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; } 360 uint8_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; } 361 uint8_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; } 362 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; } 363 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; } 364 365 bool isEncodedImm() { return m_type == TypeEncoded; } 366 367private: 368 ThumbImmediateType m_type; 369 ThumbImmediateValue m_value; 370}; 371 372class VFPImmediate { 373public: 374 VFPImmediate(double d) 375 : m_value(-1) 376 { 377 union { 378 uint64_t i; 379 double d; 380 } u; 381 382 u.d = d; 383 384 int sign = static_cast<int>(u.i >> 63); 385 int exponent = static_cast<int>(u.i >> 52) & 0x7ff; 386 uint64_t mantissa = u.i & 0x000fffffffffffffull; 387 388 if ((exponent >= 0x3fc) && (exponent <= 0x403) && !(mantissa & 0x0000ffffffffffffull)) 389 m_value = (sign << 7) | ((exponent & 7) << 4) | (int)(mantissa >> 48); 390 } 391 392 bool isValid() 393 
{ 394 return m_value != -1; 395 } 396 397 uint8_t value() 398 { 399 return (uint8_t)m_value; 400 } 401 402private: 403 int m_value; 404}; 405 406typedef enum { 407 SRType_LSL, 408 SRType_LSR, 409 SRType_ASR, 410 SRType_ROR, 411 412 SRType_RRX = SRType_ROR 413} ARMShiftType; 414 415class ARMv7Assembler; 416class ShiftTypeAndAmount { 417 friend class ARMv7Assembler; 418 419public: 420 ShiftTypeAndAmount() 421 { 422 m_u.type = (ARMShiftType)0; 423 m_u.amount = 0; 424 } 425 426 ShiftTypeAndAmount(ARMShiftType type, unsigned amount) 427 { 428 m_u.type = type; 429 m_u.amount = amount & 31; 430 } 431 432 unsigned lo4() { return m_u.lo4; } 433 unsigned hi4() { return m_u.hi4; } 434 435private: 436 union { 437 struct { 438 unsigned lo4 : 4; 439 unsigned hi4 : 4; 440 }; 441 struct { 442 unsigned type : 2; 443 unsigned amount : 6; 444 }; 445 } m_u; 446}; 447 448class ARMv7Assembler { 449public: 450 ~ARMv7Assembler() 451 { 452 ASSERT(m_jumpsToLink.isEmpty()); 453 } 454 455 typedef ARMRegisters::RegisterID RegisterID; 456 typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID; 457 typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID; 458 typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID; 459 460 // (HS, LO, HI, LS) -> (AE, B, A, BE) 461 // (VS, VC) -> (O, NO) 462 typedef enum { 463 ConditionEQ, 464 ConditionNE, 465 ConditionHS, ConditionCS = ConditionHS, 466 ConditionLO, ConditionCC = ConditionLO, 467 ConditionMI, 468 ConditionPL, 469 ConditionVS, 470 ConditionVC, 471 ConditionHI, 472 ConditionLS, 473 ConditionGE, 474 ConditionLT, 475 ConditionGT, 476 ConditionLE, 477 ConditionAL, 478 ConditionInvalid 479 } Condition; 480 481 enum JumpType { JumpFixed, JumpNoCondition, JumpCondition, JumpNoConditionFixedSize, JumpConditionFixedSize, JumpTypeCount }; 482 enum JumpLinkType { LinkInvalid, LinkJumpT1, LinkJumpT2, LinkJumpT3, 483 LinkJumpT4, LinkConditionalJumpT4, LinkBX, LinkConditionalBX, JumpLinkTypeCount }; 484 static const int JumpSizes[JumpLinkTypeCount]; 485 
static const int JumpPaddingSizes[JumpTypeCount]; 486 class LinkRecord { 487 public: 488 LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition) 489 : m_from(from) 490 , m_to(to) 491 , m_type(type) 492 , m_linkType(LinkInvalid) 493 , m_condition(condition) 494 { 495 } 496 intptr_t from() const { return m_from; } 497 void setFrom(intptr_t from) { m_from = from; } 498 intptr_t to() const { return m_to; } 499 JumpType type() const { return m_type; } 500 JumpLinkType linkType() const { return m_linkType; } 501 void setLinkType(JumpLinkType linkType) { ASSERT(m_linkType == LinkInvalid); m_linkType = linkType; } 502 Condition condition() const { return m_condition; } 503 private: 504 intptr_t m_from : 31; 505 intptr_t m_to : 31; 506 JumpType m_type : 3; 507 JumpLinkType m_linkType : 4; 508 Condition m_condition : 16; 509 }; 510 511 class JmpSrc { 512 friend class ARMv7Assembler; 513 friend class ARMInstructionFormatter; 514 friend class LinkBuffer; 515 public: 516 JmpSrc() 517 : m_offset(-1) 518 { 519 } 520 521 private: 522 JmpSrc(int offset, JumpType type) 523 : m_offset(offset) 524 , m_condition(ConditionInvalid) 525 , m_type(type) 526 { 527 ASSERT(m_type == JumpFixed || m_type == JumpNoCondition || m_type == JumpNoConditionFixedSize); 528 } 529 530 JmpSrc(int offset, JumpType type, Condition condition) 531 : m_offset(offset) 532 , m_condition(condition) 533 , m_type(type) 534 { 535 ASSERT(m_type == JumpFixed || m_type == JumpCondition || m_type == JumpConditionFixedSize); 536 } 537 538 int m_offset; 539 Condition m_condition : 16; 540 JumpType m_type : 16; 541 542 }; 543 544 class JmpDst { 545 friend class ARMv7Assembler; 546 friend class ARMInstructionFormatter; 547 friend class LinkBuffer; 548 public: 549 JmpDst() 550 : m_offset(-1) 551 , m_used(false) 552 { 553 } 554 555 bool isUsed() const { return m_used; } 556 bool isSet() const { return (m_offset != -1); } 557 void used() { m_used = true; } 558 private: 559 JmpDst(int offset) 560 : 
m_offset(offset) 561 , m_used(false) 562 { 563 ASSERT(m_offset == offset); 564 } 565 566 int m_offset : 31; 567 int m_used : 1; 568 }; 569 570private: 571 572 // ARMv7, Appx-A.6.3 573 bool BadReg(RegisterID reg) 574 { 575 return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc); 576 } 577 578 uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift) 579 { 580 uint32_t rdMask = (rdNum >> 1) << highBitsShift; 581 if (rdNum & 1) 582 rdMask |= 1 << lowBitShift; 583 return rdMask; 584 } 585 586 uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift) 587 { 588 uint32_t rdMask = (rdNum & 0xf) << lowBitsShift; 589 if (rdNum & 16) 590 rdMask |= 1 << highBitShift; 591 return rdMask; 592 } 593 594 typedef enum { 595 OP_ADD_reg_T1 = 0x1800, 596 OP_SUB_reg_T1 = 0x1A00, 597 OP_ADD_imm_T1 = 0x1C00, 598 OP_SUB_imm_T1 = 0x1E00, 599 OP_MOV_imm_T1 = 0x2000, 600 OP_CMP_imm_T1 = 0x2800, 601 OP_ADD_imm_T2 = 0x3000, 602 OP_SUB_imm_T2 = 0x3800, 603 OP_AND_reg_T1 = 0x4000, 604 OP_EOR_reg_T1 = 0x4040, 605 OP_TST_reg_T1 = 0x4200, 606 OP_RSB_imm_T1 = 0x4240, 607 OP_CMP_reg_T1 = 0x4280, 608 OP_ORR_reg_T1 = 0x4300, 609 OP_MVN_reg_T1 = 0x43C0, 610 OP_ADD_reg_T2 = 0x4400, 611 OP_MOV_reg_T1 = 0x4600, 612 OP_BLX = 0x4700, 613 OP_BX = 0x4700, 614 OP_STR_reg_T1 = 0x5000, 615 OP_LDR_reg_T1 = 0x5800, 616 OP_LDRH_reg_T1 = 0x5A00, 617 OP_LDRB_reg_T1 = 0x5C00, 618 OP_STR_imm_T1 = 0x6000, 619 OP_LDR_imm_T1 = 0x6800, 620 OP_LDRB_imm_T1 = 0x7800, 621 OP_LDRH_imm_T1 = 0x8800, 622 OP_STR_imm_T2 = 0x9000, 623 OP_LDR_imm_T2 = 0x9800, 624 OP_ADD_SP_imm_T1 = 0xA800, 625 OP_ADD_SP_imm_T2 = 0xB000, 626 OP_SUB_SP_imm_T1 = 0xB080, 627 OP_BKPT = 0xBE00, 628 OP_IT = 0xBF00, 629 OP_NOP_T1 = 0xBF00, 630 } OpcodeID; 631 632 typedef enum { 633 OP_B_T1 = 0xD000, 634 OP_B_T2 = 0xE000, 635 OP_AND_reg_T2 = 0xEA00, 636 OP_TST_reg_T2 = 0xEA10, 637 OP_ORR_reg_T2 = 0xEA40, 638 OP_ORR_S_reg_T2 = 0xEA50, 639 OP_ASR_imm_T1 = 0xEA4F, 640 OP_LSL_imm_T1 = 0xEA4F, 641 
OP_LSR_imm_T1 = 0xEA4F, 642 OP_ROR_imm_T1 = 0xEA4F, 643 OP_MVN_reg_T2 = 0xEA6F, 644 OP_EOR_reg_T2 = 0xEA80, 645 OP_ADD_reg_T3 = 0xEB00, 646 OP_ADD_S_reg_T3 = 0xEB10, 647 OP_SUB_reg_T2 = 0xEBA0, 648 OP_SUB_S_reg_T2 = 0xEBB0, 649 OP_CMP_reg_T2 = 0xEBB0, 650 OP_VSTR = 0xED00, 651 OP_VLDR = 0xED10, 652 OP_VMOV_StoC = 0xEE00, 653 OP_VMOV_CtoS = 0xEE10, 654 OP_VMUL_T2 = 0xEE20, 655 OP_VADD_T2 = 0xEE30, 656 OP_VSUB_T2 = 0xEE30, 657 OP_VDIV = 0xEE80, 658 OP_VCMP = 0xEEB0, 659 OP_VCVT_FPIVFP = 0xEEB0, 660 OP_VMOV_IMM_T2 = 0xEEB0, 661 OP_VMRS = 0xEEB0, 662 OP_B_T3a = 0xF000, 663 OP_B_T4a = 0xF000, 664 OP_AND_imm_T1 = 0xF000, 665 OP_TST_imm = 0xF010, 666 OP_ORR_imm_T1 = 0xF040, 667 OP_MOV_imm_T2 = 0xF040, 668 OP_MVN_imm = 0xF060, 669 OP_EOR_imm_T1 = 0xF080, 670 OP_ADD_imm_T3 = 0xF100, 671 OP_ADD_S_imm_T3 = 0xF110, 672 OP_CMN_imm = 0xF110, 673 OP_SUB_imm_T3 = 0xF1A0, 674 OP_SUB_S_imm_T3 = 0xF1B0, 675 OP_CMP_imm_T2 = 0xF1B0, 676 OP_RSB_imm_T2 = 0xF1C0, 677 OP_ADD_imm_T4 = 0xF200, 678 OP_MOV_imm_T3 = 0xF240, 679 OP_SUB_imm_T4 = 0xF2A0, 680 OP_MOVT = 0xF2C0, 681 OP_NOP_T2a = 0xF3AF, 682 OP_LDRB_imm_T3 = 0xF810, 683 OP_LDRB_reg_T2 = 0xF810, 684 OP_LDRH_reg_T2 = 0xF830, 685 OP_LDRH_imm_T3 = 0xF830, 686 OP_STR_imm_T4 = 0xF840, 687 OP_STR_reg_T2 = 0xF840, 688 OP_LDR_imm_T4 = 0xF850, 689 OP_LDR_reg_T2 = 0xF850, 690 OP_LDRB_imm_T2 = 0xF890, 691 OP_LDRH_imm_T2 = 0xF8B0, 692 OP_STR_imm_T3 = 0xF8C0, 693 OP_LDR_imm_T3 = 0xF8D0, 694 OP_LSL_reg_T2 = 0xFA00, 695 OP_LSR_reg_T2 = 0xFA20, 696 OP_ASR_reg_T2 = 0xFA40, 697 OP_ROR_reg_T2 = 0xFA60, 698 OP_CLZ = 0xFAB0, 699 OP_SMULL_T1 = 0xFB80, 700 } OpcodeID1; 701 702 typedef enum { 703 OP_VADD_T2b = 0x0A00, 704 OP_VDIVb = 0x0A00, 705 OP_VLDRb = 0x0A00, 706 OP_VMOV_IMM_T2b = 0x0A00, 707 OP_VMUL_T2b = 0x0A00, 708 OP_VSTRb = 0x0A00, 709 OP_VMOV_CtoSb = 0x0A10, 710 OP_VMOV_StoCb = 0x0A10, 711 OP_VMRSb = 0x0A10, 712 OP_VCMPb = 0x0A40, 713 OP_VCVT_FPIVFPb = 0x0A40, 714 OP_VSUB_T2b = 0x0A40, 715 OP_NOP_T2b = 0x8000, 716 OP_B_T3b = 0x8000, 717 OP_B_T4b = 
0x9000, 718 } OpcodeID2; 719 720 struct FourFours { 721 FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0) 722 { 723 m_u.f0 = f0; 724 m_u.f1 = f1; 725 m_u.f2 = f2; 726 m_u.f3 = f3; 727 } 728 729 union { 730 unsigned value; 731 struct { 732 unsigned f0 : 4; 733 unsigned f1 : 4; 734 unsigned f2 : 4; 735 unsigned f3 : 4; 736 }; 737 } m_u; 738 }; 739 740 class ARMInstructionFormatter; 741 742 // false means else! 743 bool ifThenElseConditionBit(Condition condition, bool isIf) 744 { 745 return isIf ? (condition & 1) : !(condition & 1); 746 } 747 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if) 748 { 749 int mask = (ifThenElseConditionBit(condition, inst2if) << 3) 750 | (ifThenElseConditionBit(condition, inst3if) << 2) 751 | (ifThenElseConditionBit(condition, inst4if) << 1) 752 | 1; 753 ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); 754 return (condition << 4) | mask; 755 } 756 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if) 757 { 758 int mask = (ifThenElseConditionBit(condition, inst2if) << 3) 759 | (ifThenElseConditionBit(condition, inst3if) << 2) 760 | 2; 761 ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); 762 return (condition << 4) | mask; 763 } 764 uint8_t ifThenElse(Condition condition, bool inst2if) 765 { 766 int mask = (ifThenElseConditionBit(condition, inst2if) << 3) 767 | 4; 768 ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); 769 return (condition << 4) | mask; 770 } 771 772 uint8_t ifThenElse(Condition condition) 773 { 774 int mask = 8; 775 return (condition << 4) | mask; 776 } 777 778public: 779 780 void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) 781 { 782 // Rd can only be SP if Rn is also SP. 
783 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); 784 ASSERT(rd != ARMRegisters::pc); 785 ASSERT(rn != ARMRegisters::pc); 786 ASSERT(imm.isValid()); 787 788 if (rn == ARMRegisters::sp) { 789 if (!(rd & 8) && imm.isUInt10()) { 790 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, imm.getUInt10() >> 2); 791 return; 792 } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) { 793 m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, imm.getUInt9() >> 2); 794 return; 795 } 796 } else if (!((rd | rn) & 8)) { 797 if (imm.isUInt3()) { 798 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd); 799 return; 800 } else if ((rd == rn) && imm.isUInt8()) { 801 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8()); 802 return; 803 } 804 } 805 806 if (imm.isEncodedImm()) 807 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm); 808 else { 809 ASSERT(imm.isUInt12()); 810 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm); 811 } 812 } 813 814 void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) 815 { 816 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); 817 ASSERT(rd != ARMRegisters::pc); 818 ASSERT(rn != ARMRegisters::pc); 819 ASSERT(!BadReg(rm)); 820 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); 821 } 822 823 // NOTE: In an IT block, add doesn't modify the flags register. 824 void add(RegisterID rd, RegisterID rn, RegisterID rm) 825 { 826 if (rd == rn) 827 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd); 828 else if (rd == rm) 829 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd); 830 else if (!((rd | rn | rm) & 8)) 831 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd); 832 else 833 add(rd, rn, rm, ShiftTypeAndAmount()); 834 } 835 836 // Not allowed in an IT (if then) block. 
837 void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) 838 { 839 // Rd can only be SP if Rn is also SP. 840 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); 841 ASSERT(rd != ARMRegisters::pc); 842 ASSERT(rn != ARMRegisters::pc); 843 ASSERT(imm.isEncodedImm()); 844 845 if (!((rd | rn) & 8)) { 846 if (imm.isUInt3()) { 847 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd); 848 return; 849 } else if ((rd == rn) && imm.isUInt8()) { 850 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8()); 851 return; 852 } 853 } 854 855 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm); 856 } 857 858 // Not allowed in an IT (if then) block? 859 void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) 860 { 861 ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); 862 ASSERT(rd != ARMRegisters::pc); 863 ASSERT(rn != ARMRegisters::pc); 864 ASSERT(!BadReg(rm)); 865 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); 866 } 867 868 // Not allowed in an IT (if then) block. 
869 void add_S(RegisterID rd, RegisterID rn, RegisterID rm) 870 { 871 if (!((rd | rn | rm) & 8)) 872 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd); 873 else 874 add_S(rd, rn, rm, ShiftTypeAndAmount()); 875 } 876 877 void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) 878 { 879 ASSERT(!BadReg(rd)); 880 ASSERT(!BadReg(rn)); 881 ASSERT(imm.isEncodedImm()); 882 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm); 883 } 884 885 void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) 886 { 887 ASSERT(!BadReg(rd)); 888 ASSERT(!BadReg(rn)); 889 ASSERT(!BadReg(rm)); 890 m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); 891 } 892 893 void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm) 894 { 895 if ((rd == rn) && !((rd | rm) & 8)) 896 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd); 897 else if ((rd == rm) && !((rd | rn) & 8)) 898 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd); 899 else 900 ARM_and(rd, rn, rm, ShiftTypeAndAmount()); 901 } 902 903 void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount) 904 { 905 ASSERT(!BadReg(rd)); 906 ASSERT(!BadReg(rm)); 907 ShiftTypeAndAmount shift(SRType_ASR, shiftAmount); 908 m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm)); 909 } 910 911 void asr(RegisterID rd, RegisterID rn, RegisterID rm) 912 { 913 ASSERT(!BadReg(rd)); 914 ASSERT(!BadReg(rn)); 915 ASSERT(!BadReg(rm)); 916 m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm)); 917 } 918 919 // Only allowed in IT (if then) block if last instruction. 920 JmpSrc b(JumpType type) 921 { 922 m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b); 923 return JmpSrc(m_formatter.size(), type); 924 } 925 926 // Only allowed in IT (if then) block if last instruction. 
927 JmpSrc blx(RegisterID rm, JumpType type) 928 { 929 ASSERT(rm != ARMRegisters::pc); 930 m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8); 931 return JmpSrc(m_formatter.size(), type); 932 } 933 934 // Only allowed in IT (if then) block if last instruction. 935 JmpSrc bx(RegisterID rm, JumpType type, Condition condition) 936 { 937 m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0); 938 return JmpSrc(m_formatter.size(), type, condition); 939 } 940 941 JmpSrc bx(RegisterID rm, JumpType type) 942 { 943 m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0); 944 return JmpSrc(m_formatter.size(), type); 945 } 946 947 void bkpt(uint8_t imm=0) 948 { 949 m_formatter.oneWordOp8Imm8(OP_BKPT, imm); 950 } 951 952 void clz(RegisterID rd, RegisterID rm) 953 { 954 ASSERT(!BadReg(rd)); 955 ASSERT(!BadReg(rm)); 956 m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm)); 957 } 958 959 void cmn(RegisterID rn, ARMThumbImmediate imm) 960 { 961 ASSERT(rn != ARMRegisters::pc); 962 ASSERT(imm.isEncodedImm()); 963 964 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm); 965 } 966 967 void cmp(RegisterID rn, ARMThumbImmediate imm) 968 { 969 ASSERT(rn != ARMRegisters::pc); 970 ASSERT(imm.isEncodedImm()); 971 972 if (!(rn & 8) && imm.isUInt8()) 973 m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8()); 974 else 975 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm); 976 } 977 978 void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) 979 { 980 ASSERT(rn != ARMRegisters::pc); 981 ASSERT(!BadReg(rm)); 982 m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm)); 983 } 984 985 void cmp(RegisterID rn, RegisterID rm) 986 { 987 if ((rn | rm) & 8) 988 cmp(rn, rm, ShiftTypeAndAmount()); 989 else 990 m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn); 991 } 992 993 // xor is not spelled with an 'e'. 
:-( 994 void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) 995 { 996 ASSERT(!BadReg(rd)); 997 ASSERT(!BadReg(rn)); 998 ASSERT(imm.isEncodedImm()); 999 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm); 1000 } 1001 1002 // xor is not spelled with an 'e'. :-( 1003 void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) 1004 { 1005 ASSERT(!BadReg(rd)); 1006 ASSERT(!BadReg(rn)); 1007 ASSERT(!BadReg(rm)); 1008 m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); 1009 } 1010 1011 // xor is not spelled with an 'e'. :-( 1012 void eor(RegisterID rd, RegisterID rn, RegisterID rm) 1013 { 1014 if ((rd == rn) && !((rd | rm) & 8)) 1015 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd); 1016 else if ((rd == rm) && !((rd | rn) & 8)) 1017 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd); 1018 else 1019 eor(rd, rn, rm, ShiftTypeAndAmount()); 1020 } 1021 1022 void it(Condition cond) 1023 { 1024 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond)); 1025 } 1026 1027 void it(Condition cond, bool inst2if) 1028 { 1029 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if)); 1030 } 1031 1032 void it(Condition cond, bool inst2if, bool inst3if) 1033 { 1034 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if)); 1035 } 1036 1037 void it(Condition cond, bool inst2if, bool inst3if, bool inst4if) 1038 { 1039 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if)); 1040 } 1041 1042 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. 
1043 void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) 1044 { 1045 ASSERT(rn != ARMRegisters::pc); // LDR (literal) 1046 ASSERT(imm.isUInt12()); 1047 1048 if (!((rt | rn) & 8) && imm.isUInt7()) 1049 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt); 1050 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10()) 1051 m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, imm.getUInt10() >> 2); 1052 else 1053 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12()); 1054 } 1055 1056 // If index is set, this is a regular offset or a pre-indexed load; 1057 // if index is not set then is is a post-index load. 1058 // 1059 // If wback is set rn is updated - this is a pre or post index load, 1060 // if wback is not set this is a regular offset memory access. 1061 // 1062 // (-255 <= offset <= 255) 1063 // _reg = REG[rn] 1064 // _tmp = _reg + offset 1065 // MEM[index ? _tmp : _reg] = REG[rt] 1066 // if (wback) REG[rn] = _tmp 1067 void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) 1068 { 1069 ASSERT(rt != ARMRegisters::pc); 1070 ASSERT(rn != ARMRegisters::pc); 1071 ASSERT(index || wback); 1072 ASSERT(!wback | (rt != rn)); 1073 1074 bool add = true; 1075 if (offset < 0) { 1076 add = false; 1077 offset = -offset; 1078 } 1079 ASSERT((offset & ~0xff) == 0); 1080 1081 offset |= (wback << 8); 1082 offset |= (add << 9); 1083 offset |= (index << 10); 1084 offset |= (1 << 11); 1085 1086 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset); 1087 } 1088 1089 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. 
1090 void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0) 1091 { 1092 ASSERT(rn != ARMRegisters::pc); // LDR (literal) 1093 ASSERT(!BadReg(rm)); 1094 ASSERT(shift <= 3); 1095 1096 if (!shift && !((rt | rn | rm) & 8)) 1097 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt); 1098 else 1099 m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm)); 1100 } 1101 1102 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. 1103 void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) 1104 { 1105 ASSERT(rn != ARMRegisters::pc); // LDR (literal) 1106 ASSERT(imm.isUInt12()); 1107 1108 if (!((rt | rn) & 8) && imm.isUInt6()) 1109 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt); 1110 else 1111 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12()); 1112 } 1113 1114 // If index is set, this is a regular offset or a pre-indexed load; 1115 // if index is not set then is is a post-index load. 1116 // 1117 // If wback is set rn is updated - this is a pre or post index load, 1118 // if wback is not set this is a regular offset memory access. 1119 // 1120 // (-255 <= offset <= 255) 1121 // _reg = REG[rn] 1122 // _tmp = _reg + offset 1123 // MEM[index ? 
_tmp : _reg] = REG[rt] 1124 // if (wback) REG[rn] = _tmp 1125 void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) 1126 { 1127 ASSERT(rt != ARMRegisters::pc); 1128 ASSERT(rn != ARMRegisters::pc); 1129 ASSERT(index || wback); 1130 ASSERT(!wback | (rt != rn)); 1131 1132 bool add = true; 1133 if (offset < 0) { 1134 add = false; 1135 offset = -offset; 1136 } 1137 ASSERT((offset & ~0xff) == 0); 1138 1139 offset |= (wback << 8); 1140 offset |= (add << 9); 1141 offset |= (index << 10); 1142 offset |= (1 << 11); 1143 1144 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset); 1145 } 1146 1147 void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0) 1148 { 1149 ASSERT(!BadReg(rt)); // Memory hint 1150 ASSERT(rn != ARMRegisters::pc); // LDRH (literal) 1151 ASSERT(!BadReg(rm)); 1152 ASSERT(shift <= 3); 1153 1154 if (!shift && !((rt | rn | rm) & 8)) 1155 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt); 1156 else 1157 m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm)); 1158 } 1159 1160 void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) 1161 { 1162 ASSERT(rn != ARMRegisters::pc); // LDR (literal) 1163 ASSERT(imm.isUInt12()); 1164 1165 if (!((rt | rn) & 8) && imm.isUInt5()) 1166 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt); 1167 else 1168 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12()); 1169 } 1170 1171 void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) 1172 { 1173 ASSERT(rt != ARMRegisters::pc); 1174 ASSERT(rn != ARMRegisters::pc); 1175 ASSERT(index || wback); 1176 ASSERT(!wback | (rt != rn)); 1177 1178 bool add = true; 1179 if (offset < 0) { 1180 add = false; 1181 offset = -offset; 1182 } 1183 1184 ASSERT(!(offset & ~0xff)); 1185 1186 offset |= (wback << 8); 1187 offset |= (add << 9); 1188 offset |= (index << 10); 1189 offset |= (1 << 11); 1190 1191 
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset); 1192 } 1193 1194 void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0) 1195 { 1196 ASSERT(rn != ARMRegisters::pc); // LDR (literal) 1197 ASSERT(!BadReg(rm)); 1198 ASSERT(shift <= 3); 1199 1200 if (!shift && !((rt | rn | rm) & 8)) 1201 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt); 1202 else 1203 m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm)); 1204 } 1205 1206 void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount) 1207 { 1208 ASSERT(!BadReg(rd)); 1209 ASSERT(!BadReg(rm)); 1210 ShiftTypeAndAmount shift(SRType_LSL, shiftAmount); 1211 m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm)); 1212 } 1213 1214 void lsl(RegisterID rd, RegisterID rn, RegisterID rm) 1215 { 1216 ASSERT(!BadReg(rd)); 1217 ASSERT(!BadReg(rn)); 1218 ASSERT(!BadReg(rm)); 1219 m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm)); 1220 } 1221 1222 void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount) 1223 { 1224 ASSERT(!BadReg(rd)); 1225 ASSERT(!BadReg(rm)); 1226 ShiftTypeAndAmount shift(SRType_LSR, shiftAmount); 1227 m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm)); 1228 } 1229 1230 void lsr(RegisterID rd, RegisterID rn, RegisterID rm) 1231 { 1232 ASSERT(!BadReg(rd)); 1233 ASSERT(!BadReg(rn)); 1234 ASSERT(!BadReg(rm)); 1235 m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm)); 1236 } 1237 1238 void movT3(RegisterID rd, ARMThumbImmediate imm) 1239 { 1240 ASSERT(imm.isValid()); 1241 ASSERT(!imm.isEncodedImm()); 1242 ASSERT(!BadReg(rd)); 1243 1244 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm); 1245 } 1246 1247 void mov(RegisterID rd, ARMThumbImmediate imm) 1248 { 1249 ASSERT(imm.isValid()); 1250 ASSERT(!BadReg(rd)); 1251 1252 if ((rd < 8) && imm.isUInt8()) 1253 
m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8()); 1254 else if (imm.isEncodedImm()) 1255 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm); 1256 else 1257 movT3(rd, imm); 1258 } 1259 1260 void mov(RegisterID rd, RegisterID rm) 1261 { 1262 m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd); 1263 } 1264 1265 void movt(RegisterID rd, ARMThumbImmediate imm) 1266 { 1267 ASSERT(imm.isUInt16()); 1268 ASSERT(!BadReg(rd)); 1269 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm); 1270 } 1271 1272 void mvn(RegisterID rd, ARMThumbImmediate imm) 1273 { 1274 ASSERT(imm.isEncodedImm()); 1275 ASSERT(!BadReg(rd)); 1276 1277 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm); 1278 } 1279 1280 void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift) 1281 { 1282 ASSERT(!BadReg(rd)); 1283 ASSERT(!BadReg(rm)); 1284 m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm)); 1285 } 1286 1287 void mvn(RegisterID rd, RegisterID rm) 1288 { 1289 if (!((rd | rm) & 8)) 1290 m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd); 1291 else 1292 mvn(rd, rm, ShiftTypeAndAmount()); 1293 } 1294 1295 void neg(RegisterID rd, RegisterID rm) 1296 { 1297 ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0); 1298 sub(rd, zero, rm); 1299 } 1300 1301 void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) 1302 { 1303 ASSERT(!BadReg(rd)); 1304 ASSERT(!BadReg(rn)); 1305 ASSERT(imm.isEncodedImm()); 1306 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm); 1307 } 1308 1309 void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) 1310 { 1311 ASSERT(!BadReg(rd)); 1312 ASSERT(!BadReg(rn)); 1313 ASSERT(!BadReg(rm)); 1314 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); 1315 } 1316 1317 void orr(RegisterID rd, RegisterID rn, RegisterID rm) 1318 { 1319 if ((rd == rn) && !((rd | rm) & 
8)) 1320 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd); 1321 else if ((rd == rm) && !((rd | rn) & 8)) 1322 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd); 1323 else 1324 orr(rd, rn, rm, ShiftTypeAndAmount()); 1325 } 1326 1327 void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) 1328 { 1329 ASSERT(!BadReg(rd)); 1330 ASSERT(!BadReg(rn)); 1331 ASSERT(!BadReg(rm)); 1332 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); 1333 } 1334 1335 void orr_S(RegisterID rd, RegisterID rn, RegisterID rm) 1336 { 1337 if ((rd == rn) && !((rd | rm) & 8)) 1338 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd); 1339 else if ((rd == rm) && !((rd | rn) & 8)) 1340 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd); 1341 else 1342 orr_S(rd, rn, rm, ShiftTypeAndAmount()); 1343 } 1344 1345 void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount) 1346 { 1347 ASSERT(!BadReg(rd)); 1348 ASSERT(!BadReg(rm)); 1349 ShiftTypeAndAmount shift(SRType_ROR, shiftAmount); 1350 m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm)); 1351 } 1352 1353 void ror(RegisterID rd, RegisterID rn, RegisterID rm) 1354 { 1355 ASSERT(!BadReg(rd)); 1356 ASSERT(!BadReg(rn)); 1357 ASSERT(!BadReg(rm)); 1358 m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm)); 1359 } 1360 1361 void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm) 1362 { 1363 ASSERT(!BadReg(rdLo)); 1364 ASSERT(!BadReg(rdHi)); 1365 ASSERT(!BadReg(rn)); 1366 ASSERT(!BadReg(rm)); 1367 ASSERT(rdLo != rdHi); 1368 m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm)); 1369 } 1370 1371 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. 
1372 void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) 1373 { 1374 ASSERT(rt != ARMRegisters::pc); 1375 ASSERT(rn != ARMRegisters::pc); 1376 ASSERT(imm.isUInt12()); 1377 1378 if (!((rt | rn) & 8) && imm.isUInt7()) 1379 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt); 1380 else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10()) 1381 m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, imm.getUInt10() >> 2); 1382 else 1383 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12()); 1384 } 1385 1386 // If index is set, this is a regular offset or a pre-indexed store; 1387 // if index is not set then is is a post-index store. 1388 // 1389 // If wback is set rn is updated - this is a pre or post index store, 1390 // if wback is not set this is a regular offset memory access. 1391 // 1392 // (-255 <= offset <= 255) 1393 // _reg = REG[rn] 1394 // _tmp = _reg + offset 1395 // MEM[index ? _tmp : _reg] = REG[rt] 1396 // if (wback) REG[rn] = _tmp 1397 void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) 1398 { 1399 ASSERT(rt != ARMRegisters::pc); 1400 ASSERT(rn != ARMRegisters::pc); 1401 ASSERT(index || wback); 1402 ASSERT(!wback | (rt != rn)); 1403 1404 bool add = true; 1405 if (offset < 0) { 1406 add = false; 1407 offset = -offset; 1408 } 1409 ASSERT((offset & ~0xff) == 0); 1410 1411 offset |= (wback << 8); 1412 offset |= (add << 9); 1413 offset |= (index << 10); 1414 offset |= (1 << 11); 1415 1416 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset); 1417 } 1418 1419 // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. 
    // Store word, register offset: MEM[rn + (rm << shift)] = rt.
    void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        // 16-bit T1 encoding when all registers are low and there is no shift.
        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // Subtract immediate: rd = rn - imm, choosing the shortest encoding:
    // SP-adjust T1, low-register T1/T2, expandable-immediate T3, or plain
    // 12-bit T4.
    void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            // SUB SP, SP, #imm - immediate is word-scaled (imm7 = imm/4).
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
            return;
        } else if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                // T1 packs a 3-bit immediate into the register slot.
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
        else {
            ASSERT(imm.isUInt12());
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
        }
    }

    // Reverse subtract: rd = imm - rn (note the argument order encodes RSB).
    void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
    {
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt12());

        // RSB T1 ("negs") only encodes imm == 0 with low registers.
        if (!((rd | rn) & 8) && !imm.getUInt12())
            m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
        else
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
    }

    // Subtract shifted register: rd = rn - (rm shifted).
    void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // NOTE: In an IT block, sub doesn't modify the flags register.
    // (The T1 encoding below is flag-setting only outside an IT block.)
    void sub(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
        else
            sub(rd, rn, rm, ShiftTypeAndAmount());
    }

    // Flag-setting subtract immediate. Not allowed in an IT (if then) block.
    void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            // SUB SP, SP, #imm - immediate is word-scaled (imm7 = imm/4).
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
            return;
        } else if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
    }

    // Flag-setting subtract with shifted register.
    // Not allowed in an IT (if then) block?
    void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // Not allowed in an IT (if then) block.
    // Flag-setting subtract, register form.
    void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        // 16-bit T1 encoding when all registers are low (r0-r7).
        if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
        else
            sub_S(rd, rn, rm, ShiftTypeAndAmount());
    }

    // Test: set flags from rn AND imm; result is discarded (Rd field is 0xf).
    void tst(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
    }

    // Test against a shifted register.
    void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }

    void tst(RegisterID rn, RegisterID rm)
    {
        // The 16-bit T1 encoding only reaches the low registers.
        if ((rn | rm) & 8)
            tst(rn, rm, ShiftTypeAndAmount());
        else
            m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
    }

    // Double-precision VFP add: rd = rn + rm.
    void vadd_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
    }

    // Compare rd with rm, setting the FPSCR flags (read back via vmrs).
    void vcmp_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
    }

    // Compare rd against zero.
    void vcmpz_F64(FPDoubleRegisterID rd)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
    }

    // Convert a 32-bit signed integer (in a single register) to double.
    void vcvt_F64_S32(FPDoubleRegisterID rd, FPSingleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
    }

    // Convert double to 32-bit signed integer, rounding toward zero.
    void vcvtr_S32_F64(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
    }

    // Double-precision VFP divide: rd = rn / rm.
    void vdiv_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
    }

    // Load a double from [rn + imm].
    void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
    }

    // Move single-precision register to core register.
    void vmov(RegisterID rd, FPSingleRegisterID rn)
    {
        ASSERT(!BadReg(rd));
        m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rn, rd, VFPOperand(0));
    }

    // Move core register to single-precision register.
    void vmov(FPSingleRegisterID rd, RegisterID rn)
    {
        ASSERT(!BadReg(rn));
        m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rd, rn, VFPOperand(0));
    }

    // Read FPSCR into reg; with the default reg == pc the FP condition flags
    // are transferred straight to the APSR.
    void vmrs(RegisterID reg = ARMRegisters::pc)
    {
        ASSERT(reg != ARMRegisters::sp);
        m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
    }

    // Double-precision VFP multiply: rd = rn * rm.
    void vmul_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
    }

    // Store a double to [rn + imm].
    void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
    }

    // Double-precision VFP subtract: rd = rn - rm.
    void vsub_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
    }

    // Current buffer position, as a jump destination.
    JmpDst label()
    {
        return JmpDst(m_formatter.size());
    }

    // Pad with breakpoint instructions until the requested alignment is
    // reached, then return a label at the aligned position.
    JmpDst align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            bkpt();

        return label();
    }

    static void* getRelocatedAddress(void* code, JmpSrc jump)
    {
        ASSERT(jump.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
    }

    static void* getRelocatedAddress(void* code, JmpDst destination)
    {
        ASSERT(destination.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
    }

    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }

    // Map an unlinked-buffer offset to its post-compaction offset, using the
    // per-word table written into the buffer by recordLinkOffsets.
    int executableOffsetFor(int location)
    {
        if (!location)
            return 0;
        return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
    }

    // Bytes saved by emitting jumpLinkType instead of the fully padded jump.
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JumpPaddingSizes[jumpType] - JumpSizes[jumpLinkType]; }

    // Assembler admin methods:

    size_t size() const
    {
        return m_formatter.size();
    }

    // Orders link records by source offset (used to sort m_jumpsToLink).
    static bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
    {
        return a.from() < b.from();
    }

    bool canCompact(JumpType jumpType)
    {
        // The following cannot be compacted:
        // JumpFixed: represents custom jump sequence
        // JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
        // JumpConditionFixedSize: represents conditional jump that must remain a fixed size
        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
    }

    // Choose the shortest branch encoding that can span from 'from' to 'to',
    // skipping encodings that would trigger the Cortex-A8 branch erratum.
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
    {
        if (jumpType == JumpFixed)
            return LinkInvalid;

        // for patchable jump we must leave space for the longest code sequence
        if (jumpType == JumpNoConditionFixedSize)
            return LinkBX;
        if (jumpType == JumpConditionFixedSize)
            return LinkConditionalBX;

        const int paddingSize = JumpPaddingSizes[jumpType];
        bool mayTriggerErrata = false;

        if (jumpType == JumpCondition) {
            // 2-byte conditional T1
            const uint16_t* jumpT1Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT1]));
            if (canBeJumpT1(jumpT1Location, to))
                return LinkJumpT1;
            // 4-byte conditional T3
            const uint16_t* jumpT3Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT3]));
            if (canBeJumpT3(jumpT3Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkJumpT3;
            }
            // 4-byte conditional T4 with IT
            const uint16_t* conditionalJumpT4Location =
                reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkConditionalJumpT4]));
            if (canBeJumpT4(conditionalJumpT4Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkConditionalJumpT4;
            }
        } else {
            // 2-byte unconditional T2
            const uint16_t* jumpT2Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT2]));
            if (canBeJumpT2(jumpT2Location, to))
                return LinkJumpT2;
            // 4-byte unconditional T4
            const uint16_t* jumpT4Location = reinterpret_cast<const uint16_t*>(from - (paddingSize - JumpSizes[LinkJumpT4]));
            if (canBeJumpT4(jumpT4Location, to, mayTriggerErrata)) {
                if (!mayTriggerErrata)
                    return LinkJumpT4;
            }
            // use long jump sequence
            return LinkBX;
        }

        ASSERT(jumpType == JumpCondition);
        return LinkConditionalBX;
    }

    // Convenience overload: computes the link type and records it.
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
    {
        JumpLinkType linkType = computeJumpType(record.type(), from, to);
        record.setLinkType(linkType);
        return linkType;
    }

    // Fill the offset table (stored directly in the instruction buffer) for a
    // region, so executableOffsetFor can translate offsets after compaction.
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
    {
        int32_t ptr = regionStart / sizeof(int32_t);
        const int32_t end = regionEnd / sizeof(int32_t);
        int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
        while (ptr < end)
            offsets[ptr++] = offset;
    }

    // Jumps pending linkage, sorted by source offset.
    Vector<LinkRecord>& jumpsToLink()
    {
        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
        return m_jumpsToLink;
    }

    // Write the branch encoding chosen by computeJumpType into the buffer.
    void link(LinkRecord& record, uint8_t* from, uint8_t* to)
    {
        switch (record.linkType()) {
        case LinkJumpT1:
            linkJumpT1(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkJumpT2:
            linkJumpT2(reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkJumpT3:
            linkJumpT3(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkJumpT4:
            linkJumpT4(reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkConditionalJumpT4:
            linkConditionalJumpT4(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkConditionalBX:
            linkConditionalBX(record.condition(), reinterpret_cast<uint16_t*>(from), to);
            break;
        case LinkBX:
            linkBX(reinterpret_cast<uint16_t*>(from), to);
            break;
        default:
            ASSERT_NOT_REACHED();
            break;
        }
    }

    void* unlinkedCode() { return m_formatter.data(); }

    static unsigned getCallReturnOffset(JmpSrc call)
    {
        ASSERT(call.m_offset >= 0);
        return call.m_offset;
    }

    // Linking & patching:
    //
    // 'link' and 'patch' methods are for use on unprotected code - such as the code
    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.
    // Record an in-buffer jump to be resolved at finalization time.
    void linkJump(JmpSrc from, JmpDst to)
    {
        ASSERT(to.m_offset != -1);
        ASSERT(from.m_offset != -1);
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, from.m_type, from.m_condition));
    }

    // Link a jump in already-copied code to an absolute target address.
    static void linkJump(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
        linkJumpAbsolute(location, to);
    }

    // bah, this method should really be static, since it is used by the LinkBuffer.
    // return a bool saying whether the link was successful?
    static void linkCall(void* code, JmpSrc from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
        ASSERT(from.m_offset != -1);
        ASSERT(reinterpret_cast<intptr_t>(to) & 1); // Thumb target addresses carry bit 0 set.

        setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
    }

    // Patch the MOVW/MOVT pair ending at 'where' to load 'value'.
    static void linkPointer(void* code, JmpDst where, void* value)
    {
        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }

    // Re-point an already-linked jump in finalized code, flushing the icache.
    static void relinkJump(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));

        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);

        ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
    }

    // Re-point an already-linked call in finalized code.
    static void relinkCall(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
        ASSERT(reinterpret_cast<intptr_t>(to) & 1); // Thumb target addresses carry bit 0 set.

        setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
    }

    static void repatchInt32(void* where, int32_t value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setInt32(where, value);
    }

    static void repatchPointer(void* where, void* value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setPointer(where, value);
    }

    // Rewrite a patchable "LDR rt, [rn, rm]" into "ADD rt, rn, rm" in place,
    // turning a load through a computed address into the address itself.
    static void repatchLoadPtrToLEA(void* where)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
        uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;

        ASSERT((loadOp[0] & 0xfff0) == OP_LDR_reg_T2);
        ASSERT((loadOp[1] & 0x0ff0) == 0);
        int rn = loadOp[0] & 0xf;
        int rt = loadOp[1] >> 12;
        int rm = loadOp[1] & 0xf;

        loadOp[0] = OP_ADD_reg_T3 | rn;
        loadOp[1] = rt << 8 | rm;
        ExecutableAllocator::cacheFlush(loadOp, sizeof(uint32_t));
    }

private:
    // VFP operations commonly take one or more 5-bit operands, typically representing a
    // floating point register number. This will commonly be encoded in the instruction
    // in two parts, with one single bit field, and one 4-bit field. In the case of
    // double precision operands the high bit of the register number will be encoded
    // separately, and for single precision operands the high bit of the register number
    // will be encoded individually.
    // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
    // field to be encoded together in the instruction (the low 4-bits of a double
    // register number, or the high 4-bits of a single register number), and bit 4
    // contains the bit value to be encoded individually.
    struct VFPOperand {
        explicit VFPOperand(uint32_t value)
            : m_value(value)
        {
            ASSERT(!(m_value & ~0x1f));
        }

        VFPOperand(FPDoubleRegisterID reg)
            : m_value(reg)
        {
        }

        VFPOperand(RegisterID reg)
            : m_value(reg)
        {
        }

        VFPOperand(FPSingleRegisterID reg)
            : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
        {
        }

        // The single bit encoded separately in the instruction.
        uint32_t bits1()
        {
            return m_value >> 4;
        }

        // The 4-bit field encoded together in the instruction.
        uint32_t bits4()
        {
            return m_value & 0xf;
        }

        uint32_t m_value;
    };

    // Build the VCVT opcode operand selecting direction, signedness and
    // rounding mode.
    VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
    {
        // Cannot specify rounding when converting to float.
        ASSERT(toInteger || !isRoundZero);

        uint32_t op = 0x8;
        if (toInteger) {
            // opc2 indicates both toInteger & isUnsigned.
            op |= isUnsigned ? 0x4 : 0x5;
            // 'op' field in instruction is isRoundZero
            if (isRoundZero)
                op |= 0x10;
        } else {
            // 'op' field in instruction is isUnsigned
            if (!isUnsigned)
                op |= 0x10;
        }
        return VFPOperand(op);
    }

    // Rewrite the MOVW/MOVT pair ending at 'code' to load 'value', preserving
    // the destination register already present in the instructions.
    static void setInt32(void* code, uint32_t value)
    {
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));

        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
        location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
        location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);

        ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t));
    }

    static void setPointer(void* code, void* value)
    {
        setInt32(code, reinterpret_cast<uint32_t>(value));
    }

    // Instruction predicates: each checks whether the 16-bit word(s) at
    // 'address' match the given opcode pattern.
    static bool isB(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
    }

    static bool isBX(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] & 0xff87) == OP_BX;
    }

    static bool isMOV_imm_T3(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
    }

    static bool isMOVT(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
    }

    static bool isNOP_T1(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return instruction[0] == OP_NOP_T1;
    }

    static bool isNOP_T2(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
    }

    // Range check for the 16-bit conditional branch (signed 9-bit displacement).
    static bool canBeJumpT1(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T1 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        return ((relative << 23) >> 23) == relative;
    }

    // Range check for the 16-bit unconditional branch (signed 12-bit displacement).
    static bool canBeJumpT2(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        return ((relative << 20) >> 20) == relative;
    }

    // Range check for the 32-bit conditional branch (signed 21-bit displacement);
    // also reports whether emitting it here may trigger the Cortex-A8 erratum.
    static bool canBeJumpT3(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // From Cortex-A8 errata:
        // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
        // the target of the branch falls within the first region it is
        // possible for the processor to incorrectly determine the branch
        // instruction, and it is also possible in some cases for the processor
        // to enter a deadlock state.
        // The instruction is spanning two pages if it ends at an address ending 0x002
        bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
        mayTriggerErrata = spansTwo4K;
        // The target is in the first page if the jump branch back by [3..0x1002] bytes
        bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
        bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
        return ((relative << 11) >> 11) == relative && !wouldTriggerA8Errata;
    }

    // Range check for the 32-bit unconditional branch (signed 25-bit displacement);
    // same Cortex-A8 erratum reporting as canBeJumpT3.
    static bool canBeJumpT4(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // From Cortex-A8 errata:
        // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
        // the target of the branch falls within the first region it is
        // possible for the processor to incorrectly determine the branch
        // instruction, and it is also possible in some cases for the processor
        // to enter a deadlock state.
        // The instruction is spanning two pages if it ends at an address ending 0x002
        bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
        mayTriggerErrata = spansTwo4K;
        // The target is in the first page if the jump branch back by [3..0x1002] bytes
        bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
        bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
        return ((relative << 7) >> 7) == relative && !wouldTriggerA8Errata;
    }

    // Write a 16-bit conditional branch (B<c> T1) ending at 'instruction'.
    void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT1(instruction, target));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T1 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
    }

    // Write a 16-bit unconditional branch (B T2) ending at 'instruction'.
    static void linkJumpT2(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT2(instruction, target));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
    }

    // Write a 32-bit conditional branch (B<c>.W T3) ending at 'instruction'.
    void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        bool scratch;
        UNUSED_PARAM(scratch);
        ASSERT(canBeJumpT3(instruction, target, scratch));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
        instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
    }

    // Write a 32-bit unconditional branch (B.W T4) ending at 'instruction'.
    static void linkJumpT4(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        bool scratch;
        UNUSED_PARAM(scratch);
        ASSERT(canBeJumpT4(instruction, target, scratch));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // ARM encoding for the top two bits below the sign bit is 'peculiar'.
        if (relative >= 0)
            relative ^= 0xC00000;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
        instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
    }

    // Conditional long branch: an IT instruction guarding an unconditional T4.
    void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        instruction[-3] = ifThenElse(cond) | OP_IT;
        linkJumpT4(instruction, target);
    }

    // Longest unconditional jump: MOVW/MOVT the target (with the Thumb bit
    // set) into ip, then BX ip - reaches the full address space.
    static void linkBX(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
        instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
        instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
        instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
    }

    void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer.
:-( 2191 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); 2192 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1)); 2193 2194 linkBX(instruction, target); 2195 instruction[-6] = ifThenElse(cond, true, true) | OP_IT; 2196 } 2197 2198 static void linkJumpAbsolute(uint16_t* instruction, void* target) 2199 { 2200 // FIMXE: this should be up in the MacroAssembler layer. :-( 2201 ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); 2202 ASSERT(!(reinterpret_cast<intptr_t>(target) & 1)); 2203 2204 ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1)) 2205 || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2))); 2206 2207 bool scratch; 2208 if (canBeJumpT4(instruction, target, scratch)) { 2209 // There may be a better way to fix this, but right now put the NOPs first, since in the 2210 // case of an conditional branch this will be coming after an ITTT predicating *three* 2211 // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to 2212 // variable wdith encoding - the previous instruction might *look* like an ITTT but 2213 // actually be the second half of a 2-word op. 
2214 instruction[-5] = OP_NOP_T1; 2215 instruction[-4] = OP_NOP_T2a; 2216 instruction[-3] = OP_NOP_T2b; 2217 linkJumpT4(instruction, target); 2218 } else { 2219 const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip; 2220 ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1)); 2221 ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16)); 2222 instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16); 2223 instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16); 2224 instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); 2225 instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16); 2226 instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3); 2227 } 2228 } 2229 2230 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm) 2231 { 2232 return op | (imm.m_value.i << 10) | imm.m_value.imm4; 2233 } 2234 2235 static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm) 2236 { 2237 return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8; 2238 } 2239 2240 class ARMInstructionFormatter { 2241 public: 2242 void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm) 2243 { 2244 m_buffer.putShort(op | (rd << 8) | imm); 2245 } 2246 2247 void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2) 2248 { 2249 m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2); 2250 } 2251 2252 void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3) 2253 { 2254 m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3); 2255 } 2256 2257 void oneWordOp8Imm8(OpcodeID op, uint8_t imm) 2258 { 2259 m_buffer.putShort(op | imm); 2260 } 2261 2262 void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2) 2263 { 2264 m_buffer.putShort(op | 
((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7)); 2265 } 2266 void oneWordOp9Imm7(OpcodeID op, uint8_t imm) 2267 { 2268 m_buffer.putShort(op | imm); 2269 } 2270 2271 void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2) 2272 { 2273 m_buffer.putShort(op | (reg1 << 3) | reg2); 2274 } 2275 2276 void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff) 2277 { 2278 m_buffer.putShort(op | reg); 2279 m_buffer.putShort(ff.m_u.value); 2280 } 2281 2282 void twoWordOp16FourFours(OpcodeID1 op, FourFours ff) 2283 { 2284 m_buffer.putShort(op); 2285 m_buffer.putShort(ff.m_u.value); 2286 } 2287 2288 void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2) 2289 { 2290 m_buffer.putShort(op1); 2291 m_buffer.putShort(op2); 2292 } 2293 2294 void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm) 2295 { 2296 ARMThumbImmediate newImm = imm; 2297 newImm.m_value.imm4 = imm4; 2298 2299 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm)); 2300 m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm)); 2301 } 2302 2303 void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm) 2304 { 2305 m_buffer.putShort(op | reg1); 2306 m_buffer.putShort((reg2 << 12) | imm); 2307 } 2308 2309 // Formats up instructions of the pattern: 2310 // 111111111B11aaaa:bbbb222SA2C2cccc 2311 // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit. 2312 // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc. 
2313 void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c) 2314 { 2315 ASSERT(!(op1 & 0x004f)); 2316 ASSERT(!(op2 & 0xf1af)); 2317 m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4()); 2318 m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4()); 2319 } 2320 2321 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2. 2322 // (i.e. +/-(0..255) 32-bit words) 2323 void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm) 2324 { 2325 bool up = true; 2326 if (imm < 0) { 2327 imm = -imm; 2328 up = false; 2329 } 2330 2331 uint32_t offset = imm; 2332 ASSERT(!(offset & ~0x3fc)); 2333 offset >>= 2; 2334 2335 m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn); 2336 m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset); 2337 } 2338 2339 // Administrative methods: 2340 2341 size_t size() const { return m_buffer.size(); } 2342 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); } 2343 void* data() const { return m_buffer.data(); } 2344 void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); } 2345 2346 private: 2347 AssemblerBuffer m_buffer; 2348 } m_formatter; 2349 2350 Vector<LinkRecord> m_jumpsToLink; 2351 Vector<int32_t> m_offsets; 2352}; 2353 2354} // namespace JSC 2355 2356#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) 2357 2358#endif // ARMAssembler_h 2359