assembler-arm.h revision 44f0eee88ff00398ff7f715fab053374d808c90d
1// Copyright (c) 1994-2006 Sun Microsystems Inc. 2// All Rights Reserved. 3// 4// Redistribution and use in source and binary forms, with or without 5// modification, are permitted provided that the following conditions 6// are met: 7// 8// - Redistributions of source code must retain the above copyright notice, 9// this list of conditions and the following disclaimer. 10// 11// - Redistribution in binary form must reproduce the above copyright 12// notice, this list of conditions and the following disclaimer in the 13// documentation and/or other materials provided with the 14// distribution. 15// 16// - Neither the name of Sun Microsystems or the names of contributors may 17// be used to endorse or promote products derived from this software without 18// specific prior written permission. 19// 20// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 26// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 29// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 31// OF THE POSSIBILITY OF SUCH DAMAGE. 32 33// The original source code covered by the above license above has been 34// modified significantly by Google Inc. 35// Copyright 2010 the V8 project authors. All rights reserved. 
36 37// A light-weight ARM Assembler 38// Generates user mode instructions for the ARM architecture up to version 5 39 40#ifndef V8_ARM_ASSEMBLER_ARM_H_ 41#define V8_ARM_ASSEMBLER_ARM_H_ 42#include <stdio.h> 43#include "assembler.h" 44#include "constants-arm.h" 45#include "serialize.h" 46 47namespace v8 { 48namespace internal { 49 50// CPU Registers. 51// 52// 1) We would prefer to use an enum, but enum values are assignment- 53// compatible with int, which has caused code-generation bugs. 54// 55// 2) We would prefer to use a class instead of a struct but we don't like 56// the register initialization to depend on the particular initialization 57// order (which appears to be different on OS X, Linux, and Windows for the 58// installed versions of C++ we tried). Using a struct permits C-style 59// "initialization". Also, the Register objects cannot be const as this 60// forces initialization stubs in MSVC, making us dependent on initialization 61// order. 62// 63// 3) By not using an enum, we are possibly preventing the compiler from 64// doing certain constant folds, which may significantly reduce the 65// code generated for some assembly instructions (because they boil down 66// to a few constants). If this is a problem, we could change the code 67// such that we use an enum in optimized mode, and the struct in debug 68// mode. This way we get the compile-time error checking in debug mode 69// and best performance in optimized code. 
70 71// Core register 72struct Register { 73 static const int kNumRegisters = 16; 74 static const int kNumAllocatableRegisters = 8; 75 76 static int ToAllocationIndex(Register reg) { 77 ASSERT(reg.code() < kNumAllocatableRegisters); 78 return reg.code(); 79 } 80 81 static Register FromAllocationIndex(int index) { 82 ASSERT(index >= 0 && index < kNumAllocatableRegisters); 83 return from_code(index); 84 } 85 86 static const char* AllocationIndexToString(int index) { 87 ASSERT(index >= 0 && index < kNumAllocatableRegisters); 88 const char* const names[] = { 89 "r0", 90 "r1", 91 "r2", 92 "r3", 93 "r4", 94 "r5", 95 "r6", 96 "r7", 97 }; 98 return names[index]; 99 } 100 101 static Register from_code(int code) { 102 Register r = { code }; 103 return r; 104 } 105 106 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } 107 bool is(Register reg) const { return code_ == reg.code_; } 108 int code() const { 109 ASSERT(is_valid()); 110 return code_; 111 } 112 int bit() const { 113 ASSERT(is_valid()); 114 return 1 << code_; 115 } 116 117 void set_code(int code) { 118 code_ = code; 119 ASSERT(is_valid()); 120 } 121 122 // Unfortunately we can't make this private in a struct. 123 int code_; 124}; 125 126const Register no_reg = { -1 }; 127 128const Register r0 = { 0 }; 129const Register r1 = { 1 }; 130const Register r2 = { 2 }; 131const Register r3 = { 3 }; 132const Register r4 = { 4 }; 133const Register r5 = { 5 }; 134const Register r6 = { 6 }; 135const Register r7 = { 7 }; 136const Register r8 = { 8 }; // Used as context register. 137const Register r9 = { 9 }; // Used as lithium codegen scratch register. 138const Register r10 = { 10 }; // Used as roots register. 139const Register fp = { 11 }; 140const Register ip = { 12 }; 141const Register sp = { 13 }; 142const Register lr = { 14 }; 143const Register pc = { 15 }; 144 145// Single word VFP register. 
146struct SwVfpRegister { 147 bool is_valid() const { return 0 <= code_ && code_ < 32; } 148 bool is(SwVfpRegister reg) const { return code_ == reg.code_; } 149 int code() const { 150 ASSERT(is_valid()); 151 return code_; 152 } 153 int bit() const { 154 ASSERT(is_valid()); 155 return 1 << code_; 156 } 157 void split_code(int* vm, int* m) const { 158 ASSERT(is_valid()); 159 *m = code_ & 0x1; 160 *vm = code_ >> 1; 161 } 162 163 int code_; 164}; 165 166 167// Double word VFP register. 168struct DwVfpRegister { 169 // d0 has been excluded from allocation. This is following ia32 170 // where xmm0 is excluded. This should be revisited. 171 // Currently d0 is used as a scratch register. 172 // d1 has also been excluded from allocation to be used as a scratch 173 // register as well. 174 static const int kNumRegisters = 16; 175 static const int kNumAllocatableRegisters = 15; 176 177 static int ToAllocationIndex(DwVfpRegister reg) { 178 ASSERT(reg.code() != 0); 179 return reg.code() - 1; 180 } 181 182 static DwVfpRegister FromAllocationIndex(int index) { 183 ASSERT(index >= 0 && index < kNumAllocatableRegisters); 184 return from_code(index + 1); 185 } 186 187 static const char* AllocationIndexToString(int index) { 188 ASSERT(index >= 0 && index < kNumAllocatableRegisters); 189 const char* const names[] = { 190 "d1", 191 "d2", 192 "d3", 193 "d4", 194 "d5", 195 "d6", 196 "d7", 197 "d8", 198 "d9", 199 "d10", 200 "d11", 201 "d12", 202 "d13", 203 "d14", 204 "d15" 205 }; 206 return names[index]; 207 } 208 209 static DwVfpRegister from_code(int code) { 210 DwVfpRegister r = { code }; 211 return r; 212 } 213 214 // Supporting d0 to d15, can be later extended to d31. 
215 bool is_valid() const { return 0 <= code_ && code_ < 16; } 216 bool is(DwVfpRegister reg) const { return code_ == reg.code_; } 217 SwVfpRegister low() const { 218 SwVfpRegister reg; 219 reg.code_ = code_ * 2; 220 221 ASSERT(reg.is_valid()); 222 return reg; 223 } 224 SwVfpRegister high() const { 225 SwVfpRegister reg; 226 reg.code_ = (code_ * 2) + 1; 227 228 ASSERT(reg.is_valid()); 229 return reg; 230 } 231 int code() const { 232 ASSERT(is_valid()); 233 return code_; 234 } 235 int bit() const { 236 ASSERT(is_valid()); 237 return 1 << code_; 238 } 239 void split_code(int* vm, int* m) const { 240 ASSERT(is_valid()); 241 *m = (code_ & 0x10) >> 4; 242 *vm = code_ & 0x0F; 243 } 244 245 int code_; 246}; 247 248 249typedef DwVfpRegister DoubleRegister; 250 251 252// Support for the VFP registers s0 to s31 (d0 to d15). 253// Note that "s(N):s(N+1)" is the same as "d(N/2)". 254const SwVfpRegister s0 = { 0 }; 255const SwVfpRegister s1 = { 1 }; 256const SwVfpRegister s2 = { 2 }; 257const SwVfpRegister s3 = { 3 }; 258const SwVfpRegister s4 = { 4 }; 259const SwVfpRegister s5 = { 5 }; 260const SwVfpRegister s6 = { 6 }; 261const SwVfpRegister s7 = { 7 }; 262const SwVfpRegister s8 = { 8 }; 263const SwVfpRegister s9 = { 9 }; 264const SwVfpRegister s10 = { 10 }; 265const SwVfpRegister s11 = { 11 }; 266const SwVfpRegister s12 = { 12 }; 267const SwVfpRegister s13 = { 13 }; 268const SwVfpRegister s14 = { 14 }; 269const SwVfpRegister s15 = { 15 }; 270const SwVfpRegister s16 = { 16 }; 271const SwVfpRegister s17 = { 17 }; 272const SwVfpRegister s18 = { 18 }; 273const SwVfpRegister s19 = { 19 }; 274const SwVfpRegister s20 = { 20 }; 275const SwVfpRegister s21 = { 21 }; 276const SwVfpRegister s22 = { 22 }; 277const SwVfpRegister s23 = { 23 }; 278const SwVfpRegister s24 = { 24 }; 279const SwVfpRegister s25 = { 25 }; 280const SwVfpRegister s26 = { 26 }; 281const SwVfpRegister s27 = { 27 }; 282const SwVfpRegister s28 = { 28 }; 283const SwVfpRegister s29 = { 29 }; 284const SwVfpRegister s30 = 
{ 30 }; 285const SwVfpRegister s31 = { 31 }; 286 287const DwVfpRegister no_dreg = { -1 }; 288const DwVfpRegister d0 = { 0 }; 289const DwVfpRegister d1 = { 1 }; 290const DwVfpRegister d2 = { 2 }; 291const DwVfpRegister d3 = { 3 }; 292const DwVfpRegister d4 = { 4 }; 293const DwVfpRegister d5 = { 5 }; 294const DwVfpRegister d6 = { 6 }; 295const DwVfpRegister d7 = { 7 }; 296const DwVfpRegister d8 = { 8 }; 297const DwVfpRegister d9 = { 9 }; 298const DwVfpRegister d10 = { 10 }; 299const DwVfpRegister d11 = { 11 }; 300const DwVfpRegister d12 = { 12 }; 301const DwVfpRegister d13 = { 13 }; 302const DwVfpRegister d14 = { 14 }; 303const DwVfpRegister d15 = { 15 }; 304 305 306// Coprocessor register 307struct CRegister { 308 bool is_valid() const { return 0 <= code_ && code_ < 16; } 309 bool is(CRegister creg) const { return code_ == creg.code_; } 310 int code() const { 311 ASSERT(is_valid()); 312 return code_; 313 } 314 int bit() const { 315 ASSERT(is_valid()); 316 return 1 << code_; 317 } 318 319 // Unfortunately we can't make this private in a struct. 
320 int code_; 321}; 322 323 324const CRegister no_creg = { -1 }; 325 326const CRegister cr0 = { 0 }; 327const CRegister cr1 = { 1 }; 328const CRegister cr2 = { 2 }; 329const CRegister cr3 = { 3 }; 330const CRegister cr4 = { 4 }; 331const CRegister cr5 = { 5 }; 332const CRegister cr6 = { 6 }; 333const CRegister cr7 = { 7 }; 334const CRegister cr8 = { 8 }; 335const CRegister cr9 = { 9 }; 336const CRegister cr10 = { 10 }; 337const CRegister cr11 = { 11 }; 338const CRegister cr12 = { 12 }; 339const CRegister cr13 = { 13 }; 340const CRegister cr14 = { 14 }; 341const CRegister cr15 = { 15 }; 342 343 344// Coprocessor number 345enum Coprocessor { 346 p0 = 0, 347 p1 = 1, 348 p2 = 2, 349 p3 = 3, 350 p4 = 4, 351 p5 = 5, 352 p6 = 6, 353 p7 = 7, 354 p8 = 8, 355 p9 = 9, 356 p10 = 10, 357 p11 = 11, 358 p12 = 12, 359 p13 = 13, 360 p14 = 14, 361 p15 = 15 362}; 363 364 365// ----------------------------------------------------------------------------- 366// Machine instruction Operands 367 368// Class Operand represents a shifter operand in data processing instructions 369class Operand BASE_EMBEDDED { 370 public: 371 // immediate 372 INLINE(explicit Operand(int32_t immediate, 373 RelocInfo::Mode rmode = RelocInfo::NONE)); 374 INLINE(explicit Operand(const ExternalReference& f)); 375 INLINE(explicit Operand(const char* s)); 376 explicit Operand(Handle<Object> handle); 377 INLINE(explicit Operand(Smi* value)); 378 379 // rm 380 INLINE(explicit Operand(Register rm)); 381 382 // rm <shift_op> shift_imm 383 explicit Operand(Register rm, ShiftOp shift_op, int shift_imm); 384 385 // rm <shift_op> rs 386 explicit Operand(Register rm, ShiftOp shift_op, Register rs); 387 388 // Return true if this is a register operand. 389 INLINE(bool is_reg() const); 390 391 // Return true if this operand fits in one instruction so that no 392 // 2-instruction solution with a load into the ip register is necessary. 
If 393 // the instruction this operand is used for is a MOV or MVN instruction the 394 // actual instruction to use is required for this calculation. For other 395 // instructions instr is ignored. 396 bool is_single_instruction(Instr instr = 0) const; 397 bool must_use_constant_pool() const; 398 399 inline int32_t immediate() const { 400 ASSERT(!rm_.is_valid()); 401 return imm32_; 402 } 403 404 Register rm() const { return rm_; } 405 Register rs() const { return rs_; } 406 ShiftOp shift_op() const { return shift_op_; } 407 408 private: 409 Register rm_; 410 Register rs_; 411 ShiftOp shift_op_; 412 int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg 413 int32_t imm32_; // valid if rm_ == no_reg 414 RelocInfo::Mode rmode_; 415 416 friend class Assembler; 417}; 418 419 420// Class MemOperand represents a memory operand in load and store instructions 421class MemOperand BASE_EMBEDDED { 422 public: 423 // [rn +/- offset] Offset/NegOffset 424 // [rn +/- offset]! PreIndex/NegPreIndex 425 // [rn], +/- offset PostIndex/NegPostIndex 426 // offset is any signed 32-bit value; offset is first loaded to register ip if 427 // it does not fit the addressing mode (12-bit unsigned and sign bit) 428 explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset); 429 430 // [rn +/- rm] Offset/NegOffset 431 // [rn +/- rm]! PreIndex/NegPreIndex 432 // [rn], +/- rm PostIndex/NegPostIndex 433 explicit MemOperand(Register rn, Register rm, AddrMode am = Offset); 434 435 // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset 436 // [rn +/- rm <shift_op> shift_imm]! 
PreIndex/NegPreIndex 437 // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex 438 explicit MemOperand(Register rn, Register rm, 439 ShiftOp shift_op, int shift_imm, AddrMode am = Offset); 440 441 void set_offset(int32_t offset) { 442 ASSERT(rm_.is(no_reg)); 443 offset_ = offset; 444 } 445 446 uint32_t offset() const { 447 ASSERT(rm_.is(no_reg)); 448 return offset_; 449 } 450 451 Register rn() const { return rn_; } 452 Register rm() const { return rm_; } 453 454 bool OffsetIsUint12Encodable() const { 455 return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_); 456 } 457 458 private: 459 Register rn_; // base 460 Register rm_; // register offset 461 int32_t offset_; // valid if rm_ == no_reg 462 ShiftOp shift_op_; 463 int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg 464 AddrMode am_; // bits P, U, and W 465 466 friend class Assembler; 467}; 468 469// CpuFeatures keeps track of which features are supported by the target CPU. 470// Supported features must be enabled by a Scope before use. 471class CpuFeatures { 472 public: 473 // Detect features of the target CPU. Set safe defaults if the serializer 474 // is enabled (snapshots must be portable). 475 void Probe(bool portable); 476 477 // Check whether a feature is supported by the target CPU. 478 bool IsSupported(CpuFeature f) const { 479 if (f == VFP3 && !FLAG_enable_vfp3) return false; 480 return (supported_ & (1u << f)) != 0; 481 } 482 483 // Check whether a feature is currently enabled. 484 bool IsEnabled(CpuFeature f) const { 485 return (enabled_ & (1u << f)) != 0; 486 } 487 488 // Enable a specified feature within a scope. 
489 class Scope BASE_EMBEDDED { 490#ifdef DEBUG 491 public: 492 explicit Scope(CpuFeature f) 493 : cpu_features_(Isolate::Current()->cpu_features()), 494 isolate_(Isolate::Current()) { 495 ASSERT(cpu_features_->IsSupported(f)); 496 ASSERT(!Serializer::enabled() || 497 (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0); 498 old_enabled_ = cpu_features_->enabled_; 499 cpu_features_->enabled_ |= 1u << f; 500 } 501 ~Scope() { 502 ASSERT_EQ(Isolate::Current(), isolate_); 503 cpu_features_->enabled_ = old_enabled_; 504 } 505 private: 506 unsigned old_enabled_; 507 CpuFeatures* cpu_features_; 508 Isolate* isolate_; 509#else 510 public: 511 explicit Scope(CpuFeature f) {} 512#endif 513 }; 514 515 private: 516 CpuFeatures(); 517 518 unsigned supported_; 519 unsigned enabled_; 520 unsigned found_by_runtime_probing_; 521 522 friend class Isolate; 523 524 DISALLOW_COPY_AND_ASSIGN(CpuFeatures); 525}; 526 527 528extern const Instr kMovLrPc; 529extern const Instr kLdrPCMask; 530extern const Instr kLdrPCPattern; 531extern const Instr kBlxRegMask; 532extern const Instr kBlxRegPattern; 533 534extern const Instr kMovMvnMask; 535extern const Instr kMovMvnPattern; 536extern const Instr kMovMvnFlip; 537 538extern const Instr kMovLeaveCCMask; 539extern const Instr kMovLeaveCCPattern; 540extern const Instr kMovwMask; 541extern const Instr kMovwPattern; 542extern const Instr kMovwLeaveCCFlip; 543 544extern const Instr kCmpCmnMask; 545extern const Instr kCmpCmnPattern; 546extern const Instr kCmpCmnFlip; 547extern const Instr kAddSubFlip; 548extern const Instr kAndBicFlip; 549 550 551 552class Assembler : public AssemblerBase { 553 public: 554 // Create an assembler. Instructions and relocation information are emitted 555 // into a buffer, with the instructions starting from the beginning and the 556 // relocation information starting from the end of the buffer. See CodeDesc 557 // for a detailed comment on the layout (globals.h). 
558 // 559 // If the provided buffer is NULL, the assembler allocates and grows its own 560 // buffer, and buffer_size determines the initial buffer size. The buffer is 561 // owned by the assembler and deallocated upon destruction of the assembler. 562 // 563 // If the provided buffer is not NULL, the assembler uses the provided buffer 564 // for code generation and assumes its size to be buffer_size. If the buffer 565 // is too small, a fatal error occurs. No deallocation of the buffer is done 566 // upon destruction of the assembler. 567 Assembler(void* buffer, int buffer_size); 568 ~Assembler(); 569 570 // Overrides the default provided by FLAG_debug_code. 571 void set_emit_debug_code(bool value) { emit_debug_code_ = value; } 572 573 // GetCode emits any pending (non-emitted) code and fills the descriptor 574 // desc. GetCode() is idempotent; it returns the same result if no other 575 // Assembler functions are invoked in between GetCode() calls. 576 void GetCode(CodeDesc* desc); 577 578 // Label operations & relative jumps (PPUM Appendix D) 579 // 580 // Takes a branch opcode (cc) and a label (L) and generates 581 // either a backward branch or a forward branch and links it 582 // to the label fixup chain. Usage: 583 // 584 // Label L; // unbound label 585 // j(cc, &L); // forward branch to unbound label 586 // bind(&L); // bind label to the current pc 587 // j(cc, &L); // backward branch to bound label 588 // bind(&L); // illegal: a label may be bound only once 589 // 590 // Note: The same Label can be used for forward and backward branches 591 // but it may be bound only once. 592 593 void bind(Label* L); // binds an unbound label L to the current code position 594 595 // Returns the branch offset to the given label from the current code position 596 // Links the label to the current position if it is still unbound 597 // Manages the jump elimination optimization if the second parameter is true. 
598 int branch_offset(Label* L, bool jump_elimination_allowed); 599 600 // Puts a labels target address at the given position. 601 // The high 8 bits are set to zero. 602 void label_at_put(Label* L, int at_offset); 603 604 // Return the address in the constant pool of the code target address used by 605 // the branch/call instruction at pc. 606 INLINE(static Address target_address_address_at(Address pc)); 607 608 // Read/Modify the code target address in the branch/call instruction at pc. 609 INLINE(static Address target_address_at(Address pc)); 610 INLINE(static void set_target_address_at(Address pc, Address target)); 611 612 // This sets the branch destination (which is in the constant pool on ARM). 613 // This is for calls and branches within generated code. 614 inline static void set_target_at(Address constant_pool_entry, Address target); 615 616 // This sets the branch destination (which is in the constant pool on ARM). 617 // This is for calls and branches to runtime code. 618 inline static void set_external_target_at(Address constant_pool_entry, 619 Address target) { 620 set_target_at(constant_pool_entry, target); 621 } 622 623 // Here we are patching the address in the constant pool, not the actual call 624 // instruction. The address in the constant pool is the same size as a 625 // pointer. 626 static const int kCallTargetSize = kPointerSize; 627 static const int kExternalTargetSize = kPointerSize; 628 629 // Size of an instruction. 630 static const int kInstrSize = sizeof(Instr); 631 632 // Distance between the instruction referring to the address of the call 633 // target and the return address. 634#ifdef USE_BLX 635 // Call sequence is: 636 // ldr ip, [pc, #...] @ call address 637 // blx ip 638 // @ return address 639 static const int kCallTargetAddressOffset = 2 * kInstrSize; 640#else 641 // Call sequence is: 642 // mov lr, pc 643 // ldr pc, [pc, #...] 
@ call address 644 // @ return address 645 static const int kCallTargetAddressOffset = kInstrSize; 646#endif 647 648 // Distance between start of patched return sequence and the emitted address 649 // to jump to. 650#ifdef USE_BLX 651 // Patched return sequence is: 652 // ldr ip, [pc, #0] @ emited address and start 653 // blx ip 654 static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize; 655#else 656 // Patched return sequence is: 657 // mov lr, pc @ start of sequence 658 // ldr pc, [pc, #-4] @ emited address 659 static const int kPatchReturnSequenceAddressOffset = kInstrSize; 660#endif 661 662 // Distance between start of patched debug break slot and the emitted address 663 // to jump to. 664#ifdef USE_BLX 665 // Patched debug break slot code is: 666 // ldr ip, [pc, #0] @ emited address and start 667 // blx ip 668 static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize; 669#else 670 // Patched debug break slot code is: 671 // mov lr, pc @ start of sequence 672 // ldr pc, [pc, #-4] @ emited address 673 static const int kPatchDebugBreakSlotAddressOffset = kInstrSize; 674#endif 675 676 // Difference between address of current opcode and value read from pc 677 // register. 678 static const int kPcLoadDelta = 8; 679 680 static const int kJSReturnSequenceInstructions = 4; 681 static const int kDebugBreakSlotInstructions = 3; 682 static const int kDebugBreakSlotLength = 683 kDebugBreakSlotInstructions * kInstrSize; 684 685 // --------------------------------------------------------------------------- 686 // Code generation 687 688 // Insert the smallest number of nop instructions 689 // possible to align the pc offset to a multiple 690 // of m. m must be a power of 2 (>= 4). 691 void Align(int m); 692 // Aligns code to something that's optimal for a jump target for the platform. 
693 void CodeTargetAlign(); 694 695 // Branch instructions 696 void b(int branch_offset, Condition cond = al); 697 void bl(int branch_offset, Condition cond = al); 698 void blx(int branch_offset); // v5 and above 699 void blx(Register target, Condition cond = al); // v5 and above 700 void bx(Register target, Condition cond = al); // v5 and above, plus v4t 701 702 // Convenience branch instructions using labels 703 void b(Label* L, Condition cond = al) { 704 b(branch_offset(L, cond == al), cond); 705 } 706 void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); } 707 void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); } 708 void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); } 709 void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above 710 711 // Data-processing instructions 712 713 void and_(Register dst, Register src1, const Operand& src2, 714 SBit s = LeaveCC, Condition cond = al); 715 716 void eor(Register dst, Register src1, const Operand& src2, 717 SBit s = LeaveCC, Condition cond = al); 718 719 void sub(Register dst, Register src1, const Operand& src2, 720 SBit s = LeaveCC, Condition cond = al); 721 void sub(Register dst, Register src1, Register src2, 722 SBit s = LeaveCC, Condition cond = al) { 723 sub(dst, src1, Operand(src2), s, cond); 724 } 725 726 void rsb(Register dst, Register src1, const Operand& src2, 727 SBit s = LeaveCC, Condition cond = al); 728 729 void add(Register dst, Register src1, const Operand& src2, 730 SBit s = LeaveCC, Condition cond = al); 731 void add(Register dst, Register src1, Register src2, 732 SBit s = LeaveCC, Condition cond = al) { 733 add(dst, src1, Operand(src2), s, cond); 734 } 735 736 void adc(Register dst, Register src1, const Operand& src2, 737 SBit s = LeaveCC, Condition cond = al); 738 739 void sbc(Register dst, Register src1, const Operand& src2, 740 SBit s = LeaveCC, Condition cond = al); 741 742 void rsc(Register dst, Register src1, const 
Operand& src2, 743 SBit s = LeaveCC, Condition cond = al); 744 745 void tst(Register src1, const Operand& src2, Condition cond = al); 746 void tst(Register src1, Register src2, Condition cond = al) { 747 tst(src1, Operand(src2), cond); 748 } 749 750 void teq(Register src1, const Operand& src2, Condition cond = al); 751 752 void cmp(Register src1, const Operand& src2, Condition cond = al); 753 void cmp(Register src1, Register src2, Condition cond = al) { 754 cmp(src1, Operand(src2), cond); 755 } 756 void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al); 757 758 void cmn(Register src1, const Operand& src2, Condition cond = al); 759 760 void orr(Register dst, Register src1, const Operand& src2, 761 SBit s = LeaveCC, Condition cond = al); 762 void orr(Register dst, Register src1, Register src2, 763 SBit s = LeaveCC, Condition cond = al) { 764 orr(dst, src1, Operand(src2), s, cond); 765 } 766 767 void mov(Register dst, const Operand& src, 768 SBit s = LeaveCC, Condition cond = al); 769 void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) { 770 mov(dst, Operand(src), s, cond); 771 } 772 773 // ARMv7 instructions for loading a 32 bit immediate in two instructions. 774 // This may actually emit a different mov instruction, but on an ARMv7 it 775 // is guaranteed to only emit one instruction. 776 void movw(Register reg, uint32_t immediate, Condition cond = al); 777 // The constant for movt should be in the range 0-0xffff. 
778 void movt(Register reg, uint32_t immediate, Condition cond = al); 779 780 void bic(Register dst, Register src1, const Operand& src2, 781 SBit s = LeaveCC, Condition cond = al); 782 783 void mvn(Register dst, const Operand& src, 784 SBit s = LeaveCC, Condition cond = al); 785 786 // Multiply instructions 787 788 void mla(Register dst, Register src1, Register src2, Register srcA, 789 SBit s = LeaveCC, Condition cond = al); 790 791 void mul(Register dst, Register src1, Register src2, 792 SBit s = LeaveCC, Condition cond = al); 793 794 void smlal(Register dstL, Register dstH, Register src1, Register src2, 795 SBit s = LeaveCC, Condition cond = al); 796 797 void smull(Register dstL, Register dstH, Register src1, Register src2, 798 SBit s = LeaveCC, Condition cond = al); 799 800 void umlal(Register dstL, Register dstH, Register src1, Register src2, 801 SBit s = LeaveCC, Condition cond = al); 802 803 void umull(Register dstL, Register dstH, Register src1, Register src2, 804 SBit s = LeaveCC, Condition cond = al); 805 806 // Miscellaneous arithmetic instructions 807 808 void clz(Register dst, Register src, Condition cond = al); // v5 and above 809 810 // Saturating instructions. v6 and above. 811 812 // Unsigned saturate. 813 // 814 // Saturate an optionally shifted signed value to an unsigned range. 815 // 816 // usat dst, #satpos, src 817 // usat dst, #satpos, src, lsl #sh 818 // usat dst, #satpos, src, asr #sh 819 // 820 // Register dst will contain: 821 // 822 // 0, if s < 0 823 // (1 << satpos) - 1, if s > ((1 << satpos) - 1) 824 // s, otherwise 825 // 826 // where s is the contents of src after shifting (if used.) 827 void usat(Register dst, int satpos, const Operand& src, Condition cond = al); 828 829 // Bitfield manipulation instructions. v7 and above. 
830 831 void ubfx(Register dst, Register src, int lsb, int width, 832 Condition cond = al); 833 834 void sbfx(Register dst, Register src, int lsb, int width, 835 Condition cond = al); 836 837 void bfc(Register dst, int lsb, int width, Condition cond = al); 838 839 void bfi(Register dst, Register src, int lsb, int width, 840 Condition cond = al); 841 842 // Status register access instructions 843 844 void mrs(Register dst, SRegister s, Condition cond = al); 845 void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al); 846 847 // Load/Store instructions 848 void ldr(Register dst, const MemOperand& src, Condition cond = al); 849 void str(Register src, const MemOperand& dst, Condition cond = al); 850 void ldrb(Register dst, const MemOperand& src, Condition cond = al); 851 void strb(Register src, const MemOperand& dst, Condition cond = al); 852 void ldrh(Register dst, const MemOperand& src, Condition cond = al); 853 void strh(Register src, const MemOperand& dst, Condition cond = al); 854 void ldrsb(Register dst, const MemOperand& src, Condition cond = al); 855 void ldrsh(Register dst, const MemOperand& src, Condition cond = al); 856 void ldrd(Register dst1, 857 Register dst2, 858 const MemOperand& src, Condition cond = al); 859 void strd(Register src1, 860 Register src2, 861 const MemOperand& dst, Condition cond = al); 862 863 // Load/Store multiple instructions 864 void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al); 865 void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al); 866 867 // Exception-generating instructions and debugging support 868 void stop(const char* msg, 869 Condition cond = al, 870 int32_t code = kDefaultStopCode); 871 872 void bkpt(uint32_t imm16); // v5 and above 873 void svc(uint32_t imm24, Condition cond = al); 874 875 // Coprocessor instructions 876 877 void cdp(Coprocessor coproc, int opcode_1, 878 CRegister crd, CRegister crn, CRegister crm, 879 int opcode_2, Condition cond = 
al); 880 881 void cdp2(Coprocessor coproc, int opcode_1, 882 CRegister crd, CRegister crn, CRegister crm, 883 int opcode_2); // v5 and above 884 885 void mcr(Coprocessor coproc, int opcode_1, 886 Register rd, CRegister crn, CRegister crm, 887 int opcode_2 = 0, Condition cond = al); 888 889 void mcr2(Coprocessor coproc, int opcode_1, 890 Register rd, CRegister crn, CRegister crm, 891 int opcode_2 = 0); // v5 and above 892 893 void mrc(Coprocessor coproc, int opcode_1, 894 Register rd, CRegister crn, CRegister crm, 895 int opcode_2 = 0, Condition cond = al); 896 897 void mrc2(Coprocessor coproc, int opcode_1, 898 Register rd, CRegister crn, CRegister crm, 899 int opcode_2 = 0); // v5 and above 900 901 void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src, 902 LFlag l = Short, Condition cond = al); 903 void ldc(Coprocessor coproc, CRegister crd, Register base, int option, 904 LFlag l = Short, Condition cond = al); 905 906 void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src, 907 LFlag l = Short); // v5 and above 908 void ldc2(Coprocessor coproc, CRegister crd, Register base, int option, 909 LFlag l = Short); // v5 and above 910 911 void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst, 912 LFlag l = Short, Condition cond = al); 913 void stc(Coprocessor coproc, CRegister crd, Register base, int option, 914 LFlag l = Short, Condition cond = al); 915 916 void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst, 917 LFlag l = Short); // v5 and above 918 void stc2(Coprocessor coproc, CRegister crd, Register base, int option, 919 LFlag l = Short); // v5 and above 920 921 // Support for VFP. 922 // All these APIs support S0 to S31 and D0 to D15. 923 // Currently these APIs do not support extended D registers, i.e, D16 to D31. 924 // However, some simple modifications can allow 925 // these APIs to support D16 to D31. 
  // Support for VFP (the ARM floating-point coprocessor).
  // Dw*/Sw* prefixes denote double- and single-precision VFP registers.
  // Every instruction takes an optional condition code, defaulting to
  // "always" (al).

  // Load a double-precision register from memory, addressed either as
  // base register plus immediate offset or as a general MemOperand.
  void vldr(const DwVfpRegister dst,
            const Register base,
            int offset,
            const Condition cond = al);
  void vldr(const DwVfpRegister dst,
            const MemOperand& src,
            const Condition cond = al);

  // Single-precision variants of vldr.
  void vldr(const SwVfpRegister dst,
            const Register base,
            int offset,
            const Condition cond = al);
  void vldr(const SwVfpRegister dst,
            const MemOperand& src,
            const Condition cond = al);

  // Store a double-precision register to memory.
  void vstr(const DwVfpRegister src,
            const Register base,
            int offset,
            const Condition cond = al);
  void vstr(const DwVfpRegister src,
            const MemOperand& dst,
            const Condition cond = al);

  // Single-precision variants of vstr.
  void vstr(const SwVfpRegister src,
            const Register base,
            int offset,
            const Condition cond = al);
  void vstr(const SwVfpRegister src,
            const MemOperand& dst,
            const Condition cond = al);

  // Move an immediate double value into a VFP register.
  void vmov(const DwVfpRegister dst,
            double imm,
            const Condition cond = al);
  // Register-to-register moves within the VFP register file.
  void vmov(const SwVfpRegister dst,
            const SwVfpRegister src,
            const Condition cond = al);
  void vmov(const DwVfpRegister dst,
            const DwVfpRegister src,
            const Condition cond = al);
  // Transfers between a double-precision register and a pair of core
  // registers (both directions).
  void vmov(const DwVfpRegister dst,
            const Register src1,
            const Register src2,
            const Condition cond = al);
  void vmov(const Register dst1,
            const Register dst2,
            const DwVfpRegister src,
            const Condition cond = al);
  // Transfers between a single-precision register and a core register
  // (both directions).
  void vmov(const SwVfpRegister dst,
            const Register src,
            const Condition cond = al);
  void vmov(const Register dst,
            const SwVfpRegister src,
            const Condition cond = al);

  // VFP conversions. The suffix encodes <destination format>_<source
  // format>; integer operands (s32/u32) are held in single-precision
  // VFP registers. The rounding behavior is selected by `mode`.
  void vcvt_f64_s32(const DwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_s32(const SwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f64_u32(const DwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_s32_f64(const SwVfpRegister dst,
                    const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_u32_f64(const SwVfpRegister dst,
                    const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f64_f32(const DwVfpRegister dst,
                    const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_f64(const SwVfpRegister dst,
                    const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);

  // Double-precision arithmetic.
  void vneg(const DwVfpRegister dst,
            const DwVfpRegister src,
            const Condition cond = al);
  void vabs(const DwVfpRegister dst,
            const DwVfpRegister src,
            const Condition cond = al);
  void vadd(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vsub(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vmul(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vdiv(const DwVfpRegister dst,
            const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  // Compare src1 against another register or a double immediate.
  void vcmp(const DwVfpRegister src1,
            const DwVfpRegister src2,
            const Condition cond = al);
  void vcmp(const DwVfpRegister src1,
            const double src2,
            const Condition cond = al);
  // Transfer the VFP status/control register to (vmrs) or from (vmsr)
  // a core register.
  void vmrs(const Register dst,
            const Condition cond = al);
  void vmsr(const Register dst,
            const Condition cond = al);
  void vsqrt(const DwVfpRegister dst,
             const DwVfpRegister src,
             const Condition cond = al);

  // Pseudo instructions
detect certain 1050 // states of the generated code. 1051 enum NopMarkerTypes { 1052 NON_MARKING_NOP = 0, 1053 DEBUG_BREAK_NOP, 1054 // IC markers. 1055 PROPERTY_ACCESS_INLINED, 1056 PROPERTY_ACCESS_INLINED_CONTEXT, 1057 PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, 1058 // Helper values. 1059 LAST_CODE_MARKER, 1060 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED 1061 }; 1062 1063 void nop(int type = 0); // 0 is the default non-marking type. 1064 1065 void push(Register src, Condition cond = al) { 1066 str(src, MemOperand(sp, 4, NegPreIndex), cond); 1067 } 1068 1069 void pop(Register dst, Condition cond = al) { 1070 ldr(dst, MemOperand(sp, 4, PostIndex), cond); 1071 } 1072 1073 void pop() { 1074 add(sp, sp, Operand(kPointerSize)); 1075 } 1076 1077 // Jump unconditionally to given label. 1078 void jmp(Label* L) { b(L, al); } 1079 1080 // Check the code size generated from label to here. 1081 int InstructionsGeneratedSince(Label* l) { 1082 return (pc_offset() - l->pos()) / kInstrSize; 1083 } 1084 1085 // Check whether an immediate fits an addressing mode 1 instruction. 1086 bool ImmediateFitsAddrMode1Instruction(int32_t imm32); 1087 1088 // Class for scoping postponing the constant pool generation. 1089 class BlockConstPoolScope { 1090 public: 1091 explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) { 1092 assem_->StartBlockConstPool(); 1093 } 1094 ~BlockConstPoolScope() { 1095 assem_->EndBlockConstPool(); 1096 } 1097 1098 private: 1099 Assembler* assem_; 1100 1101 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope); 1102 }; 1103 1104 // Postpone the generation of the constant pool for the specified number of 1105 // instructions. 1106 void BlockConstPoolFor(int instructions); 1107 1108 // Debugging 1109 1110 // Mark address of the ExitJSFrame code. 1111 void RecordJSReturn(); 1112 1113 // Mark address of a debug break slot. 1114 void RecordDebugBreakSlot(); 1115 1116 // Record a comment relocation entry that can be used by a disassembler. 
  // Use --code-comments to enable.
  void RecordComment(const char* msg);

  // Writes a single byte or word of data in the code stream. Used
  // for inline tables, e.g., jump-tables. The constant pool should be
  // emitted before any use of db and dd to ensure that constant pools
  // are not emitted as part of the tables generated.
  void db(uint8_t data);
  void dd(uint32_t data);

  // Offset of the current emission position from the buffer start.
  int pc_offset() const { return pc_ - buffer_; }

  PositionsRecorder* positions_recorder() { return &positions_recorder_; }

  // True if the last `instructions` emitted instructions may be rewritten.
  // Requires peephole optimization to be enabled, no label bound inside the
  // window, and no relocation info recorded for any instruction in it.
  bool can_peephole_optimize(int instructions) {
    if (!allow_peephole_optimization_) return false;
    if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
    return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
  }

  // Read/patch instructions
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  // Static decoding helpers: classify an instruction word, extract or
  // rewrite its fields. Used when patching already-emitted code.
  static Condition GetCondition(Instr instr);
  static bool IsBranch(Instr instr);
  static int GetBranchOffset(Instr instr);
  static bool IsLdrRegisterImmediate(Instr instr);
  static int GetLdrRegisterImmediateOffset(Instr instr);
  static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
  static bool IsStrRegisterImmediate(Instr instr);
  static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
  static bool IsAddRegisterImmediate(Instr instr);
  static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
  static Register GetRd(Instr instr);
  static Register GetRn(Instr instr);
  static Register GetRm(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsPop(Instr instr);
  static bool IsStrRegFpOffset(Instr instr);
  static bool IsLdrRegFpOffset(Instr instr);
  static bool IsStrRegFpNegOffset(Instr instr);
  static bool
      IsLdrRegFpNegOffset(Instr instr);
  static bool IsLdrPcImmediateOffset(Instr instr);
  static bool IsTstImmediate(Instr instr);
  static bool IsCmpRegister(Instr instr);
  static bool IsCmpImmediate(Instr instr);
  static Register GetCmpImmediateRegister(Instr instr);
  static int GetCmpImmediateRawImmediate(Instr instr);
  static bool IsNop(Instr instr, int type = NON_MARKING_NOP);

  // Check if is time to emit a constant pool for pending reloc info entries
  void CheckConstPool(bool force_emit, bool require_jump);

 protected:
  bool emit_debug_code() const { return emit_debug_code_; }

  // Bytes left between the emission position and the relocation writer;
  // the buffer must grow before this runs out.
  int buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Read/patch instructions at a buffer-relative position.
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }

  // Decode branch instruction at pos and return branch target pos
  int target_at(int pos);

  // Patch branch instruction at pos to branch to given branch target pos
  void target_at_put(int pos, int target_pos);

  // Block the emission of the constant pool before pc_offset.
  // Only ever moves the blocking boundary forward.
  void BlockConstPoolBefore(int pc_offset) {
    if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
  }

  // Nested blocking of constant pool emission; see BlockConstPoolScope.
  void StartBlockConstPool() {
    const_pool_blocked_nesting_++;
  }
  void EndBlockConstPool() {
    const_pool_blocked_nesting_--;
  }
  bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }

 private:
  // Code buffer:
  // The buffer into which code and relocation info are generated.
  byte* buffer_;
  int buffer_size_;
  // True if the assembler owns the buffer, false if buffer is external.
  bool own_buffer_;

  // Buffer size and constant pool distance are checked together at regular
  // intervals of kBufferCheckInterval emitted bytes
  static const int kBufferCheckInterval = 1*KB/2;
  int next_buffer_check_;  // pc offset of next buffer check

  // Code generation
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static const int kGap = 32;
  byte* pc_;  // the program counter; moves forward

  // Constant pool generation
  // Pools are emitted in the instruction stream, preferably after unconditional
  // jumps or after returns from functions (in dead code locations).
  // If a long code sequence does not contain unconditional jumps, it is
  // necessary to emit the constant pool before the pool gets too far from the
  // location it is accessed from. In this case, we emit a jump over the emitted
  // constant pool.
  // Constants in the pool may be addresses of functions that gets relocated;
  // if so, a relocation info entry is associated to the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and that we rely on some slop to not overrun buffers.
  static const int kCheckConstIntervalInst = 32;
  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;


  // Pools are emitted after function return and in dead code at (more or less)
  // regular intervals of kDistBetweenPools bytes
  static const int kDistBetweenPools = 1*KB;

  // Constants in pools are accessed via pc relative addressing, which can
  // reach +/-4KB thereby defining a maximum distance between the instruction
  // and the accessed constant. We satisfy this constraint by limiting the
  // distance between pools.
  static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;

  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;  // Block emission before this pc offset.

  // Keep track of the last emitted pool to guarantee a maximal distance
  int last_const_pool_end_;  // pc offset following the last constant pool

  // Relocation info generation
  // Each relocation is encoded as a variable size value
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;
  // Relocation info records are also used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These pending relocation info records are temporarily
  // stored in a separate buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need one
  // pending relocation entry per instruction.
  // Worst case: one pending reloc-info entry per instruction between pools.
  static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
  RelocInfo prinfo_[kMaxNumPRInfo];  // the buffer of pending relocation info
  int num_prinfo_;  // number of pending reloc info entries in the buffer

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

  // Code emission
  inline void CheckBuffer();
  void GrowBuffer();
  inline void emit(Instr x);

  // Instruction generation
  // Helpers that encode an operand per the ARM addressing-mode groups
  // and emit the resulting instruction word.
  void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
  void addrmod2(Instr instr, Register rd, const MemOperand& x);
  void addrmod3(Instr instr, Register rd, const MemOperand& x);
  void addrmod4(Instr instr, Register rn, RegList rl);
  void addrmod5(Instr instr, CRegister crd, const MemOperand& x);

  // Labels
  void print(Label* L);
  void bind_to(Label* L, int pos);
  void link_to(Label* L, Label* appendix);
  void next(Label* L);

  // Record reloc info for current pc_
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

  friend class RegExpMacroAssemblerARM;
  friend class RelocInfo;
  friend class CodePatcher;
  friend class BlockConstPoolScope;

  PositionsRecorder positions_recorder_;
  bool allow_peephole_optimization_;
  bool emit_debug_code_;
  friend class PositionsRecorder;
  friend class EnsureSpace;
};


// RAII helper: ensures the assembler buffer has room before emission by
// triggering a buffer check on construction.
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBuffer();
  }
};


} }  // namespace v8::internal

#endif  // V8_ARM_ASSEMBLER_ARM_H_