assembler-mips.h revision 3fb3ca8c7ca439d408449a395897395c0faae8d1
1// Copyright (c) 1994-2006 Sun Microsystems Inc. 2// All Rights Reserved. 3// 4// Redistribution and use in source and binary forms, with or without 5// modification, are permitted provided that the following conditions are 6// met: 7// 8// - Redistributions of source code must retain the above copyright notice, 9// this list of conditions and the following disclaimer. 10// 11// - Redistribution in binary form must reproduce the above copyright 12// notice, this list of conditions and the following disclaimer in the 13// documentation and/or other materials provided with the distribution. 14// 15// - Neither the name of Sun Microsystems or the names of contributors may 16// be used to endorse or promote products derived from this software without 17// specific prior written permission. 18// 19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 31// The original source code covered by the above license above has been 32// modified significantly by Google Inc. 33// Copyright 2011 the V8 project authors. All rights reserved. 
34 35 36#ifndef V8_MIPS_ASSEMBLER_MIPS_H_ 37#define V8_MIPS_ASSEMBLER_MIPS_H_ 38 39#include <stdio.h> 40#include "assembler.h" 41#include "constants-mips.h" 42#include "serialize.h" 43 44namespace v8 { 45namespace internal { 46 47// CPU Registers. 48// 49// 1) We would prefer to use an enum, but enum values are assignment- 50// compatible with int, which has caused code-generation bugs. 51// 52// 2) We would prefer to use a class instead of a struct but we don't like 53// the register initialization to depend on the particular initialization 54// order (which appears to be different on OS X, Linux, and Windows for the 55// installed versions of C++ we tried). Using a struct permits C-style 56// "initialization". Also, the Register objects cannot be const as this 57// forces initialization stubs in MSVC, making us dependent on initialization 58// order. 59// 60// 3) By not using an enum, we are possibly preventing the compiler from 61// doing certain constant folds, which may significantly reduce the 62// code generated for some assembly instructions (because they boil down 63// to a few constants). If this is a problem, we could change the code 64// such that we use an enum in optimized mode, and the struct in debug 65// mode. This way we get the compile-time error checking in debug mode 66// and best performance in optimized code. 67 68 69// ----------------------------------------------------------------------------- 70// Implementation of Register and FPURegister. 71 72// Core register. 73struct Register { 74 static const int kNumRegisters = v8::internal::kNumRegisters; 75 static const int kNumAllocatableRegisters = 14; // v0 through t7. 76 static const int kSizeInBytes = 4; 77 78 static int ToAllocationIndex(Register reg) { 79 return reg.code() - 2; // zero_reg and 'at' are skipped. 
80 } 81 82 static Register FromAllocationIndex(int index) { 83 ASSERT(index >= 0 && index < kNumAllocatableRegisters); 84 return from_code(index + 2); // zero_reg and 'at' are skipped. 85 } 86 87 static const char* AllocationIndexToString(int index) { 88 ASSERT(index >= 0 && index < kNumAllocatableRegisters); 89 const char* const names[] = { 90 "v0", 91 "v1", 92 "a0", 93 "a1", 94 "a2", 95 "a3", 96 "t0", 97 "t1", 98 "t2", 99 "t3", 100 "t4", 101 "t5", 102 "t6", 103 "t7", 104 }; 105 return names[index]; 106 } 107 108 static Register from_code(int code) { 109 Register r = { code }; 110 return r; 111 } 112 113 bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } 114 bool is(Register reg) const { return code_ == reg.code_; } 115 int code() const { 116 ASSERT(is_valid()); 117 return code_; 118 } 119 int bit() const { 120 ASSERT(is_valid()); 121 return 1 << code_; 122 } 123 124 // Unfortunately we can't make this private in a struct. 125 int code_; 126}; 127 128const Register no_reg = { -1 }; 129 130const Register zero_reg = { 0 }; 131const Register at = { 1 }; 132const Register v0 = { 2 }; 133const Register v1 = { 3 }; 134const Register a0 = { 4 }; 135const Register a1 = { 5 }; 136const Register a2 = { 6 }; 137const Register a3 = { 7 }; 138const Register t0 = { 8 }; 139const Register t1 = { 9 }; 140const Register t2 = { 10 }; 141const Register t3 = { 11 }; 142const Register t4 = { 12 }; 143const Register t5 = { 13 }; 144const Register t6 = { 14 }; 145const Register t7 = { 15 }; 146const Register s0 = { 16 }; 147const Register s1 = { 17 }; 148const Register s2 = { 18 }; 149const Register s3 = { 19 }; 150const Register s4 = { 20 }; 151const Register s5 = { 21 }; 152const Register s6 = { 22 }; 153const Register s7 = { 23 }; 154const Register t8 = { 24 }; 155const Register t9 = { 25 }; 156const Register k0 = { 26 }; 157const Register k1 = { 27 }; 158const Register gp = { 28 }; 159const Register sp = { 29 }; 160const Register s8_fp = { 30 }; 161const Register 
ra = { 31 }; 162 163 164int ToNumber(Register reg); 165 166Register ToRegister(int num); 167 168// Coprocessor register. 169struct FPURegister { 170 static const int kNumRegisters = v8::internal::kNumFPURegisters; 171 // f0 has been excluded from allocation. This is following ia32 172 // where xmm0 is excluded. 173 static const int kNumAllocatableRegisters = 15; 174 175 static int ToAllocationIndex(FPURegister reg) { 176 ASSERT(reg.code() != 0); 177 ASSERT(reg.code() % 2 == 0); 178 return (reg.code() / 2) - 1; 179 } 180 181 static FPURegister FromAllocationIndex(int index) { 182 ASSERT(index >= 0 && index < kNumAllocatableRegisters); 183 return from_code((index + 1) * 2); 184 } 185 186 static const char* AllocationIndexToString(int index) { 187 ASSERT(index >= 0 && index < kNumAllocatableRegisters); 188 const char* const names[] = { 189 "f2", 190 "f4", 191 "f6", 192 "f8", 193 "f10", 194 "f12", 195 "f14", 196 "f16", 197 "f18", 198 "f20", 199 "f22", 200 "f24", 201 "f26", 202 "f28", 203 "f30" 204 }; 205 return names[index]; 206 } 207 208 static FPURegister from_code(int code) { 209 FPURegister r = { code }; 210 return r; 211 } 212 213 bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; } 214 bool is(FPURegister creg) const { return code_ == creg.code_; } 215 int code() const { 216 ASSERT(is_valid()); 217 return code_; 218 } 219 int bit() const { 220 ASSERT(is_valid()); 221 return 1 << code_; 222 } 223 void setcode(int f) { 224 code_ = f; 225 ASSERT(is_valid()); 226 } 227 // Unfortunately we can't make this private in a struct. 228 int code_; 229}; 230 231typedef FPURegister DoubleRegister; 232 233const FPURegister no_creg = { -1 }; 234 235const FPURegister f0 = { 0 }; // Return value in hard float mode. 
const FPURegister f1 = { 1 };
const FPURegister f2 = { 2 };
const FPURegister f3 = { 3 };
const FPURegister f4 = { 4 };
const FPURegister f5 = { 5 };
const FPURegister f6 = { 6 };
const FPURegister f7 = { 7 };
const FPURegister f8 = { 8 };
const FPURegister f9 = { 9 };
const FPURegister f10 = { 10 };
const FPURegister f11 = { 11 };
const FPURegister f12 = { 12 };  // Arg 0 in hard float mode.
const FPURegister f13 = { 13 };
const FPURegister f14 = { 14 };  // Arg 1 in hard float mode.
const FPURegister f15 = { 15 };
const FPURegister f16 = { 16 };
const FPURegister f17 = { 17 };
const FPURegister f18 = { 18 };
const FPURegister f19 = { 19 };
const FPURegister f20 = { 20 };
const FPURegister f21 = { 21 };
const FPURegister f22 = { 22 };
const FPURegister f23 = { 23 };
const FPURegister f24 = { 24 };
const FPURegister f25 = { 25 };
const FPURegister f26 = { 26 };
const FPURegister f27 = { 27 };
const FPURegister f28 = { 28 };
const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };

// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
  // Only the FCSR encoding is considered valid; every other code (including
  // kInvalidFPUControlRegister used by no_fpucreg) is rejected.
  bool is_valid() const { return code_ == kFCSRRegister; }
  bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
  int code() const {
    ASSERT(is_valid());
    return code_;
  }
  // Single-bit mask for register-set bitmaps.
  int bit() const {
    ASSERT(is_valid());
    return 1 << code_;
  }
  void setcode(int f) {
    code_ = f;
    ASSERT(is_valid());
  }
  // Unfortunately we can't make this private in a struct.
  int code_;
};

const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };


// -----------------------------------------------------------------------------
// Machine instruction Operands.
// Class Operand represents a shifter operand in data processing instructions.
class Operand BASE_EMBEDDED {
 public:
  // Immediate.
  INLINE(explicit Operand(int32_t immediate,
                          RelocInfo::Mode rmode = RelocInfo::NONE));
  INLINE(explicit Operand(const ExternalReference& f));
  INLINE(explicit Operand(const char* s));
  INLINE(explicit Operand(Object** opp));
  INLINE(explicit Operand(Context** cpp));
  explicit Operand(Handle<Object> handle);
  INLINE(explicit Operand(Smi* value));

  // Register.
  INLINE(explicit Operand(Register rm));

  // Return true if this is a register operand.
  INLINE(bool is_reg() const);

  Register rm() const { return rm_; }

 private:
  Register rm_;
  int32_t imm32_;  // Valid if rm_ == no_reg.
  RelocInfo::Mode rmode_;

  friend class Assembler;
  friend class MacroAssembler;
};


// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
 public:
  explicit MemOperand(Register rn, int32_t offset = 0);
  int32_t offset() const { return offset_; }

 private:
  int32_t offset_;

  friend class Assembler;
};


// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
class CpuFeatures : public AllStatic {
 public:
  // Detect features of the target CPU. Set safe defaults if the serializer
  // is enabled (snapshots must be portable).
  static void Probe();

  // Check whether a feature is supported by the target CPU.
  static bool IsSupported(CpuFeature f) {
    ASSERT(initialized_);
    // FPU support can be vetoed at runtime by the --enable-fpu flag.
    if (f == FPU && !FLAG_enable_fpu) return false;
    return (supported_ & (1u << f)) != 0;
  }


#ifdef DEBUG
  // Check whether a feature is currently enabled (by an active Scope).
  static bool IsEnabled(CpuFeature f) {
    ASSERT(initialized_);
    Isolate* isolate = Isolate::UncheckedCurrent();
    if (isolate == NULL) {
      // When no isolate is available, work as if we're running in
      // release mode.
      return IsSupported(f);
    }
    unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
    return (enabled & (1u << f)) != 0;
  }
#endif

  // Enable a specified feature within a scope.  In release builds this is a
  // no-op; in debug builds it records the feature in the current isolate's
  // enabled-features mask and restores the previous mask on destruction.
  class Scope BASE_EMBEDDED {
#ifdef DEBUG

   public:
    explicit Scope(CpuFeature f) {
      unsigned mask = 1u << f;
      ASSERT(CpuFeatures::IsSupported(f));
      // Features found only by runtime probing must not be enabled while
      // serializing: snapshots must stay portable.
      ASSERT(!Serializer::enabled() ||
             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
      isolate_ = Isolate::UncheckedCurrent();
      old_enabled_ = 0;
      if (isolate_ != NULL) {
        old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
      }
    }
    ~Scope() {
      // The scope must be destroyed on the same isolate it was created on.
      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
      if (isolate_ != NULL) {
        isolate_->set_enabled_cpu_features(old_enabled_);
      }
    }

   private:
    Isolate* isolate_;
    unsigned old_enabled_;
#else

   public:
    explicit Scope(CpuFeature f) {}
#endif
  };

  // Temporarily force-support a feature for the lifetime of the scope
  // (only effective while the serializer guarantees a single isolate).
  class TryForceFeatureScope BASE_EMBEDDED {
   public:
    explicit TryForceFeatureScope(CpuFeature f)
        : old_supported_(CpuFeatures::supported_) {
      if (CanForce()) {
        CpuFeatures::supported_ |= (1u << f);
      }
    }

    ~TryForceFeatureScope() {
      if (CanForce()) {
        CpuFeatures::supported_ = old_supported_;
      }
    }

   private:
    static bool CanForce() {
      // It's only safe to temporarily force support of CPU features
      // when there's only a single isolate, which is guaranteed when
      // the serializer is enabled.
      return Serializer::enabled();
    }

    const unsigned old_supported_;
  };

 private:
#ifdef DEBUG
  static bool initialized_;
#endif
  static unsigned supported_;
  static unsigned found_by_runtime_probing_;

  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};


class Assembler : public AssemblerBase {
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is NULL, the assembler allocates and grows its own
  // buffer, and buffer_size determines the initial buffer size. The buffer is
  // owned by the assembler and deallocated upon destruction of the assembler.
  //
  // If the provided buffer is not NULL, the assembler uses the provided buffer
  // for code generation and assumes its size to be buffer_size. If the buffer
  // is too small, a fatal error occurs. No deallocation of the buffer is done
  // upon destruction of the assembler.
  Assembler(Isolate* isolate, void* buffer, int buffer_size);
  ~Assembler();

  // Overrides the default provided by FLAG_debug_code.
  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  void GetCode(CodeDesc* desc);

  // Label operations & relative jumps (PPUM Appendix D).
  //
  // Takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain.
  // Usage:
  //
  // Label L;    // unbound label
  // j(cc, &L);  // forward branch to unbound label
  // bind(&L);   // bind label to the current pc
  // j(cc, &L);  // backward branch to bound label
  // bind(&L);   // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.

  void bind(Label* L);  // Binds an unbound label L to current code position.
  // Determines if Label is bound and near enough so that branch instruction
  // can be used to reach it, instead of jump instruction.
  bool is_near(Label* L);

  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
  // Manages the jump elimination optimization if the second parameter is true.
  int32_t branch_offset(Label* L, bool jump_elimination_allowed);
  // Same as branch_offset, scaled down to an instruction count as encoded in
  // the 16-bit branch immediate field.
  int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
    int32_t o = branch_offset(L, jump_elimination_allowed);
    ASSERT((o & 3) == 0);  // Assert the offset is aligned.
    return o >> 2;
  }
  uint32_t jump_address(Label* L);

  // Puts a label's target address at the given position.
  // The high 8 bits are set to zero.
  void label_at_put(Label* L, int at_offset);

  // Read/Modify the code target address in the branch/call instruction at pc.
  static Address target_address_at(Address pc);
  static void set_target_address_at(Address pc, Address target);

  // This sets the branch destination (which gets loaded at the call address).
  // This is for calls and branches within generated code.
  inline static void set_target_at(Address instruction_payload,
                                   Address target) {
    set_target_address_at(instruction_payload, target);
  }

  // This sets the branch destination.
  // This is for calls and branches to runtime code.
  inline static void set_external_target_at(Address instruction_payload,
                                            Address target) {
    set_target_address_at(instruction_payload, target);
  }

  // Size of an instruction.
  static const int kInstrSize = sizeof(Instr);

  // Difference between address of current opcode and target address offset.
  static const int kBranchPCOffset = 4;

  // Here we are patching the address in the LUI/ORI instruction pair.
  // These values are used in the serialization process and must be zero for
  // MIPS platform, as Code, Embedded Object or External-reference pointers
  // are split across two consecutive instructions and don't exist separately
  // in the code, so the serializer should not step forwards in memory after
  // a target is resolved and written.
  static const int kCallTargetSize = 0 * kInstrSize;
  static const int kExternalTargetSize = 0 * kInstrSize;

  // Number of consecutive instructions used to store 32bit constant.
  // Used in RelocInfo::target_address_address() function to tell serializer
  // address of the instruction that follows LUI/ORI instruction pair.
  static const int kInstructionsFor32BitConstant = 2;

  // Distance between the instruction referring to the address of the call
  // target and the return address.
  static const int kCallTargetAddressOffset = 4 * kInstrSize;

  // Distance between start of patched return sequence and the emitted address
  // to jump to.
  static const int kPatchReturnSequenceAddressOffset = 0;

  // Distance between start of patched debug break slot and the emitted address
  // to jump to.
  static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;

  // Difference between address of current opcode and value read from pc
  // register.
  static const int kPcLoadDelta = 4;

  // Number of instructions used for the JS return sequence. The constant is
  // used by the debugger to patch the JS return sequence.
  static const int kJSReturnSequenceInstructions = 7;
  static const int kDebugBreakSlotInstructions = 4;
  static const int kDebugBreakSlotLength =
      kDebugBreakSlotInstructions * kInstrSize;


  // ---------------------------------------------------------------------------
  // Code generation.

  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);
  // Aligns code to something that's optimal for a jump target for the platform.
  void CodeTargetAlign();

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
  };

  // Type == 0 is the default non-marking type.
  // A marking nop is encoded as sll(zero_reg, zero_reg, type); the sa field
  // carries the marker, so type must fit in 5 bits.
  void nop(unsigned int type = 0) {
    ASSERT(type < 32);
    sll(zero_reg, zero_reg, type, true);
  }


  // --------Branch-and-jump-instructions----------
  // We don't use likely variant of instructions.
  // Branch offsets are in instructions (hence the >> 2 when converting a
  // byte offset produced by branch_offset).
  void b(int16_t offset);
  void b(Label* L) { b(branch_offset(L, false)>>2); }
  void bal(int16_t offset);
  void bal(Label* L) { bal(branch_offset(L, false)>>2); }

  void beq(Register rs, Register rt, int16_t offset);
  void beq(Register rs, Register rt, Label* L) {
    beq(rs, rt, branch_offset(L, false) >> 2);
  }
  void bgez(Register rs, int16_t offset);
  void bgezal(Register rs, int16_t offset);
  void bgtz(Register rs, int16_t offset);
  void blez(Register rs, int16_t offset);
  void bltz(Register rs, int16_t offset);
  void bltzal(Register rs, int16_t offset);
  void bne(Register rs, Register rt, int16_t offset);
  void bne(Register rs, Register rt, Label* L) {
    bne(rs, rt, branch_offset(L, false)>>2);
  }

  // Never use the int16_t b(l)cond version with a branch offset
  // instead of using the Label* version.

  // Jump targets must be in the current 256 MB-aligned region. ie 28 bits.
  void j(int32_t target);
  void jal(int32_t target);
  void jalr(Register rs, Register rd = ra);
  void jr(Register target);


  //-------Data-processing-instructions---------

  // Arithmetic.
  void addu(Register rd, Register rs, Register rt);
  void subu(Register rd, Register rs, Register rt);
  // mult/multu/div/divu leave their results in the HI/LO registers
  // (read back via mfhi/mflo below).
  void mult(Register rs, Register rt);
  void multu(Register rs, Register rt);
  void div(Register rs, Register rt);
  void divu(Register rs, Register rt);
  void mul(Register rd, Register rs, Register rt);

  void addiu(Register rd, Register rs, int32_t j);

  // Logical.
  // Trailing underscores avoid clashes with the C++ keywords and/or/xor.
  void and_(Register rd, Register rs, Register rt);
  void or_(Register rd, Register rs, Register rt);
  void xor_(Register rd, Register rs, Register rt);
  void nor(Register rd, Register rs, Register rt);

  void andi(Register rd, Register rs, int32_t j);
  void ori(Register rd, Register rs, int32_t j);
  void xori(Register rd, Register rs, int32_t j);
  void lui(Register rd, int32_t j);

  // Shifts.
  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
  // and may cause problems in normal code. coming_from_nop makes sure this
  // doesn't happen.
  void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
  void sllv(Register rd, Register rt, Register rs);
  void srl(Register rd, Register rt, uint16_t sa);
  void srlv(Register rd, Register rt, Register rs);
  void sra(Register rt, Register rd, uint16_t sa);
  void srav(Register rt, Register rd, Register rs);
  void rotr(Register rd, Register rt, uint16_t sa);
  void rotrv(Register rd, Register rt, Register rs);


  //------------Memory-instructions-------------

  // Loads and stores; lwl/lwr and swl/swr are the unaligned-access pairs.
  void lb(Register rd, const MemOperand& rs);
  void lbu(Register rd, const MemOperand& rs);
  void lh(Register rd, const MemOperand& rs);
  void lhu(Register rd, const MemOperand& rs);
  void lw(Register rd, const MemOperand& rs);
  void lwl(Register rd, const MemOperand& rs);
  void lwr(Register rd, const MemOperand& rs);
  void sb(Register rd, const MemOperand& rs);
  void sh(Register rd, const MemOperand& rs);
  void sw(Register rd, const MemOperand& rs);
  void swl(Register rd, const MemOperand& rs);
  void swr(Register rd, const MemOperand& rs);


  //-------------Misc-instructions--------------

  // Break / Trap instructions.
  void break_(uint32_t code, bool break_as_stop = false);
  void stop(const char* msg, uint32_t code = kMaxStopCode);
  void tge(Register rs, Register rt, uint16_t code);
  void tgeu(Register rs, Register rt, uint16_t code);
  void tlt(Register rs, Register rt, uint16_t code);
  void tltu(Register rs, Register rt, uint16_t code);
  void teq(Register rs, Register rt, uint16_t code);
  void tne(Register rs, Register rt, uint16_t code);

  // Move from HI/LO register.
  void mfhi(Register rd);
  void mflo(Register rd);

  // Set on less than.
  void slt(Register rd, Register rs, Register rt);
  void sltu(Register rd, Register rs, Register rt);
  void slti(Register rd, Register rs, int32_t j);
  void sltiu(Register rd, Register rs, int32_t j);

  // Conditional move.
  void movz(Register rd, Register rs, Register rt);
  void movn(Register rd, Register rs, Register rt);
  // movt/movf move on an FPU condition-code bit (cc).
  void movt(Register rd, Register rs, uint16_t cc = 0);
  void movf(Register rd, Register rs, uint16_t cc = 0);

  // Bit twiddling.
  void clz(Register rd, Register rs);
  // Insert/extract a bit field; trailing underscore avoids name clashes.
  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);

  //--------Coprocessor-instructions----------------

  // Load, store, and move.
  void lwc1(FPURegister fd, const MemOperand& src);
  void ldc1(FPURegister fd, const MemOperand& src);

  void swc1(FPURegister fs, const MemOperand& dst);
  void sdc1(FPURegister fs, const MemOperand& dst);

  void mtc1(Register rt, FPURegister fs);
  void mfc1(Register rt, FPURegister fs);

  void ctc1(Register rt, FPUControlRegister fs);
  void cfc1(Register rt, FPUControlRegister fs);

  // Arithmetic.
  // Double-precision arithmetic (_d suffix = fmt D).
  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void abs_d(FPURegister fd, FPURegister fs);
  void mov_d(FPURegister fd, FPURegister fs);
  void neg_d(FPURegister fd, FPURegister fs);
  void sqrt_d(FPURegister fd, FPURegister fs);

  // Conversion.
  // Naming is cvt_<to>_<from>; w = 32-bit word, l = 64-bit word,
  // s = single, d = double.
  void cvt_w_s(FPURegister fd, FPURegister fs);
  void cvt_w_d(FPURegister fd, FPURegister fs);
  void trunc_w_s(FPURegister fd, FPURegister fs);
  void trunc_w_d(FPURegister fd, FPURegister fs);
  void round_w_s(FPURegister fd, FPURegister fs);
  void round_w_d(FPURegister fd, FPURegister fs);
  void floor_w_s(FPURegister fd, FPURegister fs);
  void floor_w_d(FPURegister fd, FPURegister fs);
  void ceil_w_s(FPURegister fd, FPURegister fs);
  void ceil_w_d(FPURegister fd, FPURegister fs);

  void cvt_l_s(FPURegister fd, FPURegister fs);
  void cvt_l_d(FPURegister fd, FPURegister fs);
  void trunc_l_s(FPURegister fd, FPURegister fs);
  void trunc_l_d(FPURegister fd, FPURegister fs);
  void round_l_s(FPURegister fd, FPURegister fs);
  void round_l_d(FPURegister fd, FPURegister fs);
  void floor_l_s(FPURegister fd, FPURegister fs);
  void floor_l_d(FPURegister fd, FPURegister fs);
  void ceil_l_s(FPURegister fd, FPURegister fs);
  void ceil_l_d(FPURegister fd, FPURegister fs);

  void cvt_s_w(FPURegister fd, FPURegister fs);
  void cvt_s_l(FPURegister fd, FPURegister fs);
  void cvt_s_d(FPURegister fd, FPURegister fs);

  void cvt_d_w(FPURegister fd, FPURegister fs);
  void cvt_d_l(FPURegister fd, FPURegister fs);
  void cvt_d_s(FPURegister fd, FPURegister fs);

  // Conditions and branches.
  // FPU compare; sets condition-code bit cc for bc1f/bc1t below.
  void c(FPUCondition cond, SecondaryField fmt,
         FPURegister ft, FPURegister fs, uint16_t cc = 0);

  void bc1f(int16_t offset, uint16_t cc = 0);
  void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
  void bc1t(int16_t offset, uint16_t cc = 0);
  void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
  void fcmp(FPURegister src1, const double src2, FPUCondition cond);

  // Check the code size generated from label to here.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Check the number of instructions generated from label to here.
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }

  // Class for scoping postponing the trampoline pool generation.
  class BlockTrampolinePoolScope {
   public:
    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockTrampolinePool();
    }
    ~BlockTrampolinePoolScope() {
      assem_->EndBlockTrampolinePool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
  };

  // Class for postponing the assembly buffer growth. Typically used for
  // sequences of instructions that must be emitted as a unit, before
  // buffer growth (and relocation) can occur.
  // This blocking scope is not nestable.
  class BlockGrowBufferScope {
   public:
    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockGrowBuffer();
    }
    ~BlockGrowBufferScope() {
      assem_->EndBlockGrowBuffer();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
  };

  // Debugging.

  // Mark address of the ExitJSFrame code.
  void RecordJSReturn();

  // Mark address of a debug break slot.
  void RecordDebugBreakSlot();

  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  void SetRecordedAstId(unsigned ast_id) {
    // An earlier recorded id must have been consumed (ClearRecordedAstId)
    // before a new one is set.
    ASSERT(recorded_ast_id_ == kNoASTId);
    recorded_ast_id_ = ast_id;
  }

  unsigned RecordedAstId() {
    ASSERT(recorded_ast_id_ != kNoASTId);
    return recorded_ast_id_;
  }

  void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }

  // Record a comment relocation entry that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg);

  static int RelocateInternalReference(byte* pc, intptr_t pc_delta);

  // Writes a single byte or word of data in the code stream. Used for
  // inline tables, e.g., jump-tables.
  void db(uint8_t data);
  void dd(uint32_t data);

  int32_t pc_offset() const { return pc_ - buffer_; }

  PositionsRecorder* positions_recorder() { return &positions_recorder_; }

  // Postpone the generation of the trampoline pool for the specified number of
  // instructions.
  void BlockTrampolinePoolFor(int instructions);

  // Check if there is less than kGap bytes available in the buffer.
  // If this is the case, we need to grow the buffer before emitting
  // an instruction or relocation information.
  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }

  // Get the number of bytes available in the buffer.
  inline int available_space() const { return reloc_info_writer.pos() - pc_; }

  // Read/patch instructions.
  // Static versions operate on an absolute pc; non-static versions on an
  // offset into this assembler's buffer.
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }

  // Check if an instruction is a branch of some kind.
  static bool IsBranch(Instr instr);
  static bool IsBeq(Instr instr);
  static bool IsBne(Instr instr);

  static bool IsJump(Instr instr);
  static bool IsJ(Instr instr);
  static bool IsLui(Instr instr);
  static bool IsOri(Instr instr);

  static bool IsNop(Instr instr, unsigned int type);
  static bool IsPop(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsLwRegFpOffset(Instr instr);
  static bool IsSwRegFpOffset(Instr instr);
  static bool IsLwRegFpNegOffset(Instr instr);
  static bool IsSwRegFpNegOffset(Instr instr);

  // Field accessors: Get<X> returns the decoded field value, Get<X>Field the
  // still-shifted bits as they appear in the instruction word.
  static Register GetRtReg(Instr instr);
  static Register GetRsReg(Instr instr);
  static Register GetRdReg(Instr instr);

  static uint32_t GetRt(Instr instr);
  static uint32_t GetRtField(Instr instr);
  static uint32_t GetRs(Instr instr);
  static uint32_t GetRsField(Instr instr);
  static uint32_t GetRd(Instr instr);
  static uint32_t GetRdField(Instr instr);
  static uint32_t GetSa(Instr instr);
  static uint32_t GetSaField(Instr instr);
  static uint32_t GetOpcodeField(Instr instr);
  static uint32_t GetFunction(Instr instr);
  static uint32_t GetFunctionField(Instr instr);
  static uint32_t GetImmediate16(Instr instr);
  static uint32_t GetLabelConst(Instr instr);

  static int32_t GetBranchOffset(Instr instr);
  static bool IsLw(Instr instr);
  static int16_t GetLwOffset(Instr instr);
  static Instr SetLwOffset(Instr instr, int16_t offset);

  static bool IsSw(Instr instr);
  static Instr SetSwOffset(Instr instr, int16_t offset);
  static bool IsAddImmediate(Instr instr);
  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);

  static bool IsAndImmediate(Instr instr);

  void CheckTrampolinePool();

 protected:
  // Relocation for a type-recording IC has the AST id added to it. This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  unsigned recorded_ast_id_;

  bool emit_debug_code() const { return emit_debug_code_; }

  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode branch instruction at pos and return branch target pos.
  int target_at(int32_t pos);

  // Patch branch instruction at pos to branch to given branch target pos.
  void target_at_put(int32_t pos, int32_t target_pos);

  // Say if we need to relocate with this mode.
  bool MustUseReg(RelocInfo::Mode rmode);

  // Record reloc info for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

  // Block the emission of the trampoline pool before pc_offset.
  void BlockTrampolinePoolBefore(int pc_offset) {
    if (no_trampoline_pool_before_ < pc_offset)
      no_trampoline_pool_before_ = pc_offset;
  }

  void StartBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_++;
  }

  void EndBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_--;
  }

  bool is_trampoline_pool_blocked() const {
    return trampoline_pool_blocked_nesting_ > 0;
  }

  bool has_exception() const {
    return internal_trampoline_exception_;
  }

  bool is_trampoline_emitted() const {
    return trampoline_emitted_;
  }

  // Temporarily block automatic assembly buffer growth.
  // Non-nesting (flag-based) blocking of buffer growth; asserted to catch
  // unbalanced Start/End pairs.
  void StartBlockGrowBuffer() {
    ASSERT(!block_buffer_growth_);
    block_buffer_growth_ = true;
  }

  void EndBlockGrowBuffer() {
    ASSERT(block_buffer_growth_);
    block_buffer_growth_ = false;
  }

  bool is_buffer_growth_blocked() const {
    return block_buffer_growth_;
  }

 private:
  // Code buffer:
  // The buffer into which code and relocation info are generated.
  byte* buffer_;
  int buffer_size_;
  // True if the assembler owns the buffer, false if buffer is external.
  bool own_buffer_;

  // Buffer size and constant pool distance are checked together at regular
  // intervals of kBufferCheckInterval emitted bytes.
  static const int kBufferCheckInterval = 1*KB/2;

  // Code generation.
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static const int kGap = 32;
  byte* pc_;  // The program counter - moves forward.


  // Repeated checking whether the trampoline pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated.
  static const int kCheckConstIntervalInst = 32;
  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;

  int next_buffer_check_;  // pc offset of next buffer check.

  // Emission of the trampoline pool may be blocked in some code sequences.
  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_trampoline_pool_before_;  // Block emission before this pc offset.

  // Keep track of the last emitted pool to guarantee a maximal distance.
  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.

  // Automatic growth of the assembly buffer may be blocked for some sequences.
  bool block_buffer_growth_;  // Block growth when true.

  // Relocation information generation.
  // Each relocation is encoded as a variable size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

  // Code emission.
  inline void CheckBuffer();
  void GrowBuffer();
  inline void emit(Instr x);
  inline void CheckTrampolinePoolQuick();

  // Instruction generation.
  // We have 3 different kind of encoding layout on MIPS.
  // However due to many different types of objects encoded in the same fields
  // we have quite a few aliases for each mode.
  // Using the same structure to refer to Register and FPURegister would spare a
  // few aliases, but mixing both does not look clean to me.
  // Anyway we could surely implement this differently.

  // Register-type (R-type) encodings, for integer register operands.
  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        Register rd,
                        uint16_t sa = 0,
                        SecondaryField func = NULLSF);

  // R-type encoding using msb/lsb fields (bit-field instructions).
  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        uint16_t msb,
                        uint16_t lsb,
                        SecondaryField func);

  // R-type encodings for FPU operands.
  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPUControlRegister fs,
                        SecondaryField func = NULLSF);


  // Immediate-type (I-type) encodings; j is the 16-bit immediate/offset.
  void GenInstrImmediate(Opcode opcode,
                         Register rs,
                         Register rt,
                         int32_t j);
  void GenInstrImmediate(Opcode opcode,
                         Register rs,
                         SecondaryField SF,
                         int32_t j);
  void GenInstrImmediate(Opcode opcode,
                         Register r1,
                         FPURegister r2,
                         int32_t j);


  // Jump-type (J-type) encoding.
  void GenInstrJump(Opcode opcode,
                    uint32_t address);

  // Helpers.
  void LoadRegPlusOffsetToAt(const MemOperand& src);

  // Labels.
  void print(Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L);

  // One trampoline consists of:
  // - space for trampoline slots,
  // - space for labels.
  //
  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
  // Space for trampoline slots precedes space for labels. Each label is of one
  // instruction size, so total amount for labels is equal to
  // label_count * kInstrSize.
1127 class Trampoline { 1128 public: 1129 Trampoline() { 1130 start_ = 0; 1131 next_slot_ = 0; 1132 free_slot_count_ = 0; 1133 end_ = 0; 1134 } 1135 Trampoline(int start, int slot_count) { 1136 start_ = start; 1137 next_slot_ = start; 1138 free_slot_count_ = slot_count; 1139 end_ = start + slot_count * kTrampolineSlotsSize; 1140 } 1141 int start() { 1142 return start_; 1143 } 1144 int end() { 1145 return end_; 1146 } 1147 int take_slot() { 1148 int trampoline_slot = kInvalidSlotPos; 1149 if (free_slot_count_ <= 0) { 1150 // We have run out of space on trampolines. 1151 // Make sure we fail in debug mode, so we become aware of each case 1152 // when this happens. 1153 ASSERT(0); 1154 // Internal exception will be caught. 1155 } else { 1156 trampoline_slot = next_slot_; 1157 free_slot_count_--; 1158 next_slot_ += kTrampolineSlotsSize; 1159 } 1160 return trampoline_slot; 1161 } 1162 private: 1163 int start_; 1164 int end_; 1165 int next_slot_; 1166 int free_slot_count_; 1167 }; 1168 1169 int32_t get_trampoline_entry(int32_t pos); 1170 int unbound_labels_count_; 1171 // If trampoline is emitted, generated code is becoming large. As this is 1172 // already a slow case which can possibly break our code generation for the 1173 // extreme case, we use this information to trigger different mode of 1174 // branch instruction generation, where we use jump instructions rather 1175 // than regular branch instructions. 
  bool trampoline_emitted_;
  // Each trampoline slot holds 4 instructions.
  static const int kTrampolineSlotsSize = 4 * kInstrSize;
  // Largest positive branch reach: an 18-bit signed byte offset.
  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
  // Sentinel returned by Trampoline::take_slot() when no slot is free.
  static const int kInvalidSlotPos = -1;

  Trampoline trampoline_;
  bool internal_trampoline_exception_;

  friend class RegExpMacroAssemblerMIPS;
  friend class RelocInfo;
  friend class CodePatcher;
  friend class BlockTrampolinePoolScope;

  PositionsRecorder positions_recorder_;
  bool emit_debug_code_;
  friend class PositionsRecorder;
  friend class EnsureSpace;
};


// RAII-style helper: constructing an EnsureSpace triggers a buffer check so
// the following emission code does not have to test for overflow itself.
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBuffer();
  }
};

} }  // namespace v8::internal

#endif  // V8_MIPS_ASSEMBLER_MIPS_H_