// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
#define V8_PPC_MACRO_ASSEMBLER_PPC_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
// (Several aliases deliberately share a physical register, e.g. r3 is both
// the first return register and the JS call argument-count register; r4 is
// both the JSFunction register and the allocate-size register.)
const Register kReturnRegister0 = {Register::kCode_r3};
const Register kReturnRegister1 = {Register::kCode_r4};
const Register kReturnRegister2 = {Register::kCode_r5};
const Register kJSFunctionRegister = {Register::kCode_r4};
const Register kContextRegister = {Register::kCode_r30};
const Register kAllocateSizeRegister = {Register::kCode_r4};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r15};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r16};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r17};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_r3};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r6};
const Register kRuntimeCallFunctionRegister = {Register::kCode_r4};
const Register kRuntimeCallArgCountRegister = {Register::kCode_r3};

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
// Subtracts kHeapObjectTag because tagged HeapObject pointers carry the tag
// in their low bits; the untagged address is pointer - tag.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}


// Flags used for AllocateHeapNumber
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag
  DONT_TAG_RESULT
};


enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};
// Whether the link register has already been saved by surrounding code (and
// thus need not be preserved across a call emitted by a macro).
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };


Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);


#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
                Register reg4 = no_reg, Register reg5 = no_reg,
                Register reg6 = no_reg, Register reg7 = no_reg,
                Register reg8 = no_reg, Register reg9 = no_reg,
                Register reg10 = no_reg);
#endif

// These exist to provide portability between 32 and 64-bit.
// Each alias maps to the doubleword instruction on PPC64 and the word
// instruction on 32-bit PPC, so pointer-sized code can be written once.
#if V8_TARGET_ARCH_PPC64
#define LoadPX ldx
#define LoadPUX ldux
#define StorePX stdx
#define StorePUX stdux
#define ShiftLeftImm sldi
#define ShiftRightImm srdi
#define ClearLeftImm clrldi
#define ClearRightImm clrrdi
#define ShiftRightArithImm sradi
#define ShiftLeft_ sld
#define ShiftRight_ srd
#define ShiftRightArith srad
#define Mul mulld
#define Div divd
#else
#define LoadPX lwzx
#define LoadPUX lwzux
#define StorePX stwx
#define StorePUX stwux
#define ShiftLeftImm slwi
#define ShiftRightImm srwi
#define ClearLeftImm clrlwi
#define ClearRightImm clrrwi
#define ShiftRightArithImm srawi
#define ShiftLeft_ slw
#define ShiftRight_ srw
#define ShiftRightArith sraw
#define Mul mullw
#define Div divw
#endif


// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public Assembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object);


  // Returns the size of a call in instructions. Note, the value returned is
  // only valid as long as no entries are added to the constant pool between
  // checking the call size and emitting the actual call.
  static int CallSize(Register target);
  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
  static int CallSizeNotPredictableCodeSize(Address target,
                                            RelocInfo::Mode rmode,
                                            Condition cond = al);

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target);
  void JumpToJSEntry(Register target);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Register target);
  void CallJSEntry(Register target);
  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
               Condition cond = al);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
            Condition cond = al);
  // Return via the link register (blr), optionally conditional on cr.
  void Ret() { blr(); }
  void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count);
  void Drop(Register count, Register scratch = r0);

  // Drop |drop| stack slots, then return.
  void Ret(int drop) {
    Drop(drop);
    blr();
  }

  void Call(Label* target);

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
  void Move(Register dst, Handle<Object> value);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(DoubleRegister dst, DoubleRegister src);

  // Push/pop the registers selected by |regs| relative to |location|
  // (defaults to the stack pointer).
  void MultiPush(RegList regs, Register location = sp);
  void MultiPop(RegList regs, Register location = sp);

  void MultiPushDoubles(RegList dregs, Register location = sp);
  void MultiPopDoubles(RegList dregs, Register location = sp);

  // Load an object from the root table.
  void LoadRoot(Register destination, Heap::RootListIndex index,
                Condition cond = al);
  // Store an object to the root table.
  void StoreRoot(Register source, Heap::RootListIndex index,
                 Condition cond = al);

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object, Register value,
                                           Register address);

  enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr, Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met);

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  // (eq/ne below select the polarity of the check performed by InNewSpace,
  // which is declared elsewhere in this class.)
  void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object, Register scratch0, Register scratch1,
                Label* has_color, int first_bit, int second_bit);

  void JumpIfBlack(Register object, Register scratch0, Register scratch1,
                   Label* on_black);

  // Checks the color of an object. If the object is white we jump to the
  // incremental marker.
  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                   Register scratch3, Label* value_is_white);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // As above, but the offset has the tag presubtracted. For use with
  // MemOperand(reg, off).
  // Forwards to RecordWriteField, re-adding the heap-object tag so that an
  // untagged MemOperand offset can be used.
  inline void RecordWriteContextSlot(
      Register context, int offset, Register value, Register scratch,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
                     lr_status, save_fp, remembered_set_action, smi_check,
                     pointers_to_here_check_for_value);
  }

  // Notify the garbage collector that we wrote a code entry into a
  // JSFunction. Only scratch is clobbered by the operation.
  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
                                 Register scratch);

  void RecordWriteForMap(Register object, Register map, Register dst,
                         LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object, Register address, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  void Push(Register src) { push(src); }

  // Push a handle.
  void Push(Handle<Object> handle);
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Push two registers. Pushes leftmost register first (to highest address).
  // The StorePU (store-with-update) reserves the whole frame and updates sp
  // in one instruction; the remaining stores fill the slots above it.
  void Push(Register src1, Register src2) {
    StorePU(src2, MemOperand(sp, -2 * kPointerSize));
    StoreP(src1, MemOperand(sp, kPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    StorePU(src3, MemOperand(sp, -3 * kPointerSize));
    StoreP(src2, MemOperand(sp, kPointerSize));
    StoreP(src1, MemOperand(sp, 2 * kPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    StorePU(src4, MemOperand(sp, -4 * kPointerSize));
    StoreP(src3, MemOperand(sp, kPointerSize));
    StoreP(src2, MemOperand(sp, 2 * kPointerSize));
    StoreP(src1, MemOperand(sp, 3 * kPointerSize));
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    StorePU(src5, MemOperand(sp, -5 * kPointerSize));
    StoreP(src4, MemOperand(sp, kPointerSize));
    StoreP(src3, MemOperand(sp, 2 * kPointerSize));
    StoreP(src2, MemOperand(sp, 3 * kPointerSize));
    StoreP(src1, MemOperand(sp, 4 * kPointerSize));
  }

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    LoadP(src2, MemOperand(sp, 0));
    LoadP(src1, MemOperand(sp, kPointerSize));
    addi(sp, sp, Operand(2 * kPointerSize));
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    LoadP(src3, MemOperand(sp, 0));
    LoadP(src2, MemOperand(sp, kPointerSize));
    LoadP(src1, MemOperand(sp, 2 * kPointerSize));
    addi(sp, sp, Operand(3 * kPointerSize));
  }

  // Pop four registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4) {
    LoadP(src4, MemOperand(sp, 0));
    LoadP(src3, MemOperand(sp, kPointerSize));
    LoadP(src2, MemOperand(sp, 2 * kPointerSize));
    LoadP(src1, MemOperand(sp, 3 * kPointerSize));
    addi(sp, sp, Operand(4 * kPointerSize));
  }

  // Pop five registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4,
           Register src5) {
    LoadP(src5, MemOperand(sp, 0));
    LoadP(src4, MemOperand(sp, kPointerSize));
    LoadP(src3, MemOperand(sp, 2 * kPointerSize));
    LoadP(src2, MemOperand(sp, 3 * kPointerSize));
    LoadP(src1, MemOperand(sp, 4 * kPointerSize));
    addi(sp, sp, Operand(5 * kPointerSize));
  }

  // Push a fixed frame, consisting of lr, fp, constant pool.
  void PushCommonFrame(Register marker_reg = no_reg);

  // Push a standard frame, consisting of lr, fp, constant pool,
  // context and JS function
  void PushStandardFrame(Register function_reg);

  void PopCommonFrame(Register marker_reg = no_reg);

  // Restore caller's frame pointer and return address prior to being
  // overwritten by tail call stack preparation.
  void RestoreFrameStateForTailCall();

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
  // from C.
  // Does not handle errors.
  void FlushICache(Register address, size_t size, Register scratch);

  // If the value is a NaN, canonicalize the value else, do nothing.
  void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
  // In-place variant: canonicalize |value| into itself.
  void CanonicalizeNaN(const DoubleRegister value) {
    CanonicalizeNaN(value, value);
  }

  // Converts the integer (untagged smi) in |src| to a double, storing
  // the result to |dst|
  void ConvertIntToDouble(Register src, DoubleRegister dst);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a double, storing the result to |dst|
  void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);

  // Converts the integer (untagged smi) in |src| to
  // a float, storing the result in |dst|
  void ConvertIntToFloat(Register src, DoubleRegister dst);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a float, storing the result in |dst|
  void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);

#if V8_TARGET_ARCH_PPC64
  void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
  void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
  void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
  void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
#endif

  // Converts the double_input to an integer. Note that, upon return,
  // the contents of double_dst will also hold the fixed point representation.
  // On 32-bit targets the high word of the result goes to |dst_hi|.
  void ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
                            const Register dst_hi,
#endif
                            const Register dst, const DoubleRegister double_dst,
                            FPRoundingMode rounding_mode = kRoundToZero);

#if V8_TARGET_ARCH_PPC64
  // Converts the double_input to an unsigned integer. Note that, upon return,
  // the contents of double_dst will also hold the fixed point representation.
  void ConvertDoubleToUnsignedInt64(
      const DoubleRegister double_input, const Register dst,
      const DoubleRegister double_dst,
      FPRoundingMode rounding_mode = kRoundToZero);
#endif

#if !V8_TARGET_ARCH_PPC64
  // 64-bit shifts on register pairs (32-bit targets only).
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, Register scratch, Register shift);
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, uint32_t shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, Register scratch, Register shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, uint32_t shift);
  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                         Register src_high, Register scratch, Register shift);
  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                         Register src_high, uint32_t shift);
#endif

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type, Register base = no_reg,
                    int prologue_offset = 0);
  void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);

  // Enter exit frame.
  // stack_space - extra stack space, used for parameters before call to C.
  // At least one slot (for the return address) should be provided.
  void EnterExitFrame(bool save_doubles, int stack_space = 1);

  // Leave the current exit frame. Expects the return value in r0.
  // NOTE(review): "r0" conflicts with kReturnRegister0 (= r3) declared at the
  // top of this file - confirm against the implementation.
  // Expect the number of values, pushed prior to the exit frame, to
  // remove in a register (or no_reg, if there is nothing to remove).
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool restore_context,
                      bool argument_count_is_length = false);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void LoadContext(Register dst, int context_chain_length);

  // Load the global object from the current context.
  void LoadGlobalObject(Register dst) {
    LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
  }

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
                                           ElementsKind transitioned_kind,
                                           Register map_in_out,
                                           Register scratch,
                                           Label* no_map_match);

  void LoadNativeContextSlot(int index, Register dst);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function, Register map,
                                    Register scratch);

  // Point kRootRegister at the start of the isolate's root array.
  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(kRootRegister, Operand(roots_array_start));
  }

  // ----------------------------------------------------------------
  // new PPC macro-assembler interfaces that are slightly higher level
  // than assembler-ppc and may generate variable length sequences

  // load a literal signed int value <value> to GPR <dst>
  void LoadIntLiteral(Register dst, int value);

  // load an SMI value <value> to GPR <dst>
  void LoadSmiLiteral(Register dst, Smi* smi);

  // load a literal double value <value> to FPR <result>
  void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);

  // Word/halfword/byte loads and stores; |scratch| is used when the operand
  // cannot be encoded directly.
  void LoadWord(Register dst, const MemOperand& mem, Register scratch);
  void LoadWordArith(Register dst, const MemOperand& mem,
                     Register scratch = no_reg);
  void StoreWord(Register src, const MemOperand& mem, Register scratch);

  void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
  void LoadHalfWordArith(Register dst, const MemOperand& mem,
                         Register scratch = no_reg);
  void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);

  void LoadByte(Register dst, const MemOperand& mem, Register scratch);
  void StoreByte(Register src, const MemOperand& mem, Register scratch);

  // Load/store sized according to a Representation.
  void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
                          Register scratch = no_reg);
  void StoreRepresentation(Register src, const MemOperand& mem,
                           Representation r, Register scratch = no_reg);

  // Floating-point loads/stores; the "U" variants update the base register.
  void LoadDouble(DoubleRegister dst, const MemOperand& mem,
                  Register scratch = no_reg);
  void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
                   Register scratch = no_reg);

  void LoadSingle(DoubleRegister dst, const MemOperand& mem,
                  Register scratch = no_reg);
  void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
                   Register scratch = no_reg);

  void StoreDouble(DoubleRegister src, const MemOperand& mem,
                   Register scratch = no_reg);
  void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
                    Register scratch = no_reg);

  void StoreSingle(DoubleRegister src, const MemOperand& mem,
                   Register scratch = no_reg);
  void StoreSingleU(DoubleRegister src, const MemOperand& mem,
                    Register scratch = no_reg);

  // Move values between integer and floating point registers.
  void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
  void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
                              Register scratch);
  void MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
                        Register src_hi,
#endif
                        Register src);
#if V8_TARGET_ARCH_PPC64
  void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
                                  Register src_lo, Register scratch);
#endif
  void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
  void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
  void MovDoubleLowToInt(Register dst, DoubleRegister src);
  void MovDoubleHighToInt(Register dst, DoubleRegister src);
  void MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
      Register dst_hi,
#endif
      Register dst, DoubleRegister src);
  void MovIntToFloat(DoubleRegister dst, Register src);
  void MovFloatToInt(Register dst, DoubleRegister src);

  // Arithmetic/compare helpers that materialize out-of-range immediates into
  // |scratch| when needed.
  void Add(Register dst, Register src, intptr_t value, Register scratch);
  void Cmpi(Register src1, const Operand& src2, Register scratch,
            CRegister cr = cr7);
  void Cmpli(Register src1, const Operand& src2, Register scratch,
             CRegister cr = cr7);
  void Cmpwi(Register src1, const Operand& src2, Register scratch,
             CRegister cr = cr7);
  void Cmplwi(Register src1, const Operand& src2, Register scratch,
              CRegister cr = cr7);
  void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
  void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
  void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);

  void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
  void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
  void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
                     CRegister cr = cr7);
  void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
                      CRegister cr = cr7);
  void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
                     RCBit rc = LeaveRC);

  // Set new rounding mode RN to FPSCR
  void SetRoundingMode(FPRoundingMode RN);

  // reset rounding mode to default (kRoundToNearest)
  void ResetRoundingMode();

  // These exist to provide portability between 32 and 64-bit:
  // pointer-sized load/store (with-update variants suffixed U).
  void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
  void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
  void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
  void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Both |callee_args_count| and |caller_args_count_reg| do not include
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag,
                          const CallWrapper& call_wrapper);

  void FloodFunctionIfStepping(Register fun, Register new_target,
                               const ParameterCount& expected,
                               const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function, const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void IsObjectJSStringType(Register object, Register scratch, Label* fail);

  void IsObjectNameType(Register object, Register scratch, Label* fail);

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
                              Label* miss);

  void GetNumberHash(Register t0, Register scratch);

  void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
                                Register result, Register t0, Register t1,
                                Register t2);


  // Emit a code marker: a nop of the given type.
  inline void MarkCode(NopMarkerTypes type) { nop(type); }

  // Check if the given instruction is a 'type' marker.
  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
  // These instructions are generated to mark special location in the code,
  // like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }


  // Decode the marker type from a marker instruction, or -1 if |instr| is
  // not a marker (i.e. not a "mov rn, rn" with n in the marker range).
  static inline int GetCodeMarker(Instr instr) {
    int dst_reg_offset = 12;
    int dst_mask = 0xf << dst_reg_offset;
    int src_mask = 0xf;
    int dst_reg = (instr & dst_mask) >> dst_reg_offset;
    int src_reg = instr & src_mask;
    // Everything outside the two register fields must match the mov encoding.
    uint32_t non_register_mask = ~(dst_mask | src_mask);
    uint32_t mov_mask = al | 13 << 21;

    // Return <n> if we have a mov rn rn, else return -1.
    int type = ((instr & non_register_mask) == mov_mask) &&
                       (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
                       (dst_reg < LAST_CODE_MARKER)
                   ? src_reg
                   : -1;
    DCHECK((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. The object_size is
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the space is exhausted control continues at the gc_required
  // label. The allocated object is returned in result. If the flag
  // tag_allocated_object is true the result is tagged as a heap object.
  // All registers are clobbered also when control continues at the gc_required
  // label.
  void Allocate(int object_size, Register result, Register scratch1,
                Register scratch2, Label* gc_required, AllocationFlags flags);

  void Allocate(Register object_size, Register result, Register result_end,
                Register scratch, Label* gc_required, AllocationFlags flags);

  // FastAllocate is right now only used for folded allocations. It just
  // increments the top pointer without checking against limit. This can only
  // be done if it was proved earlier that the allocation will succeed.
  void FastAllocate(int object_size, Register result, Register scratch1,
                    Register scratch2, AllocationFlags flags);

  void FastAllocate(Register object_size, Register result, Register result_end,
                    Register scratch, AllocationFlags flags);

  // String allocation helpers; all jump to gc_required on failure.
  void AllocateTwoByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateTwoByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register length,
                                 Register scratch1, Register scratch2,
                                 Label* gc_required);
  void AllocateTwoByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register length,
                                   Register scratch1, Register scratch2,
                                   Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
                          Register heap_number_map, Label* gc_required,
                          MutableMode mode = IMMUTABLE);
  void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
                                   Register scratch1, Register scratch2,
                                   Register heap_number_map,
                                   Label* gc_required);

  // Allocate and initialize a JSValue wrapper with the specified {constructor}
  // and {value}.
  void AllocateJSValue(Register result, Register constructor, Register value,
                       Register scratch1, Register scratch2,
                       Label* gc_required);

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src, Register dst, Register length, Register scratch);

  // Initialize fields with filler values. |count| fields starting at
  // |current_address| are overwritten with the value in |filler|. At the end
  // the loop, |current_address| points at the next uninitialized field.
  // |count| is assumed to be non-zero.
  void InitializeNFieldsWithFiller(Register current_address, Register count,
                                   Register filler);

  // Initialize fields with filler values. Fields starting at |current_address|
  // not including |end_address| are overwritten with the value in |filler|. At
  // the end the loop, |current_address| takes the value of |end_address|.
  void InitializeFieldsWithFiller(Register current_address,
                                  Register end_address, Register filler);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done, and |temp2| its instance type.
  void GetMapConstructor(Register result, Register map, Register temp,
                         Register temp2);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function, Register result,
                               Register scratch, Label* miss);

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // Type_reg can be no_reg. In that case ip is used.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);


  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map, Register scratch, Label* fail);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map, Register scratch, Label* fail);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map, Register scratch, Label* fail);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements. Otherwise jump to fail.
  void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
                                   Register elements_reg, Register scratch1,
                                   DoubleRegister double_scratch, Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
  // set with result of map compare. If multiple map compares are required, the
  // compare sequences branches to early_success.
  void CompareMap(Register obj, Register scratch, Handle<Map> map,
                  Label* early_success);

  // As above, but the map of the object is already loaded into the register
  // which is preserved by the code generated.
  void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
                SmiCheckType smi_check_type);

  // As above, but the expected map is identified by a root-list index rather
  // than a handle.
  void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
                Label* fail, SmiCheckType smi_check_type);


  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object).
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // Compare the given value and the value of weak cell.
  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
                    CRegister cr = cr7);

  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the given
  // miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // Compare the object in a register to a value from the root list.
  // Uses the ip register as scratch.
  void CompareRoot(Register obj, Heap::RootListIndex index);
  // Push the root value at |index| onto the stack (clobbers r0).
  void PushRoot(Heap::RootListIndex index) {
    LoadRoot(r0, index);
    Push(r0);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
    CompareRoot(with, index);
    beq(if_equal);
  }

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal) {
    CompareRoot(with, index);
    bne(if_not_equal);
  }

  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
894 Condition IsObjectStringType(Register obj, Register type) { 895 LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset)); 896 lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); 897 andi(r0, type, Operand(kIsNotStringMask)); 898 DCHECK_EQ(0u, kStringTag); 899 return eq; 900 } 901 902 903 // Picks out an array index from the hash field. 904 // Register use: 905 // hash - holds the index's hash. Clobbered. 906 // index - holds the overwritten index on exit. 907 void IndexFromHash(Register hash, Register index); 908 909 // Get the number of least significant bits from a register 910 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); 911 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); 912 913 // Load the value of a smi object into a double register. 914 void SmiToDouble(DoubleRegister value, Register smi); 915 916 // Check if a double can be exactly represented as a signed 32-bit integer. 917 // CR_EQ in cr7 is set if true. 918 void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1, 919 Register scratch2, DoubleRegister double_scratch); 920 921 // Check if a double is equal to -0.0. 922 // CR_EQ in cr7 holds the result. 923 void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1, 924 Register scratch2); 925 926 // Check the sign of a double. 927 // CR_LT in cr7 holds the result. 928 void TestDoubleSign(DoubleRegister input, Register scratch); 929 void TestHeapNumberSign(Register input, Register scratch); 930 931 // Try to convert a double to a signed 32-bit integer. 932 // CR_EQ in cr7 is set and result assigned if the conversion is exact. 933 void TryDoubleToInt32Exact(Register result, DoubleRegister double_input, 934 Register scratch, DoubleRegister double_scratch); 935 936 // Floor a double and writes the value to the result register. 937 // Go to exact if the conversion is exact (to be able to test -0), 938 // fall through calling code if an overflow occurred, else go to done. 
  // In return, input_high is loaded with high bits of input.
  void TryInt32Floor(Register result, DoubleRegister double_input,
                     Register input_high, Register scratch,
                     DoubleRegister double_scratch, Label* done, Label* exact);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the code-stub test code.
  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Register result, DoubleRegister double_input);

  // Performs a truncating conversion of a heap number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
  // must be different registers. Exits with 'result' holding the answer.
  void TruncateHeapNumberToI(Register result, Register object);

  // Converts the smi or heap number in object to an int32 using the rules
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
  // different registers.
  void TruncateNumberToI(Register object, Register result,
                         Register heap_number_map, Register scratch1,
                         Label* not_int32);

  // Overflow handling functions.
  // Usage: call the appropriate arithmetic function and then call one of the
  // flow control functions with the corresponding label.

  // Compute dst = left + right, setting condition codes. dst may be same as
  // either left or right (or a unique register). left and right must not be
  // the same register.
  void AddAndCheckForOverflow(Register dst, Register left, Register right,
                              Register overflow_dst, Register scratch = r0);
  void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
                              Register overflow_dst, Register scratch = r0);

  // Compute dst = left - right, setting condition codes. dst may be same as
  // either left or right (or a unique register). left and right must not be
  // the same register.
  void SubAndCheckForOverflow(Register dst, Register left, Register right,
                              Register overflow_dst, Register scratch = r0);

  // Flow control companions to the Add/SubAndCheckForOverflow functions
  // above; they test the result left in cr0.
  void BranchOnOverflow(Label* label) { blt(label, cr0); }

  void BranchOnNoOverflow(Label* label) { bge(label, cr0); }

  void RetOnOverflow(void) { Ret(lt, cr0); }

  void RetOnNoOverflow(void) { Ret(ge, cr0); }

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
                Condition cond = al);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  // As above, but always saves double registers across the call.
  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext, int num_arguments);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Compute the number of words passed on the stack for a C call with the
  // given register/double argument counts.
  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments, Register scratch);

  // These functions abstract how double arguments/results are passed when
  // calling C functions from generated code. (The original wording about
  // soft vs. hard floating point ABIs was inherited from the ARM port.)
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  void MovFromFloatParameter(DoubleRegister dst);
  void MovFromFloatResult(DoubleRegister dst);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);

  // Returns the handle to this assembler's code object; it must have been
  // set (non-null) before use.
  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }


  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged and ip gets clobbered. Dividend and result must be different.
  void TruncatingDiv(Register result, Register dividend, int32_t divisor);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value, Register scratch1,
                  Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
1107 void Check(Condition cond, BailoutReason reason, CRegister cr = cr7); 1108 1109 // Print a message to stdout and abort execution. 1110 void Abort(BailoutReason reason); 1111 1112 // Verify restrictions about code generated in stubs. 1113 void set_generating_stub(bool value) { generating_stub_ = value; } 1114 bool generating_stub() { return generating_stub_; } 1115 void set_has_frame(bool value) { has_frame_ = value; } 1116 bool has_frame() { return has_frame_; } 1117 inline bool AllowThisStubCall(CodeStub* stub); 1118 1119 // --------------------------------------------------------------------------- 1120 // Number utilities 1121 1122 // Check whether the value of reg is a power of two and not zero. If not 1123 // control continues at the label not_power_of_two. If reg is a power of two 1124 // the register scratch contains the value of (reg - 1) when control falls 1125 // through. 1126 void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch, 1127 Label* not_power_of_two_or_zero); 1128 // Check whether the value of reg is a power of two and not zero. 1129 // Control falls through if it is, with scratch containing the mask 1130 // value (reg - 1). 1131 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is 1132 // zero or negative, or jumps to the 'not_power_of_two' label if the value is 1133 // strictly positive but not a power of two. 1134 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch, 1135 Label* zero_and_neg, 1136 Label* not_power_of_two); 1137 1138 // --------------------------------------------------------------------------- 1139 // Bit testing/extraction 1140 // 1141 // Bit numbering is such that the least significant bit is bit 0 1142 // (for consistency between 32/64-bit). 1143 1144 // Extract consecutive bits (defined by rangeStart - rangeEnd) from src 1145 // and, if !test, shift them into the least significant bits of dst. 
1146 inline void ExtractBitRange(Register dst, Register src, int rangeStart, 1147 int rangeEnd, RCBit rc = LeaveRC, 1148 bool test = false) { 1149 DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer); 1150 int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd; 1151 int width = rangeStart - rangeEnd + 1; 1152 if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) { 1153 // Prefer faster andi when applicable. 1154 andi(dst, src, Operand(((1 << width) - 1) << rangeEnd)); 1155 } else { 1156#if V8_TARGET_ARCH_PPC64 1157 rldicl(dst, src, rotate, kBitsPerPointer - width, rc); 1158#else 1159 rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1, 1160 rc); 1161#endif 1162 } 1163 } 1164 1165 inline void ExtractBit(Register dst, Register src, uint32_t bitNumber, 1166 RCBit rc = LeaveRC, bool test = false) { 1167 ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test); 1168 } 1169 1170 // Extract consecutive bits (defined by mask) from src and place them 1171 // into the least significant bits of dst. 1172 inline void ExtractBitMask(Register dst, Register src, uintptr_t mask, 1173 RCBit rc = LeaveRC, bool test = false) { 1174 int start = kBitsPerPointer - 1; 1175 int end; 1176 uintptr_t bit = (1L << start); 1177 1178 while (bit && (mask & bit) == 0) { 1179 start--; 1180 bit >>= 1; 1181 } 1182 end = start; 1183 bit >>= 1; 1184 1185 while (bit && (mask & bit)) { 1186 end--; 1187 bit >>= 1; 1188 } 1189 1190 // 1-bits in mask must be contiguous 1191 DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0); 1192 1193 ExtractBitRange(dst, src, start, end, rc, test); 1194 } 1195 1196 // Test single bit in value. 1197 inline void TestBit(Register value, int bitNumber, Register scratch = r0) { 1198 ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true); 1199 } 1200 1201 // Test consecutive bit range in value. Range is defined by 1202 // rangeStart - rangeEnd. 
  inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
                           Register scratch = r0) {
    ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
  }

  // Test consecutive bit range in value. Range is defined by mask.
  inline void TestBitMask(Register value, uintptr_t mask,
                          Register scratch = r0) {
    ExtractBitMask(scratch, value, mask, SetRC, true);
  }


  // ---------------------------------------------------------------------------
  // Smi utilities

  // Shift left by kSmiShift
  void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
  void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
    ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
  }

#if !V8_TARGET_ARCH_PPC64
  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
  void SmiTagCheckOverflow(Register reg, Register overflow);
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);

  inline void JumpIfNotSmiCandidate(Register value, Register scratch,
                                    Label* not_smi_label) {
    // High bits must be identical to fit into an Smi
    STATIC_ASSERT(kSmiShift == 1);
    addis(scratch, value, Operand(0x40000000u >> 16));
    cmpi(scratch, Operand::Zero());
    blt(not_smi_label);
  }
#endif
  inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
    // The test is different for unsigned int values. Since we need
    // the value to be in the range of a positive smi, we can't
    // handle any of the high bits being set in the value.
    TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
                 scratch);
  }
  // Jump to not_smi_label if the (unsigned) value cannot be represented as a
  // positive smi (see TestUnsignedSmiCandidate above; result is in cr0).
  inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
                                            Label* not_smi_label) {
    TestUnsignedSmiCandidate(value, scratch);
    bne(not_smi_label, cr0);
  }

  // Arithmetic shift right by kSmiShift.
  void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }

  void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
    ShiftRightArithImm(dst, src, kSmiShift, rc);
  }

  // Convert a smi to an offset into an array of pointer-sized elements.
  void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
    ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
    ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
  }

  void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }

  void SmiToShortArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
    ShiftRightArithImm(dst, src, kSmiShift - 1);
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
    // Shift amounts cancel out; just move if needed.
    if (!dst.is(src)) {
      mr(dst, src);
    }
#endif
  }

  void SmiToIntArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
    ShiftRightArithImm(dst, src, kSmiShift - 2);
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
    ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
#endif
  }

#define SmiToFloatArrayOffset SmiToIntArrayOffset

  void SmiToDoubleArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
    ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
    ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
#endif
  }

  // General form of the helpers above: convert a smi to an offset into an
  // array of elements of size 1 << elementSizeLog2.
  void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
    if (kSmiShift < elementSizeLog2) {
      ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
    } else if (kSmiShift > elementSizeLog2) {
      ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
    } else if (!dst.is(src)) {
      mr(dst, src);
    }
  }

  // As above, but src holds either a smi (isSmi) or an untagged index.
  void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
                          bool isSmi) {
    if (isSmi) {
      SmiToArrayOffset(dst, src, elementSizeLog2);
    } else {
      ShiftLeftImm(dst, src, Operand(elementSizeLog2));
    }
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Test the smi tag bits of value; the result lands in cr0.
  inline void TestIfSmi(Register value, Register scratch) {
    TestBitRange(value, kSmiTagSize - 1, 0, scratch);
  }

  // Test the sign bit and the smi tag of value; the result lands in cr0.
  inline void TestIfPositiveSmi(Register value, Register scratch) {
#if V8_TARGET_ARCH_PPC64
    rldicl(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize), SetRC);
#else
    rlwinm(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize),
           kBitsPerPointer - 1, SetRC);
#endif
  }

  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    TestIfSmi(value, r0);
    beq(smi_label, cr0);  // branch if SMI
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    TestIfSmi(value, r0);
    bne(not_smi_label, cr0);
  }
  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a number, enabled via --debug-code.
  void AssertNotNumber(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);


#if V8_TARGET_ARCH_PPC64
  // Set cr to "equal" iff value fits in a signed 32-bit integer (i.e. the
  // high bits are a sign-extension of the low 32 bits).
  inline void TestIfInt32(Register value, Register scratch,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into an 32-bit integer
    extsw(scratch, value);
    cmp(scratch, value, cr);
  }
#else
  // 32-bit variant: the value is given as a hi_word/lo_word pair.
  inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into an 32-bit integer
    srawi(scratch, lo_word, 31);
    cmp(scratch, hi_word, cr);
  }
#endif

#if V8_TARGET_ARCH_PPC64
  // Ensure it is permissible to read/write int value directly from
  // upper half of the smi.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a function, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject,
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
  void AssertReceiver(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
  void AssertIsRoot(Register reg, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities

  void JumpIfNotHeapNumber(Register object, Register heap_number_map,
                           Register scratch, Label* on_not_heap_number);

  // ---------------------------------------------------------------------------
  // String utilities

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
                                                    Register object2,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Label* failure);

  // Checks if both objects are sequential one-byte strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* not_flat_one_byte_strings);

  // Checks if both instance types are sequential one-byte strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* failure);

  // Check if instance type is sequential one-byte string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                Label* failure);

  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);

  void EmitSeqStringSetCharCheck(Register string, Register index,
                                 Register value, uint32_t encoding_mask);

  // ---------------------------------------------------------------------------
  // Patching helpers.

  // Decode offset from constant pool load instruction(s).
  // Caller must place the instruction word at <location> in <result>.
  void DecodeConstantPoolOffset(Register result, Register location);

  // Saturate input_reg into the unsigned 8-bit range (see the double variant
  // below for the exact clamping rules).
  void ClampUint8(Register output_reg, Register input_reg);

  // Saturate a value into 8-bit unsigned integer
  //   if input_value < 0, output_value is 0
  //   if input_value > 255, output_value is 255
  //   otherwise output_value is the (int)input_value (round to nearest)
  void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);


  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  // Extract the bit field described by Field (kShift/kSize) from src into the
  // least significant bits of dst.
  template <typename Field>
  void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
    ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
                    rc);
  }

  // In-place variant of the above.
  template <typename Field>
  void DecodeField(Register reg, RCBit rc = LeaveRC) {
    DecodeField<Field>(reg, reg, rc);
  }

  // Extract the bit field described by Field from src and leave it in dst as
  // a smi.
  template <typename Field>
  void DecodeFieldToSmi(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    DecodeField<Field>(dst, src);
    SmiTag(dst);
#else
    // 32-bit can do this in one instruction:
    int start = Field::kSize + kSmiShift - 1;
    int end = kSmiShift;
    int rotate = kSmiShift - Field::kShift;
    if (rotate < 0) {
      rotate += kBitsPerPointer;
    }
    rlwinm(dst, src, rotate, kBitsPerPointer - start - 1,
           kBitsPerPointer - end - 1);
#endif
  }

  // In-place variant of the above.
  template <typename Field>
  void DecodeFieldToSmi(Register reg) {
    DecodeFieldToSmi<Field>(reg, reg);
  }

  // Load the type feedback vector from a JavaScript frame.
  void EmitLoadTypeFeedbackVector(Register vector);

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);
  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);

  // Expects object in r3 and returns map with validated enum cache
  // in r3. Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, condition flags are set to eq.
  // Tests whether the array pointed to by receiver_reg has an
  // AllocationMemento; sets the condition flags to eq if so, and jumps to
  // no_memento_found otherwise.  Both scratch registers are clobbered.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Register scratch2_reg,
                                       Label* no_memento_found);

  // Convenience wrapper: branches to memento_found when the array has an
  // AllocationMemento (i.e. when the test above sets eq), and falls through
  // otherwise.
  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Register scratch2_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
                                    &no_memento_found);
    // eq here means a memento was found.
    beq(memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

  // Loads the constant pool pointer (kConstantPoolRegister).
  void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
      Register code_target_address);
  void LoadConstantPoolPointerRegister();
  void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);

  // Abandons an in-progress constant pool; debug-only bookkeeping so the
  // pool's pending Label can be destroyed safely.
  void AbortConstantPoolBuilding() {
#ifdef DEBUG
    // Avoid DCHECK(!is_linked()) failure in ~Label()
    bind(ConstantPoolPosition());
#endif
  }

 private:
  // Number of bits a value is shifted left when Smi-tagged.
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // Initializes map and length fields of a freshly allocated string object.
  void InitializeNewString(Register string, Register length,
                           Heap::RootListIndex map_index, Register scratch1,
                           Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit.  Leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
                          Register mask_reg);

  // Register set saved/restored by the safepoint push/pop helpers.
  static const RegList kSafepointSavedRegisters;
  static const int kNumSafepointSavedRegisters;

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;  // True while generating a code stub.
  bool has_frame_;        // True when a frame has been entered.
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  // Whether the instruction cache is flushed when the patcher is destroyed.
  enum FlushICache { FLUSH, DONT_FLUSH };

  // Prepares to patch exactly <instructions> instructions at <address>.
  CodePatcher(Isolate* isolate, byte* address, int instructions,
              FlushICache flush_cache = FLUSH);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit the condition part of an instruction leaving the rest of the current
  // instruction unchanged.
  void EmitCondition(Condition cond);

 private:
  byte* address_;            // The address of the code being patched.
  int size_;                 // Number of bytes of the expected patch size.
  MacroAssembler masm_;      // Macro assembler used to generate the code.
  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
};


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate a MemOperand for loading a slot from a context object.
inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}


// Generate a MemOperand for loading the native context from the current
// context register (cp).
inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}


#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm)    \
  masm->stop(__FILE_LINE__); \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
}  // namespace internal
}  // namespace v8

#endif  // V8_PPC_MACRO_ASSEMBLER_PPC_H_