// macro-assembler-ppc.h revision 958fae7ec3f466955f8e5b50fa5b8d38b9e91675
1// Copyright 2014 the V8 project authors. All rights reserved. 2// Use of this source code is governed by a BSD-style license that can be 3// found in the LICENSE file. 4 5#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_ 6#define V8_PPC_MACRO_ASSEMBLER_PPC_H_ 7 8#include "src/assembler.h" 9#include "src/bailout-reason.h" 10#include "src/frames.h" 11#include "src/globals.h" 12 13namespace v8 { 14namespace internal { 15 16// ---------------------------------------------------------------------------- 17// Static helper functions 18 19// Generate a MemOperand for loading a field from an object. 20inline MemOperand FieldMemOperand(Register object, int offset) { 21 return MemOperand(object, offset - kHeapObjectTag); 22} 23 24 25// Flags used for AllocateHeapNumber 26enum TaggingMode { 27 // Tag the result. 28 TAG_RESULT, 29 // Don't tag 30 DONT_TAG_RESULT 31}; 32 33 34enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; 35enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; 36enum PointersToHereCheck { 37 kPointersToHereMaybeInteresting, 38 kPointersToHereAreAlwaysInteresting 39}; 40enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; 41 42 43Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg, 44 Register reg3 = no_reg, 45 Register reg4 = no_reg, 46 Register reg5 = no_reg, 47 Register reg6 = no_reg); 48 49 50#ifdef DEBUG 51bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg, 52 Register reg4 = no_reg, Register reg5 = no_reg, 53 Register reg6 = no_reg, Register reg7 = no_reg, 54 Register reg8 = no_reg); 55#endif 56 57// These exist to provide portability between 32 and 64bit 58#if V8_TARGET_ARCH_PPC64 59#define LoadPU ldu 60#define LoadPX ldx 61#define LoadPUX ldux 62#define StorePU stdu 63#define StorePX stdx 64#define StorePUX stdux 65#define ShiftLeftImm sldi 66#define ShiftRightImm srdi 67#define ClearLeftImm clrldi 68#define ClearRightImm clrrdi 69#define ShiftRightArithImm sradi 70#define ShiftLeft_ sld 
71#define ShiftRight_ srd 72#define ShiftRightArith srad 73#define Mul mulld 74#define Div divd 75#else 76#define LoadPU lwzu 77#define LoadPX lwzx 78#define LoadPUX lwzux 79#define StorePU stwu 80#define StorePX stwx 81#define StorePUX stwux 82#define ShiftLeftImm slwi 83#define ShiftRightImm srwi 84#define ClearLeftImm clrlwi 85#define ClearRightImm clrrwi 86#define ShiftRightArithImm srawi 87#define ShiftLeft_ slw 88#define ShiftRight_ srw 89#define ShiftRightArith sraw 90#define Mul mullw 91#define Div divw 92#endif 93 94 95// MacroAssembler implements a collection of frequently used macros. 96class MacroAssembler : public Assembler { 97 public: 98 // The isolate parameter can be NULL if the macro assembler should 99 // not use isolate-dependent functionality. In this case, it's the 100 // responsibility of the caller to never invoke such function on the 101 // macro assembler. 102 MacroAssembler(Isolate* isolate, void* buffer, int size); 103 104 105 // Returns the size of a call in instructions. Note, the value returned is 106 // only valid as long as no entries are added to the constant pool between 107 // checking the call size and emitting the actual call. 108 static int CallSize(Register target); 109 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); 110 static int CallSizeNotPredictableCodeSize(Address target, 111 RelocInfo::Mode rmode, 112 Condition cond = al); 113 114 // Jump, Call, and Ret pseudo instructions implementing inter-working. 
115 void Jump(Register target); 116 void JumpToJSEntry(Register target); 117 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al, 118 CRegister cr = cr7); 119 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); 120 void Call(Register target); 121 void CallJSEntry(Register target); 122 void Call(Address target, RelocInfo::Mode rmode, Condition cond = al); 123 int CallSize(Handle<Code> code, 124 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, 125 TypeFeedbackId ast_id = TypeFeedbackId::None(), 126 Condition cond = al); 127 void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, 128 TypeFeedbackId ast_id = TypeFeedbackId::None(), 129 Condition cond = al); 130 void Ret(Condition cond = al); 131 132 // Emit code to discard a non-negative number of pointer-sized elements 133 // from the stack, clobbering only the sp register. 134 void Drop(int count, Condition cond = al); 135 136 void Ret(int drop, Condition cond = al); 137 138 void Call(Label* target); 139 140 // Emit call to the code we are currently generating. 141 void CallSelf() { 142 Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location())); 143 Call(self, RelocInfo::CODE_TARGET); 144 } 145 146 // Register move. May do nothing if the registers are identical. 147 void Move(Register dst, Handle<Object> value); 148 void Move(Register dst, Register src, Condition cond = al); 149 void Move(DoubleRegister dst, DoubleRegister src); 150 151 void MultiPush(RegList regs); 152 void MultiPop(RegList regs); 153 154 // Load an object from the root table. 155 void LoadRoot(Register destination, Heap::RootListIndex index, 156 Condition cond = al); 157 // Store an object to the root table. 
158 void StoreRoot(Register source, Heap::RootListIndex index, 159 Condition cond = al); 160 161 // --------------------------------------------------------------------------- 162 // GC Support 163 164 void IncrementalMarkingRecordWriteHelper(Register object, Register value, 165 Register address); 166 167 enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd }; 168 169 // Record in the remembered set the fact that we have a pointer to new space 170 // at the address pointed to by the addr register. Only works if addr is not 171 // in new space. 172 void RememberedSetHelper(Register object, // Used for debug code. 173 Register addr, Register scratch, 174 SaveFPRegsMode save_fp, 175 RememberedSetFinalAction and_then); 176 177 void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, 178 Label* condition_met); 179 180 void CheckMapDeprecated(Handle<Map> map, Register scratch, 181 Label* if_deprecated); 182 183 // Check if object is in new space. Jumps if the object is not in new space. 184 // The register scratch can be object itself, but scratch will be clobbered. 185 void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) { 186 InNewSpace(object, scratch, ne, branch); 187 } 188 189 // Check if object is in new space. Jumps if the object is in new space. 190 // The register scratch can be object itself, but it will be clobbered. 191 void JumpIfInNewSpace(Register object, Register scratch, Label* branch) { 192 InNewSpace(object, scratch, eq, branch); 193 } 194 195 // Check if an object has a given incremental marking color. 196 void HasColor(Register object, Register scratch0, Register scratch1, 197 Label* has_color, int first_bit, int second_bit); 198 199 void JumpIfBlack(Register object, Register scratch0, Register scratch1, 200 Label* on_black); 201 202 // Checks the color of an object. If the object is already grey or black 203 // then we just fall through, since it is already live. 
If it is white and 204 // we can determine that it doesn't need to be scanned, then we just mark it 205 // black and fall through. For the rest we jump to the label so the 206 // incremental marker can fix its assumptions. 207 void EnsureNotWhite(Register object, Register scratch1, Register scratch2, 208 Register scratch3, Label* object_is_white_and_not_data); 209 210 // Detects conservatively whether an object is data-only, i.e. it does need to 211 // be scanned by the garbage collector. 212 void JumpIfDataObject(Register value, Register scratch, 213 Label* not_data_object); 214 215 // Notify the garbage collector that we wrote a pointer into an object. 216 // |object| is the object being stored into, |value| is the object being 217 // stored. value and scratch registers are clobbered by the operation. 218 // The offset is the offset from the start of the object, not the offset from 219 // the tagged HeapObject pointer. For use with FieldOperand(reg, off). 220 void RecordWriteField( 221 Register object, int offset, Register value, Register scratch, 222 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, 223 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 224 SmiCheck smi_check = INLINE_SMI_CHECK, 225 PointersToHereCheck pointers_to_here_check_for_value = 226 kPointersToHereMaybeInteresting); 227 228 // As above, but the offset has the tag presubtracted. For use with 229 // MemOperand(reg, off). 
230 inline void RecordWriteContextSlot( 231 Register context, int offset, Register value, Register scratch, 232 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, 233 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 234 SmiCheck smi_check = INLINE_SMI_CHECK, 235 PointersToHereCheck pointers_to_here_check_for_value = 236 kPointersToHereMaybeInteresting) { 237 RecordWriteField(context, offset + kHeapObjectTag, value, scratch, 238 lr_status, save_fp, remembered_set_action, smi_check, 239 pointers_to_here_check_for_value); 240 } 241 242 void RecordWriteForMap(Register object, Register map, Register dst, 243 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp); 244 245 // For a given |object| notify the garbage collector that the slot |address| 246 // has been written. |value| is the object being stored. The value and 247 // address registers are clobbered by the operation. 248 void RecordWrite( 249 Register object, Register address, Register value, 250 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, 251 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 252 SmiCheck smi_check = INLINE_SMI_CHECK, 253 PointersToHereCheck pointers_to_here_check_for_value = 254 kPointersToHereMaybeInteresting); 255 256 void Push(Register src) { push(src); } 257 258 // Push a handle. 259 void Push(Handle<Object> handle); 260 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } 261 262 // Push two registers. Pushes leftmost register first (to highest address). 263 void Push(Register src1, Register src2) { 264 StorePU(src2, MemOperand(sp, -2 * kPointerSize)); 265 StoreP(src1, MemOperand(sp, kPointerSize)); 266 } 267 268 // Push three registers. Pushes leftmost register first (to highest address). 269 void Push(Register src1, Register src2, Register src3) { 270 StorePU(src3, MemOperand(sp, -3 * kPointerSize)); 271 StoreP(src2, MemOperand(sp, kPointerSize)); 272 StoreP(src1, MemOperand(sp, 2 * kPointerSize)); 273 } 274 275 // Push four registers. 
Pushes leftmost register first (to highest address). 276 void Push(Register src1, Register src2, Register src3, Register src4) { 277 StorePU(src4, MemOperand(sp, -4 * kPointerSize)); 278 StoreP(src3, MemOperand(sp, kPointerSize)); 279 StoreP(src2, MemOperand(sp, 2 * kPointerSize)); 280 StoreP(src1, MemOperand(sp, 3 * kPointerSize)); 281 } 282 283 // Push five registers. Pushes leftmost register first (to highest address). 284 void Push(Register src1, Register src2, Register src3, Register src4, 285 Register src5) { 286 StorePU(src5, MemOperand(sp, -5 * kPointerSize)); 287 StoreP(src4, MemOperand(sp, kPointerSize)); 288 StoreP(src3, MemOperand(sp, 2 * kPointerSize)); 289 StoreP(src2, MemOperand(sp, 3 * kPointerSize)); 290 StoreP(src1, MemOperand(sp, 4 * kPointerSize)); 291 } 292 293 void Pop(Register dst) { pop(dst); } 294 295 // Pop two registers. Pops rightmost register first (from lower address). 296 void Pop(Register src1, Register src2) { 297 LoadP(src2, MemOperand(sp, 0)); 298 LoadP(src1, MemOperand(sp, kPointerSize)); 299 addi(sp, sp, Operand(2 * kPointerSize)); 300 } 301 302 // Pop three registers. Pops rightmost register first (from lower address). 303 void Pop(Register src1, Register src2, Register src3) { 304 LoadP(src3, MemOperand(sp, 0)); 305 LoadP(src2, MemOperand(sp, kPointerSize)); 306 LoadP(src1, MemOperand(sp, 2 * kPointerSize)); 307 addi(sp, sp, Operand(3 * kPointerSize)); 308 } 309 310 // Pop four registers. Pops rightmost register first (from lower address). 311 void Pop(Register src1, Register src2, Register src3, Register src4) { 312 LoadP(src4, MemOperand(sp, 0)); 313 LoadP(src3, MemOperand(sp, kPointerSize)); 314 LoadP(src2, MemOperand(sp, 2 * kPointerSize)); 315 LoadP(src1, MemOperand(sp, 3 * kPointerSize)); 316 addi(sp, sp, Operand(4 * kPointerSize)); 317 } 318 319 // Pop five registers. Pops rightmost register first (from lower address). 
320 void Pop(Register src1, Register src2, Register src3, Register src4, 321 Register src5) { 322 LoadP(src5, MemOperand(sp, 0)); 323 LoadP(src4, MemOperand(sp, kPointerSize)); 324 LoadP(src3, MemOperand(sp, 2 * kPointerSize)); 325 LoadP(src2, MemOperand(sp, 3 * kPointerSize)); 326 LoadP(src1, MemOperand(sp, 4 * kPointerSize)); 327 addi(sp, sp, Operand(5 * kPointerSize)); 328 } 329 330 // Push a fixed frame, consisting of lr, fp, context and 331 // JS function / marker id if marker_reg is a valid register. 332 void PushFixedFrame(Register marker_reg = no_reg); 333 void PopFixedFrame(Register marker_reg = no_reg); 334 335 // Push and pop the registers that can hold pointers, as defined by the 336 // RegList constant kSafepointSavedRegisters. 337 void PushSafepointRegisters(); 338 void PopSafepointRegisters(); 339 // Store value in register src in the safepoint stack slot for 340 // register dst. 341 void StoreToSafepointRegisterSlot(Register src, Register dst); 342 // Load the value of the src register from its safepoint stack slot 343 // into register dst. 344 void LoadFromSafepointRegisterSlot(Register dst, Register src); 345 346 // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache 347 // from C. 348 // Does not handle errors. 349 void FlushICache(Register address, size_t size, Register scratch); 350 351 // If the value is a NaN, canonicalize the value else, do nothing. 
352 void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src); 353 void CanonicalizeNaN(const DoubleRegister value) { 354 CanonicalizeNaN(value, value); 355 } 356 357 // Converts the integer (untagged smi) in |src| to a double, storing 358 // the result to |double_dst| 359 void ConvertIntToDouble(Register src, DoubleRegister double_dst); 360 361 // Converts the unsigned integer (untagged smi) in |src| to 362 // a double, storing the result to |double_dst| 363 void ConvertUnsignedIntToDouble(Register src, DoubleRegister double_dst); 364 365 // Converts the integer (untagged smi) in |src| to 366 // a float, storing the result in |dst| 367 // Warning: The value in |int_scrach| will be changed in the process! 368 void ConvertIntToFloat(const DoubleRegister dst, const Register src, 369 const Register int_scratch); 370 371 // Converts the double_input to an integer. Note that, upon return, 372 // the contents of double_dst will also hold the fixed point representation. 373 void ConvertDoubleToInt64(const DoubleRegister double_input, 374#if !V8_TARGET_ARCH_PPC64 375 const Register dst_hi, 376#endif 377 const Register dst, const DoubleRegister double_dst, 378 FPRoundingMode rounding_mode = kRoundToZero); 379 380 // Generates function and stub prologue code. 381 void StubPrologue(int prologue_offset = 0); 382 void Prologue(bool code_pre_aging, int prologue_offset = 0); 383 384 // Enter exit frame. 385 // stack_space - extra stack space, used for alignment before call to C. 386 void EnterExitFrame(bool save_doubles, int stack_space = 0); 387 388 // Leave the current exit frame. Expects the return value in r0. 389 // Expect the number of values, pushed prior to the exit frame, to 390 // remove in a register (or no_reg, if there is nothing to remove). 391 void LeaveExitFrame(bool save_doubles, Register argument_count, 392 bool restore_context); 393 394 // Get the actual activation frame alignment for target environment. 
395 static int ActivationFrameAlignment(); 396 397 void LoadContext(Register dst, int context_chain_length); 398 399 // Conditionally load the cached Array transitioned map of type 400 // transitioned_kind from the native context if the map in register 401 // map_in_out is the cached Array map in the native context of 402 // expected_kind. 403 void LoadTransitionedArrayMapConditional(ElementsKind expected_kind, 404 ElementsKind transitioned_kind, 405 Register map_in_out, 406 Register scratch, 407 Label* no_map_match); 408 409 void LoadGlobalFunction(int index, Register function); 410 411 // Load the initial map from the global function. The registers 412 // function and map can be the same, function is then overwritten. 413 void LoadGlobalFunctionInitialMap(Register function, Register map, 414 Register scratch); 415 416 void InitializeRootRegister() { 417 ExternalReference roots_array_start = 418 ExternalReference::roots_array_start(isolate()); 419 mov(kRootRegister, Operand(roots_array_start)); 420 } 421 422 // ---------------------------------------------------------------- 423 // new PPC macro-assembler interfaces that are slightly higher level 424 // than assembler-ppc and may generate variable length sequences 425 426 // load a literal signed int value <value> to GPR <dst> 427 void LoadIntLiteral(Register dst, int value); 428 429 // load an SMI value <value> to GPR <dst> 430 void LoadSmiLiteral(Register dst, Smi* smi); 431 432 // load a literal double value <value> to FPR <result> 433 void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch); 434 435 void LoadWord(Register dst, const MemOperand& mem, Register scratch); 436 437 void LoadWordArith(Register dst, const MemOperand& mem, 438 Register scratch = no_reg); 439 440 void StoreWord(Register src, const MemOperand& mem, Register scratch); 441 442 void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch); 443 444 void StoreHalfWord(Register src, const MemOperand& mem, 
Register scratch); 445 446 void LoadByte(Register dst, const MemOperand& mem, Register scratch); 447 448 void StoreByte(Register src, const MemOperand& mem, Register scratch); 449 450 void LoadRepresentation(Register dst, const MemOperand& mem, Representation r, 451 Register scratch = no_reg); 452 453 void StoreRepresentation(Register src, const MemOperand& mem, 454 Representation r, Register scratch = no_reg); 455 456 // Move values between integer and floating point registers. 457 void MovIntToDouble(DoubleRegister dst, Register src, Register scratch); 458 void MovUnsignedIntToDouble(DoubleRegister dst, Register src, 459 Register scratch); 460 void MovInt64ToDouble(DoubleRegister dst, 461#if !V8_TARGET_ARCH_PPC64 462 Register src_hi, 463#endif 464 Register src); 465#if V8_TARGET_ARCH_PPC64 466 void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi, 467 Register src_lo, Register scratch); 468#endif 469 void MovDoubleLowToInt(Register dst, DoubleRegister src); 470 void MovDoubleHighToInt(Register dst, DoubleRegister src); 471 void MovDoubleToInt64( 472#if !V8_TARGET_ARCH_PPC64 473 Register dst_hi, 474#endif 475 Register dst, DoubleRegister src); 476 477 void Add(Register dst, Register src, intptr_t value, Register scratch); 478 void Cmpi(Register src1, const Operand& src2, Register scratch, 479 CRegister cr = cr7); 480 void Cmpli(Register src1, const Operand& src2, Register scratch, 481 CRegister cr = cr7); 482 void Cmpwi(Register src1, const Operand& src2, Register scratch, 483 CRegister cr = cr7); 484 void Cmplwi(Register src1, const Operand& src2, Register scratch, 485 CRegister cr = cr7); 486 void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC); 487 void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC); 488 void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC); 489 490 void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch); 491 void SubSmiLiteral(Register dst, Register 
src, Smi* smi, Register scratch); 492 void CmpSmiLiteral(Register src1, Smi* smi, Register scratch, 493 CRegister cr = cr7); 494 void CmplSmiLiteral(Register src1, Smi* smi, Register scratch, 495 CRegister cr = cr7); 496 void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch, 497 RCBit rc = LeaveRC); 498 499 // Set new rounding mode RN to FPSCR 500 void SetRoundingMode(FPRoundingMode RN); 501 502 // reset rounding mode to default (kRoundToNearest) 503 void ResetRoundingMode(); 504 505 // These exist to provide portability between 32 and 64bit 506 void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg); 507 void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg); 508 509 // --------------------------------------------------------------------------- 510 // JavaScript invokes 511 512 // Invoke the JavaScript function code by either calling or jumping. 513 void InvokeCode(Register code, const ParameterCount& expected, 514 const ParameterCount& actual, InvokeFlag flag, 515 const CallWrapper& call_wrapper); 516 517 // Invoke the JavaScript function in the given register. Changes the 518 // current context to the context in the function before invoking. 
519 void InvokeFunction(Register function, const ParameterCount& actual, 520 InvokeFlag flag, const CallWrapper& call_wrapper); 521 522 void InvokeFunction(Register function, const ParameterCount& expected, 523 const ParameterCount& actual, InvokeFlag flag, 524 const CallWrapper& call_wrapper); 525 526 void InvokeFunction(Handle<JSFunction> function, 527 const ParameterCount& expected, 528 const ParameterCount& actual, InvokeFlag flag, 529 const CallWrapper& call_wrapper); 530 531 void IsObjectJSObjectType(Register heap_object, Register map, 532 Register scratch, Label* fail); 533 534 void IsInstanceJSObjectType(Register map, Register scratch, Label* fail); 535 536 void IsObjectJSStringType(Register object, Register scratch, Label* fail); 537 538 void IsObjectNameType(Register object, Register scratch, Label* fail); 539 540 // --------------------------------------------------------------------------- 541 // Debugger Support 542 543 void DebugBreak(); 544 545 // --------------------------------------------------------------------------- 546 // Exception handling 547 548 // Push a new try handler and link into try handler chain. 549 void PushTryHandler(StackHandler::Kind kind, int handler_index); 550 551 // Unlink the stack handler on top of the stack from the try handler chain. 552 // Must preserve the result register. 553 void PopTryHandler(); 554 555 // Passes thrown value to the handler of top of the try handler chain. 556 void Throw(Register value); 557 558 // Propagates an uncatchable exception to the top of the current JS stack's 559 // handler chain. 560 void ThrowUncatchable(Register value); 561 562 // --------------------------------------------------------------------------- 563 // Inline caching support 564 565 // Generate code for checking access rights - used for security checks 566 // on access to global objects across environments. The holder register 567 // is left untouched, whereas both scratch registers are clobbered. 
568 void CheckAccessGlobalProxy(Register holder_reg, Register scratch, 569 Label* miss); 570 571 void GetNumberHash(Register t0, Register scratch); 572 573 void LoadFromNumberDictionary(Label* miss, Register elements, Register key, 574 Register result, Register t0, Register t1, 575 Register t2); 576 577 578 inline void MarkCode(NopMarkerTypes type) { nop(type); } 579 580 // Check if the given instruction is a 'type' marker. 581 // i.e. check if is is a mov r<type>, r<type> (referenced as nop(type)) 582 // These instructions are generated to mark special location in the code, 583 // like some special IC code. 584 static inline bool IsMarkedCode(Instr instr, int type) { 585 DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)); 586 return IsNop(instr, type); 587 } 588 589 590 static inline int GetCodeMarker(Instr instr) { 591 int dst_reg_offset = 12; 592 int dst_mask = 0xf << dst_reg_offset; 593 int src_mask = 0xf; 594 int dst_reg = (instr & dst_mask) >> dst_reg_offset; 595 int src_reg = instr & src_mask; 596 uint32_t non_register_mask = ~(dst_mask | src_mask); 597 uint32_t mov_mask = al | 13 << 21; 598 599 // Return <n> if we have a mov rn rn, else return -1. 600 int type = ((instr & non_register_mask) == mov_mask) && 601 (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) && 602 (dst_reg < LAST_CODE_MARKER) 603 ? src_reg 604 : -1; 605 DCHECK((type == -1) || 606 ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER))); 607 return type; 608 } 609 610 611 // --------------------------------------------------------------------------- 612 // Allocation support 613 614 // Allocate an object in new space or old pointer space. The object_size is 615 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS 616 // is passed. If the space is exhausted control continues at the gc_required 617 // label. The allocated object is returned in result. If the flag 618 // tag_allocated_object is true the result is tagged as as a heap object. 
619 // All registers are clobbered also when control continues at the gc_required 620 // label. 621 void Allocate(int object_size, Register result, Register scratch1, 622 Register scratch2, Label* gc_required, AllocationFlags flags); 623 624 void Allocate(Register object_size, Register result, Register scratch1, 625 Register scratch2, Label* gc_required, AllocationFlags flags); 626 627 // Undo allocation in new space. The object passed and objects allocated after 628 // it will no longer be allocated. The caller must make sure that no pointers 629 // are left to the object(s) no longer allocated as they would be invalid when 630 // allocation is undone. 631 void UndoAllocationInNewSpace(Register object, Register scratch); 632 633 634 void AllocateTwoByteString(Register result, Register length, 635 Register scratch1, Register scratch2, 636 Register scratch3, Label* gc_required); 637 void AllocateOneByteString(Register result, Register length, 638 Register scratch1, Register scratch2, 639 Register scratch3, Label* gc_required); 640 void AllocateTwoByteConsString(Register result, Register length, 641 Register scratch1, Register scratch2, 642 Label* gc_required); 643 void AllocateOneByteConsString(Register result, Register length, 644 Register scratch1, Register scratch2, 645 Label* gc_required); 646 void AllocateTwoByteSlicedString(Register result, Register length, 647 Register scratch1, Register scratch2, 648 Label* gc_required); 649 void AllocateOneByteSlicedString(Register result, Register length, 650 Register scratch1, Register scratch2, 651 Label* gc_required); 652 653 // Allocates a heap number or jumps to the gc_required label if the young 654 // space is full and a scavenge is needed. All registers are clobbered also 655 // when control continues at the gc_required label. 
656 void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, 657 Register heap_number_map, Label* gc_required, 658 TaggingMode tagging_mode = TAG_RESULT, 659 MutableMode mode = IMMUTABLE); 660 void AllocateHeapNumberWithValue(Register result, DoubleRegister value, 661 Register scratch1, Register scratch2, 662 Register heap_number_map, 663 Label* gc_required); 664 665 // Copies a fixed number of fields of heap objects from src to dst. 666 void CopyFields(Register dst, Register src, RegList temps, int field_count); 667 668 // Copies a number of bytes from src to dst. All registers are clobbered. On 669 // exit src and dst will point to the place just after where the last byte was 670 // read or written and length will be zero. 671 void CopyBytes(Register src, Register dst, Register length, Register scratch); 672 673 // Initialize fields with filler values. |count| fields starting at 674 // |start_offset| are overwritten with the value in |filler|. At the end the 675 // loop, |start_offset| points at the next uninitialized field. |count| is 676 // assumed to be non-zero. 677 void InitializeNFieldsWithFiller(Register start_offset, Register count, 678 Register filler); 679 680 // Initialize fields with filler values. Fields starting at |start_offset| 681 // not including end_offset are overwritten with the value in |filler|. At 682 // the end the loop, |start_offset| takes the value of |end_offset|. 683 void InitializeFieldsWithFiller(Register start_offset, Register end_offset, 684 Register filler); 685 686 // --------------------------------------------------------------------------- 687 // Support functions. 688 689 // Try to get function prototype of a function and puts the value in 690 // the result register. Checks that the function really is a 691 // function and jumps to the miss label if the fast checks fail. The 692 // function register will be untouched; the other registers may be 693 // clobbered. 
694 void TryGetFunctionPrototype(Register function, Register result, 695 Register scratch, Label* miss, 696 bool miss_on_bound_function = false); 697 698 // Compare object type for heap object. heap_object contains a non-Smi 699 // whose object type should be compared with the given type. This both 700 // sets the flags and leaves the object type in the type_reg register. 701 // It leaves the map in the map register (unless the type_reg and map register 702 // are the same register). It leaves the heap object in the heap_object 703 // register unless the heap_object register is the same register as one of the 704 // other registers. 705 // Type_reg can be no_reg. In that case ip is used. 706 void CompareObjectType(Register heap_object, Register map, Register type_reg, 707 InstanceType type); 708 709 // Compare object type for heap object. Branch to false_label if type 710 // is lower than min_type or greater than max_type. 711 // Load map into the register map. 712 void CheckObjectTypeRange(Register heap_object, Register map, 713 InstanceType min_type, InstanceType max_type, 714 Label* false_label); 715 716 // Compare instance type in a map. map contains a valid map object whose 717 // object type should be compared with the given type. This both 718 // sets the flags and leaves the object type in the type_reg register. 719 void CompareInstanceType(Register map, Register type_reg, InstanceType type); 720 721 722 // Check if a map for a JSObject indicates that the object has fast elements. 723 // Jump to the specified label if it does not. 724 void CheckFastElements(Register map, Register scratch, Label* fail); 725 726 // Check if a map for a JSObject indicates that the object can have both smi 727 // and HeapObject elements. Jump to the specified label if it does not. 728 void CheckFastObjectElements(Register map, Register scratch, Label* fail); 729 730 // Check if a map for a JSObject indicates that the object has fast smi only 731 // elements. 
Jump to the specified label if it does not. 732 void CheckFastSmiElements(Register map, Register scratch, Label* fail); 733 734 // Check to see if maybe_number can be stored as a double in 735 // FastDoubleElements. If it can, store it at the index specified by key in 736 // the FastDoubleElements array elements. Otherwise jump to fail. 737 void StoreNumberToDoubleElements(Register value_reg, Register key_reg, 738 Register elements_reg, Register scratch1, 739 DoubleRegister double_scratch, Label* fail, 740 int elements_offset = 0); 741 742 // Compare an object's map with the specified map and its transitioned 743 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are 744 // set with result of map compare. If multiple map compares are required, the 745 // compare sequences branches to early_success. 746 void CompareMap(Register obj, Register scratch, Handle<Map> map, 747 Label* early_success); 748 749 // As above, but the map of the object is already loaded into the register 750 // which is preserved by the code generated. 751 void CompareMap(Register obj_map, Handle<Map> map, Label* early_success); 752 753 // Check if the map of an object is equal to a specified map and branch to 754 // label if not. Skip the smi check if not required (object is known to be a 755 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match 756 // against maps that are ElementsKind transition maps of the specified map. 757 void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail, 758 SmiCheckType smi_check_type); 759 760 761 void CheckMap(Register obj, Register scratch, Heap::RootListIndex index, 762 Label* fail, SmiCheckType smi_check_type); 763 764 765 // Check if the map of an object is equal to a specified map and branch to a 766 // specified target if equal. 
Skip the smi check if not required (object is 767 // known to be a heap object) 768 void DispatchMap(Register obj, Register scratch, Handle<Map> map, 769 Handle<Code> success, SmiCheckType smi_check_type); 770 771 772 // Compare the object in a register to a value from the root list. 773 // Uses the ip register as scratch. 774 void CompareRoot(Register obj, Heap::RootListIndex index); 775 776 777 // Load and check the instance type of an object for being a string. 778 // Loads the type into the second argument register. 779 // Returns a condition that will be enabled if the object was a string. 780 Condition IsObjectStringType(Register obj, Register type) { 781 LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset)); 782 lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); 783 andi(r0, type, Operand(kIsNotStringMask)); 784 DCHECK_EQ(0, kStringTag); 785 return eq; 786 } 787 788 789 // Picks out an array index from the hash field. 790 // Register use: 791 // hash - holds the index's hash. Clobbered. 792 // index - holds the overwritten index on exit. 793 void IndexFromHash(Register hash, Register index); 794 795 // Get the number of least significant bits from a register 796 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); 797 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); 798 799 // Load the value of a smi object into a double register. 800 void SmiToDouble(DoubleRegister value, Register smi); 801 802 // Check if a double can be exactly represented as a signed 32-bit integer. 803 // CR_EQ in cr7 is set if true. 804 void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1, 805 Register scratch2, DoubleRegister double_scratch); 806 807 // Try to convert a double to a signed 32-bit integer. 808 // CR_EQ in cr7 is set and result assigned if the conversion is exact. 
809 void TryDoubleToInt32Exact(Register result, DoubleRegister double_input, 810 Register scratch, DoubleRegister double_scratch); 811 812 // Floor a double and writes the value to the result register. 813 // Go to exact if the conversion is exact (to be able to test -0), 814 // fall through calling code if an overflow occurred, else go to done. 815 // In return, input_high is loaded with high bits of input. 816 void TryInt32Floor(Register result, DoubleRegister double_input, 817 Register input_high, Register scratch, 818 DoubleRegister double_scratch, Label* done, Label* exact); 819 820 // Performs a truncating conversion of a floating point number as used by 821 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it 822 // succeeds, otherwise falls through if result is saturated. On return 823 // 'result' either holds answer, or is clobbered on fall through. 824 // 825 // Only public for the test code in test-code-stubs-arm.cc. 826 void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, 827 Label* done); 828 829 // Performs a truncating conversion of a floating point number as used by 830 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 831 // Exits with 'result' holding the answer. 832 void TruncateDoubleToI(Register result, DoubleRegister double_input); 833 834 // Performs a truncating conversion of a heap number as used by 835 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input' 836 // must be different registers. Exits with 'result' holding the answer. 837 void TruncateHeapNumberToI(Register result, Register object); 838 839 // Converts the smi or heap number in object to an int32 using the rules 840 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated 841 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be 842 // different registers. 
void TruncateNumberToI(Register object, Register result,
                         Register heap_number_map, Register scratch1,
                         Label* not_int32);

  // Overflow handling functions.
  // Usage: call the appropriate arithmetic function and then call one of the
  // flow control functions with the corresponding label.

  // Compute dst = left + right, setting condition codes. dst may be same as
  // either left or right (or a unique register). left and right must not be
  // the same register.
  void AddAndCheckForOverflow(Register dst, Register left, Register right,
                              Register overflow_dst, Register scratch = r0);
  void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
                              Register overflow_dst, Register scratch = r0);

  // Compute dst = left - right, setting condition codes. dst may be same as
  // either left or right (or a unique register). left and right must not be
  // the same register.
  void SubAndCheckForOverflow(Register dst, Register left, Register right,
                              Register overflow_dst, Register scratch = r0);

  // Branch if the preceding overflow check signalled overflow
  // (overflow_dst < 0, i.e. lt in cr0).
  void BranchOnOverflow(Label* label) { blt(label, cr0); }

  // Branch if the preceding overflow check signalled no overflow.
  void BranchOnNoOverflow(Label* label) { bge(label, cr0); }

  // NOTE(review): blt branches *over* the Ret() when the overflow bit
  // (lt in cr0) is set, so this returns only when no overflow occurred.
  // That sense appears inverted relative to the name (and to
  // RetOnNoOverflow below, which has the mirror problem) -- verify
  // against callers before relying on these helpers.
  void RetOnOverflow(void) {
    Label label;

    blt(&label, cr0);
    Ret();
    bind(&label);
  }

  void RetOnNoOverflow(void) {
    Label label;

    bge(&label, cr0);
    Ret();
    bind(&label);
  }

  // Pushes <count> double values to <location>, starting from d<first>.
  void SaveFPRegs(Register location, int first, int count);

  // Pops <count> double values from <location>, starting from d<first>.
  void RestoreFPRegs(Register location, int first, int count);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
                Condition cond = al);

  // Tail call a code stub (jump).
899 void TailCallStub(CodeStub* stub, Condition cond = al); 900 901 // Call a runtime routine. 902 void CallRuntime(const Runtime::Function* f, int num_arguments, 903 SaveFPRegsMode save_doubles = kDontSaveFPRegs); 904 void CallRuntimeSaveDoubles(Runtime::FunctionId id) { 905 const Runtime::Function* function = Runtime::FunctionForId(id); 906 CallRuntime(function, function->nargs, kSaveFPRegs); 907 } 908 909 // Convenience function: Same as above, but takes the fid instead. 910 void CallRuntime(Runtime::FunctionId id, int num_arguments, 911 SaveFPRegsMode save_doubles = kDontSaveFPRegs) { 912 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles); 913 } 914 915 // Convenience function: call an external reference. 916 void CallExternalReference(const ExternalReference& ext, int num_arguments); 917 918 // Tail call of a runtime routine (jump). 919 // Like JumpToExternalReference, but also takes care of passing the number 920 // of parameters. 921 void TailCallExternalReference(const ExternalReference& ext, 922 int num_arguments, int result_size); 923 924 // Convenience function: tail call a runtime routine (jump). 925 void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, 926 int result_size); 927 928 int CalculateStackPassedWords(int num_reg_arguments, 929 int num_double_arguments); 930 931 // Before calling a C-function from generated code, align arguments on stack. 932 // After aligning the frame, non-register arguments must be stored in 933 // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments 934 // are word sized. If double arguments are used, this function assumes that 935 // all double arguments are stored before core registers; otherwise the 936 // correct alignment of the double values is not guaranteed. 937 // Some compilers/platforms require the stack to be aligned when calling 938 // C++ code. 939 // Needs a scratch register to do some arithmetic. This register will be 940 // trashed. 
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments, Register scratch);

  // There are two ways of passing double arguments on this platform, depending
  // on whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  void MovFromFloatParameter(DoubleRegister dst);
  void MovFromFloatResult(DoubleRegister dst);

  // Calls an API function. Allocates HandleScope, extracts returned value
  // from handle and propagates exceptions. Restores context. stack_space
  // - space to be unwound on exit (includes the call JS arguments space and
  // the additional space allocated for the fast call).
  void CallApiFunctionAndReturn(Register function_address,
                                ExternalReference thunk_ref, int stack_space,
                                MemOperand return_value_operand,
                                MemOperand* context_restore_operand);

  // Jump to a runtime routine.
978 void JumpToExternalReference(const ExternalReference& builtin); 979 980 // Invoke specified builtin JavaScript function. Adds an entry to 981 // the unresolved list if the name does not resolve. 982 void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, 983 const CallWrapper& call_wrapper = NullCallWrapper()); 984 985 // Store the code object for the given builtin in the target register and 986 // setup the function in r1. 987 void GetBuiltinEntry(Register target, Builtins::JavaScript id); 988 989 // Store the function for the given builtin in the target register. 990 void GetBuiltinFunction(Register target, Builtins::JavaScript id); 991 992 Handle<Object> CodeObject() { 993 DCHECK(!code_object_.is_null()); 994 return code_object_; 995 } 996 997 998 // Emit code for a truncating division by a constant. The dividend register is 999 // unchanged and ip gets clobbered. Dividend and result must be different. 1000 void TruncatingDiv(Register result, Register dividend, int32_t divisor); 1001 1002 // --------------------------------------------------------------------------- 1003 // StatsCounter support 1004 1005 void SetCounter(StatsCounter* counter, int value, Register scratch1, 1006 Register scratch2); 1007 void IncrementCounter(StatsCounter* counter, int value, Register scratch1, 1008 Register scratch2); 1009 void DecrementCounter(StatsCounter* counter, int value, Register scratch1, 1010 Register scratch2); 1011 1012 1013 // --------------------------------------------------------------------------- 1014 // Debugging 1015 1016 // Calls Abort(msg) if the condition cond is not satisfied. 1017 // Use --debug_code to enable. 1018 void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7); 1019 void AssertFastElements(Register elements); 1020 1021 // Like Assert(), but always enabled. 1022 void Check(Condition cond, BailoutReason reason, CRegister cr = cr7); 1023 1024 // Print a message to stdout and abort execution. 
void Abort(BailoutReason reason);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // Number utilities

  // Check whether the value of reg is a power of two and not zero. If not
  // control continues at the label not_power_of_two. If reg is a power of two
  // the register scratch contains the value of (reg - 1) when control falls
  // through.
  void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
                                 Label* not_power_of_two_or_zero);
  // Check whether the value of reg is a power of two and not zero.
  // Control falls through if it is, with scratch containing the mask
  // value (reg - 1).
  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
  // strictly positive but not a power of two.
  void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
                                       Label* zero_and_neg,
                                       Label* not_power_of_two);

  // ---------------------------------------------------------------------------
  // Bit testing/extraction
  //
  // Bit numbering is such that the least significant bit is bit 0
  // (for consistency between 32/64-bit).

  // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
  // and place them into the least significant bits of dst.
  inline void ExtractBitRange(Register dst, Register src, int rangeStart,
                              int rangeEnd, RCBit rc = LeaveRC) {
    DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
    // Rotate left so that bit rangeEnd of src lands in bit 0 of dst, then
    // mask away everything above the field's width.
    int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
    int width = rangeStart - rangeEnd + 1;
#if V8_TARGET_ARCH_PPC64
    rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
#else
    rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1, rc);
#endif
  }

  // Extract a single bit (bitNumber) from src into bit 0 of dst.
  inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
                         RCBit rc = LeaveRC) {
    ExtractBitRange(dst, src, bitNumber, bitNumber, rc);
  }

  // Extract consecutive bits (defined by mask) from src and place them
  // into the least significant bits of dst.
  inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
                             RCBit rc = LeaveRC) {
    // Scan down from the most significant bit to find the highest (start)
    // and lowest (end) set bits of the mask.
    int start = kBitsPerPointer - 1;
    int end;
    uintptr_t bit = (1L << start);

    while (bit && (mask & bit) == 0) {
      start--;
      bit >>= 1;
    }
    end = start;
    bit >>= 1;

    while (bit && (mask & bit)) {
      end--;
      bit >>= 1;
    }

    // 1-bits in mask must be contiguous
    DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);

    ExtractBitRange(dst, src, start, end, rc);
  }

  // Test single bit in value. Result is reflected in cr0 (scratch holds
  // the extracted bit).
  inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
    ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC);
  }

  // Test consecutive bit range in value. Range is defined by
  // rangeStart - rangeEnd. Result is reflected in cr0.
  inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
                           Register scratch = r0) {
    ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC);
  }

  // Test consecutive bit range in value. Range is defined by mask.
1117 inline void TestBitMask(Register value, uintptr_t mask, 1118 Register scratch = r0) { 1119 ExtractBitMask(scratch, value, mask, SetRC); 1120 } 1121 1122 1123 // --------------------------------------------------------------------------- 1124 // Smi utilities 1125 1126 // Shift left by 1 1127 void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); } 1128 void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) { 1129 ShiftLeftImm(dst, src, Operand(kSmiShift), rc); 1130 } 1131 1132#if !V8_TARGET_ARCH_PPC64 1133 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow(). 1134 void SmiTagCheckOverflow(Register reg, Register overflow); 1135 void SmiTagCheckOverflow(Register dst, Register src, Register overflow); 1136 1137 inline void JumpIfNotSmiCandidate(Register value, Register scratch, 1138 Label* not_smi_label) { 1139 // High bits must be identical to fit into an Smi 1140 addis(scratch, value, Operand(0x40000000u >> 16)); 1141 cmpi(scratch, Operand::Zero()); 1142 blt(not_smi_label); 1143 } 1144#endif 1145 inline void TestUnsignedSmiCandidate(Register value, Register scratch) { 1146 // The test is different for unsigned int values. Since we need 1147 // the value to be in the range of a positive smi, we can't 1148 // handle any of the high bits being set in the value. 
1149 TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift, 1150 scratch); 1151 } 1152 inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch, 1153 Label* not_smi_label) { 1154 TestUnsignedSmiCandidate(value, scratch); 1155 bne(not_smi_label, cr0); 1156 } 1157 1158 void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); } 1159 1160 void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) { 1161 ShiftRightArithImm(dst, src, kSmiShift, rc); 1162 } 1163 1164 void SmiToPtrArrayOffset(Register dst, Register src) { 1165#if V8_TARGET_ARCH_PPC64 1166 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2); 1167 ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2); 1168#else 1169 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2); 1170 ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift)); 1171#endif 1172 } 1173 1174 void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); } 1175 1176 void SmiToShortArrayOffset(Register dst, Register src) { 1177#if V8_TARGET_ARCH_PPC64 1178 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1); 1179 ShiftRightArithImm(dst, src, kSmiShift - 1); 1180#else 1181 STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1); 1182 if (!dst.is(src)) { 1183 mr(dst, src); 1184 } 1185#endif 1186 } 1187 1188 void SmiToIntArrayOffset(Register dst, Register src) { 1189#if V8_TARGET_ARCH_PPC64 1190 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2); 1191 ShiftRightArithImm(dst, src, kSmiShift - 2); 1192#else 1193 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2); 1194 ShiftLeftImm(dst, src, Operand(2 - kSmiShift)); 1195#endif 1196 } 1197 1198#define SmiToFloatArrayOffset SmiToIntArrayOffset 1199 1200 void SmiToDoubleArrayOffset(Register dst, Register src) { 1201#if V8_TARGET_ARCH_PPC64 1202 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2); 1203 ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2); 1204#else 1205 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 
kDoubleSizeLog2);
    ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
#endif
  }

  // Convert a smi to an offset for an element of elementSizeLog2 bytes.
  void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
    if (kSmiShift < elementSizeLog2) {
      ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
    } else if (kSmiShift > elementSizeLog2) {
      ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
    } else if (!dst.is(src)) {
      mr(dst, src);
    }
  }

  // As above, but src may hold either a smi or a raw integer index.
  void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
                          bool isSmi) {
    if (isSmi) {
      SmiToArrayOffset(dst, src, elementSizeLog2);
    } else {
      ShiftLeftImm(dst, src, Operand(elementSizeLog2));
    }
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Untag the source value into destination and jump if source is not a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

  // Sets eq in cr0 iff value is a smi (tag bit 0 clear).
  inline void TestIfSmi(Register value, Register scratch) {
    TestBit(value, 0, scratch);  // tst(value, Operand(kSmiTagMask));
  }

  // Sets eq in cr0 iff value is a non-negative smi (sign bit and tag bit
  // both clear).
  inline void TestIfPositiveSmi(Register value, Register scratch) {
    STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
                  (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
#if V8_TARGET_ARCH_PPC64
    rldicl(scratch, value, 1, kBitsPerPointer - 2, SetRC);
#else
    rlwinm(scratch, value, 1, kBitsPerPointer - 2, kBitsPerPointer - 1, SetRC);
#endif
  }

  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    TestIfSmi(value, r0);
    beq(smi_label, cr0);  // branch if SMI
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    TestIfSmi(value, r0);
    bne(not_smi_label, cr0);
  }
  // Jump if either of the registers contain a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);


#if V8_TARGET_ARCH_PPC64
  inline void TestIfInt32(Register value, Register scratch1, Register scratch2,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into a 32-bit integer
    srawi(scratch1, value, 31);
    sradi(scratch2, value, 32);
    cmp(scratch1, scratch2, cr);
  }
#else
  inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into a 32-bit integer
    srawi(scratch, lo_word, 31);
    cmp(scratch, hi_word, cr);
  }
#endif

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Abort execution if reg is not the root value with the given index,
  // enabled via --debug-code.
1300 void AssertIsRoot(Register reg, Heap::RootListIndex index); 1301 1302 // --------------------------------------------------------------------------- 1303 // HeapNumber utilities 1304 1305 void JumpIfNotHeapNumber(Register object, Register heap_number_map, 1306 Register scratch, Label* on_not_heap_number); 1307 1308 // --------------------------------------------------------------------------- 1309 // String utilities 1310 1311 // Generate code to do a lookup in the number string cache. If the number in 1312 // the register object is found in the cache the generated code falls through 1313 // with the result in the result register. The object and the result register 1314 // can be the same. If the number is not found in the cache the code jumps to 1315 // the label not_found with only the content of register object unchanged. 1316 void LookupNumberStringCache(Register object, Register result, 1317 Register scratch1, Register scratch2, 1318 Register scratch3, Label* not_found); 1319 1320 // Checks if both objects are sequential one-byte strings and jumps to label 1321 // if either is not. Assumes that neither object is a smi. 1322 void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1, 1323 Register object2, 1324 Register scratch1, 1325 Register scratch2, 1326 Label* failure); 1327 1328 // Checks if both objects are sequential one-byte strings and jumps to label 1329 // if either is not. 1330 void JumpIfNotBothSequentialOneByteStrings(Register first, Register second, 1331 Register scratch1, 1332 Register scratch2, 1333 Label* not_flat_one_byte_strings); 1334 1335 // Checks if both instance types are sequential one-byte strings and jumps to 1336 // label if either is not. 
1337 void JumpIfBothInstanceTypesAreNotSequentialOneByte( 1338 Register first_object_instance_type, Register second_object_instance_type, 1339 Register scratch1, Register scratch2, Label* failure); 1340 1341 // Check if instance type is sequential one-byte string and jump to label if 1342 // it is not. 1343 void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch, 1344 Label* failure); 1345 1346 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name); 1347 1348 void EmitSeqStringSetCharCheck(Register string, Register index, 1349 Register value, uint32_t encoding_mask); 1350 1351 // --------------------------------------------------------------------------- 1352 // Patching helpers. 1353 1354 // Retrieve/patch the relocated value (lis/ori pair or constant pool load). 1355 void GetRelocatedValue(Register location, Register result, Register scratch); 1356 void SetRelocatedValue(Register location, Register scratch, 1357 Register new_value); 1358 1359 void ClampUint8(Register output_reg, Register input_reg); 1360 1361 // Saturate a value into 8-bit unsigned integer 1362 // if input_value < 0, output_value is 0 1363 // if input_value > 255, output_value is 255 1364 // otherwise output_value is the (int)input_value (round to nearest) 1365 void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg, 1366 DoubleRegister temp_double_reg); 1367 1368 1369 void LoadInstanceDescriptors(Register map, Register descriptors); 1370 void EnumLength(Register dst, Register map); 1371 void NumberOfOwnDescriptors(Register dst, Register map); 1372 1373 template <typename Field> 1374 void DecodeField(Register dst, Register src) { 1375 ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift); 1376 } 1377 1378 template <typename Field> 1379 void DecodeField(Register reg) { 1380 DecodeField<Field>(reg, reg); 1381 } 1382 1383 template <typename Field> 1384 void DecodeFieldToSmi(Register dst, Register src) { 1385#if 
V8_TARGET_ARCH_PPC64 1386 DecodeField<Field>(dst, src); 1387 SmiTag(dst); 1388#else 1389 // 32-bit can do this in one instruction: 1390 int start = Field::kSize + kSmiShift - 1; 1391 int end = kSmiShift; 1392 int rotate = kSmiShift - Field::kShift; 1393 if (rotate < 0) { 1394 rotate += kBitsPerPointer; 1395 } 1396 rlwinm(dst, src, rotate, kBitsPerPointer - start - 1, 1397 kBitsPerPointer - end - 1); 1398#endif 1399 } 1400 1401 template <typename Field> 1402 void DecodeFieldToSmi(Register reg) { 1403 DecodeFieldToSmi<Field>(reg, reg); 1404 } 1405 1406 // Activation support. 1407 void EnterFrame(StackFrame::Type type, 1408 bool load_constant_pool_pointer_reg = false); 1409 // Returns the pc offset at which the frame ends. 1410 int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0); 1411 1412 // Expects object in r0 and returns map with validated enum cache 1413 // in r0. Assumes that any other register can be used as a scratch. 1414 void CheckEnumCache(Register null_value, Label* call_runtime); 1415 1416 // AllocationMemento support. Arrays may have an associated 1417 // AllocationMemento object that can be checked for in order to pretransition 1418 // to another type. 1419 // On entry, receiver_reg should point to the array object. 1420 // scratch_reg gets clobbered. 1421 // If allocation info is present, condition flags are set to eq. 1422 void TestJSArrayForAllocationMemento(Register receiver_reg, 1423 Register scratch_reg, 1424 Label* no_memento_found); 1425 1426 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, 1427 Register scratch_reg, 1428 Label* memento_found) { 1429 Label no_memento_found; 1430 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, 1431 &no_memento_found); 1432 beq(memento_found); 1433 bind(&no_memento_found); 1434 } 1435 1436 // Jumps to found label if a prototype map has dictionary elements. 
1437 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, 1438 Register scratch1, Label* found); 1439 1440 private: 1441 static const int kSmiShift = kSmiTagSize + kSmiShiftSize; 1442 1443 void CallCFunctionHelper(Register function, int num_reg_arguments, 1444 int num_double_arguments); 1445 1446 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al, 1447 CRegister cr = cr7); 1448 1449 // Helper functions for generating invokes. 1450 void InvokePrologue(const ParameterCount& expected, 1451 const ParameterCount& actual, Handle<Code> code_constant, 1452 Register code_reg, Label* done, 1453 bool* definitely_mismatches, InvokeFlag flag, 1454 const CallWrapper& call_wrapper); 1455 1456 void InitializeNewString(Register string, Register length, 1457 Heap::RootListIndex map_index, Register scratch1, 1458 Register scratch2); 1459 1460 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. 1461 void InNewSpace(Register object, Register scratch, 1462 Condition cond, // eq for new space, ne otherwise. 1463 Label* branch); 1464 1465 // Helper for finding the mark bits for an address. Afterwards, the 1466 // bitmap register points at the word with the mark bits and the mask 1467 // the position of the first bit. Leaves addr_reg unchanged. 1468 inline void GetMarkBits(Register addr_reg, Register bitmap_reg, 1469 Register mask_reg); 1470 1471 // Helper for throwing exceptions. Compute a handler address and jump to 1472 // it. See the implementation for register usage. 1473 void JumpToHandlerEntry(); 1474 1475 // Compute memory operands for safepoint stack slots. 1476 static int SafepointRegisterStackIndex(int reg_code); 1477 MemOperand SafepointRegisterSlot(Register reg); 1478 MemOperand SafepointRegistersAndDoublesSlot(Register reg); 1479 1480#if V8_OOL_CONSTANT_POOL 1481 // Loads the constant pool pointer (kConstantPoolRegister). 
1482 enum CodeObjectAccessMethod { CAN_USE_IP, CONSTRUCT_INTERNAL_REFERENCE }; 1483 void LoadConstantPoolPointerRegister(CodeObjectAccessMethod access_method, 1484 int ip_code_entry_delta = 0); 1485#endif 1486 1487 bool generating_stub_; 1488 bool has_frame_; 1489 // This handle will be patched with the code object on installation. 1490 Handle<Object> code_object_; 1491 1492 // Needs access to SafepointRegisterStackIndex for compiled frame 1493 // traversal. 1494 friend class StandardFrame; 1495}; 1496 1497 1498// The code patcher is used to patch (typically) small parts of code e.g. for 1499// debugging and other types of instrumentation. When using the code patcher 1500// the exact number of bytes specified must be emitted. It is not legal to emit 1501// relocation information. If any of these constraints are violated it causes 1502// an assertion to fail. 1503class CodePatcher { 1504 public: 1505 enum FlushICache { FLUSH, DONT_FLUSH }; 1506 1507 CodePatcher(byte* address, int instructions, FlushICache flush_cache = FLUSH); 1508 virtual ~CodePatcher(); 1509 1510 // Macro assembler to emit code. 1511 MacroAssembler* masm() { return &masm_; } 1512 1513 // Emit an instruction directly. 1514 void Emit(Instr instr); 1515 1516 // Emit the condition part of an instruction leaving the rest of the current 1517 // instruction unchanged. 1518 void EmitCondition(Condition cond); 1519 1520 private: 1521 byte* address_; // The address of the code being patched. 1522 int size_; // Number of bytes of the expected patch size. 1523 MacroAssembler masm_; // Macro assembler used to generate the code. 1524 FlushICache flush_cache_; // Whether to flush the I cache after patching. 1525}; 1526 1527 1528// ----------------------------------------------------------------------------- 1529// Static helper functions. 

// Returns a MemOperand addressing the given slot of a context object.
inline MemOperand ContextOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}


// Returns a MemOperand addressing the global object slot of the current
// context (held in cp).
inline MemOperand GlobalObjectOperand() {
  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}


#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) \
  masm->stop(__FILE_LINE__); \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
}  // namespace internal
}  // namespace v8

#endif  // V8_PPC_MACRO_ASSEMBLER_PPC_H_