// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_X87_MACRO_ASSEMBLER_X87_H_
#define V8_X87_MACRO_ASSEMBLER_X87_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};


enum RegisterValueType {
  REGISTER_VALUE_IS_SMI,
  REGISTER_VALUE_IS_INT32
};


#ifdef DEBUG
// Returns true if any two of the given registers are the same register.
// Unused trailing arguments default to no_reg and are ignored.
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);
#endif


// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such function on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Load/store a value of the given representation between a register
  // and a memory operand.
  void Load(Register dst, const Operand& src, Representation r);
  void Store(Register src, const Operand& dst, Representation r);

  // Operations on roots in the root-array.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
  void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
  // These methods can only be used with constant roots (i.e. non-writable
  // and not in new space).
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(const Operand& with, Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // GC Support
  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register. Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr, Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met,
                     Label::Distance condition_met_distance = Label::kFar);

  void CheckPageFlagForMap(
      Handle<Map> map,
      int mask,
      Condition cc,
      Label* condition_met,
      Label::Distance condition_met_distance = Label::kFar);

  void CheckMapDeprecated(Handle<Map> map,
                          Register scratch,
                          Label* if_deprecated);

  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch,
                           Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, zero, branch, distance);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch,
                        Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, not_zero, branch, distance);
  }

  // Check if an object has a given incremental marking color. Also uses ecx!
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                Label::Distance has_color_distance,
                int first_bit,
                int second_bit);

  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black,
                   Label::Distance on_black_distance = Label::kFar);

  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Label* object_is_white_and_not_data,
                      Label::Distance distance);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // As above, but the offset has the tag presubtracted. For use with
  // Operand(reg, off).
  void RecordWriteContextSlot(
      Register context, int offset, Register value, Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    // Context slots use untagged offsets; re-add the heap-object tag so the
    // shared field-based implementation can be used.
    RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp,
                     remembered_set_action, smi_check,
                     pointers_to_here_check_for_value);
  }

  // Notify the garbage collector that we wrote a pointer into a fixed array.
  // |array| is the array being stored into, |value| is the
  // object being stored. |index| is the array index represented as a
  // Smi. All registers are clobbered by the operation. RecordWriteArray
  // filters out smis so it does not update the write barrier if the
  // value is a smi.
  void RecordWriteArray(
      Register array, Register value, Register index, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // For page containing |object| mark region covering |address|
  // dirty. |object| is the object being stored into, |value| is the
  // object being stored. The address and value registers are clobbered by the
  // operation. RecordWrite filters out smis so it does not update the
  // write barrier if the value is a smi.
  void RecordWrite(
      Register object, Register address, Register value, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // For page containing |object| mark the region covering the object's map
  // dirty. |object| is the object being stored into, |map| is the Map object
  // that was stored.
  void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
                         Register scratch2, SaveFPRegsMode save_fp);

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Enter specific kind of exit frame. Expects the number of
  // arguments in register eax and sets up the number of arguments in
  // register edi and the pointer to the first argument in register
  // esi.
  void EnterExitFrame(bool save_doubles);

  void EnterApiExitFrame(int argc);

  // Leave the current exit frame. Expects the return value in
  // register eax:edx (untouched) and the pointer to the first
  // argument in register esi.
  void LeaveExitFrame(bool save_doubles);

  // Leave the current exit frame. Expects the return value in
  // register eax (untouched).
  void LeaveApiExitFrame(bool restore_context);

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  // Load the global function with the given index.
  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same.
  void LoadGlobalFunctionInitialMap(Register function, Register map);

  // Push and pop the registers that can hold pointers.
  void PushSafepointRegisters() { pushad(); }
  void PopSafepointRegisters() { popad(); }
  // Store the value in register/immediate src in the safepoint
  // register stack slot for register dst.
  void StoreToSafepointRegisterSlot(Register dst, Register src);
  void StoreToSafepointRegisterSlot(Register dst, Immediate src);
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  void LoadHeapObject(Register result, Handle<HeapObject> object);
  void CmpHeapObject(Register reg, Handle<HeapObject> object);
  void PushHeapObject(Handle<HeapObject> object);

  // Load a handle-referenced object, going through LoadHeapObject for heap
  // objects and a plain immediate move for smis.
  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      Move(result, Immediate(object));
    }
  }

  // Compare a register against a handle-referenced object; smis compare as
  // immediates.
  void CmpObject(Register reg, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      CmpHeapObject(reg, Handle<HeapObject>::cast(object));
    } else {
      cmp(reg, Immediate(object));
    }
  }

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper) {
    InvokeCode(Operand(code), expected, actual, flag, call_wrapper);
  }

  void InvokeCode(const Operand& code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);

  // Expression support
  // Support for constant splitting.
  bool IsUnsafeImmediate(const Immediate& x);
  void SafeMove(Register dst, const Immediate& x);
  void SafePush(const Immediate& x);

  // Compare object type for heap object.
  // Incoming register is heap_object and outgoing register is map.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  void CmpInstanceType(Register map, InstanceType type);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Label* fail,
                         Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements. Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Label* fail,
                               Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements. Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Label* fail,
                            Label::Distance distance = Label::kFar);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by key in
  // the FastDoubleElements array elements, otherwise jump to fail.
  void StoreNumberToDoubleElements(Register maybe_number,
                                   Register elements,
                                   Register key,
                                   Register scratch,
                                   Label* fail,
                                   int offset = 0);

  // Compare an object's map with the specified map.
  void CompareMap(Register obj, Handle<Map> map);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified map and branch to a
  // specified target if equal. Skip the smi check if not required (object is
  // known to be a heap object)
  void DispatchMap(Register obj,
                   Register unused,
                   Handle<Map> map,
                   Handle<Code> success,
                   SmiCheckType smi_check_type);

  // Check if the object in register heap_object is a string. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectStringType(Register heap_object,
                               Register map,
                               Register instance_type);

  // Check if the object in register heap_object is a name. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectNameType(Register heap_object,
                             Register map,
                             Register instance_type);

  // Check if a heap object's type is in the JSObject range, not including
  // JSFunction. The object's map will be loaded in the map register.
  // Any or all of the three registers may be the same.
  // The contents of the scratch register will always be overwritten.
  void IsObjectJSObjectType(Register heap_object,
                            Register map,
                            Register scratch,
                            Label* fail);

  // The contents of the scratch register will be overwritten.
  void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);

  // FCmp is similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
  void FCmp();
  void FXamMinusZero();
  void FXamSign();
  void X87CheckIA();
  void X87SetRC(int rc);
  void X87SetFPUCW(int cw);

  void ClampUint8(Register reg);
  void ClampTOSToUint8(Register result_reg);

  void SlowTruncateToI(Register result_reg, Register input_reg,
                       int offset = HeapNumber::kValueOffset - kHeapObjectTag);

  void TruncateHeapNumberToI(Register result_reg, Register input_reg);
  void TruncateX87TOSToI(Register result_reg);

  void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
                 Label* lost_precision, Label* is_nan, Label* minus_zero,
                 Label::Distance dst = Label::kFar);

  // Smi tagging support.
  void SmiTag(Register reg) {
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize == 1);
    // Doubling the value is equivalent to shifting left by the one-bit smi
    // tag size, leaving a zero tag bit.
    add(reg, reg);
  }
  void SmiUntag(Register reg) {
    sar(reg, kSmiTagSize);
  }

  // Modifies the register even if it does not contain a Smi!
  void SmiUntag(Register reg, Label* is_smi) {
    STATIC_ASSERT(kSmiTagSize == 1);
    sar(reg, kSmiTagSize);
    STATIC_ASSERT(kSmiTag == 0);
    // sar shifts the tag bit into the carry flag; a clear carry means the
    // (zero) smi tag was present.
    j(not_carry, is_smi);
  }

  void LoadUint32NoSSE2(Register src);

  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value,
                        Label* smi_label,
                        Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(zero, smi_label, distance);
  }
  // Jump if the operand is a smi.
  inline void JumpIfSmi(Operand value,
                        Label* smi_label,
                        Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(zero, smi_label, distance);
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value,
                           Label* not_smi_label,
                           Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(not_zero, not_smi_label, distance);
  }

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);

  // Extract the value of the bit field described by Field (kShift/kMask)
  // from reg, leaving the decoded value in reg.
  template<typename Field>
  void DecodeField(Register reg) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> Field::kShift;
    if (shift != 0) {
      sar(reg, shift);
    }
    and_(reg, Immediate(mask));
  }

  // As DecodeField, but leaves the decoded value smi-tagged in reg.
  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    static const int shift = Field::kShift;
    static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
    STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
    STATIC_ASSERT(kSmiTag == 0);
    if (shift < kSmiTagSize) {
      shl(reg, kSmiTagSize - shift);
    } else if (shift > kSmiTagSize) {
      sar(reg, shift - kSmiTagSize);
    }
    and_(reg, Immediate(mask));
  }

  // Abort execution if argument is not a number, enabled via --debug-code.
  void AssertNumber(Register object);

  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object);

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link it into try handler chain.
  void PushTryHandler(StackHandler::Kind kind, int handler_index);

  // Unlink the stack handler on top of the stack from the try handler chain.
  void PopTryHandler();

  // Throw to the top handler in the try handler chain.
  void Throw(Register value);

  // Throw past all JS frames to the top JS entry frame.
  void ThrowUncatchable(Register value);

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register is clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch1,
                              Register scratch2,
                              Label* miss);

  void GetNumberHash(Register r0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register r0,
                                Register r1,
                                Register r2,
                                Register result);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old pointer space. If the given space
  // is exhausted control continues at the gc_required label. The allocated
  // object is returned in result and end of the new object is returned in
  // result_end. The register scratch can be passed as no_reg in which case
  // an additional object reference will be added to the reloc info. The
  // returned pointers in result and result_end have not yet been tagged as
  // heap objects. If result_contains_top_on_entry is true the content of
  // result is known to be the allocation top on entry (could be result_end
  // from a previous call). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void Allocate(int object_size,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(int header_size,
                ScaleFactor element_size,
                Register element_count,
                RegisterValueType element_count_type,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(Register object_size,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. Make sure that no pointers are left to the
  // object(s) no longer allocated as they would be invalid when allocation is
  // un-done.
  void UndoAllocationInNewSpace(Register object);

  // Allocate a heap number in new space with undefined value. The
  // register scratch2 can be passed as no_reg; the others must be
  // valid registers. Returns tagged pointer in result register, or
  // jumps to gc_required if new space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required,
                          MutableMode mode = IMMUTABLE);

  // Allocate a sequential string. All the header fields of the string object
  // are initialized.
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateOneByteString(Register result, Register length,
                             Register scratch1, Register scratch2,
                             Register scratch3, Label* gc_required);
  void AllocateOneByteString(Register result, int length, Register scratch1,
                             Register scratch2, Label* gc_required);

  // Allocate a raw cons string object. Only the map field of the result is
  // initialized.
  void AllocateTwoByteConsString(Register result,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateOneByteConsString(Register result, Register scratch1,
                                 Register scratch2, Label* gc_required);

  // Allocate a raw sliced string object. Only the map field of the result is
  // initialized.
  void AllocateTwoByteSlicedString(Register result,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateOneByteSlicedString(Register result, Register scratch1,
                                   Register scratch2, Label* gc_required);

  // Copy memory, byte-by-byte, from source to destination. Not optimized for
  // long or aligned copies.
  // The contents of index and scratch are destroyed.
  void CopyBytes(Register source,
                 Register destination,
                 Register length,
                 Register scratch);

  // Initialize fields with filler values. Fields starting at |start_offset|
  // not including end_offset are overwritten with the value in |filler|. At
  // the end of the loop, |start_offset| takes the value of |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check a boolean-bit of a Smi field.
  void BooleanBitTest(Register object, int field_offset, int bit_index);

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Register scratch,
                               Label* miss,
                               bool miss_on_bound_function = false);

  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub. Generate the code if necessary.
  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Tail call a code stub (jump). Generate the code if necessary.
  void TailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  // Convenience wrapper: call by id with the function's declared argument
  // count, saving FP registers.
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }

  // Convenience function: call an external reference.
  void CallExternalReference(ExternalReference ref, int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in esp[0], esp[4],
  // etc., not pushed. The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_arguments, Register scratch);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);

  // Prepares stack to put arguments (aligns and so on). Reserves
  // space for return value if needed (assumes the return value is a handle).
  // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
  // etc. Saves context (esi). If space was reserved for return value then
  // stores the pointer to the reserved slot into esi.
  void PrepareCallApiFunction(int argc);

  // Calls an API function. Allocates HandleScope, extracts returned value
  // from handle and propagates exceptions. Clobbers ebx, edi and
  // caller-save registers. Restores context. On return removes
  // stack_space * kPointerSize (GCed).
  void CallApiFunctionAndReturn(Register function_address,
                                ExternalReference thunk_ref,
                                Operand thunk_last_arg,
                                int stack_space,
                                Operand return_value_operand,
                                Operand* context_restore_operand);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& ext);

  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // Return and drop arguments from stack, where the number of arguments
  // may be bigger than 2^16 - 1. Requires a scratch register.
  void Ret(int bytes_dropped, Register scratch);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the esp register.
  void Drop(int element_count);

  void Call(Label* target) { call(target); }
  void Push(Register src) { push(src); }
  void Pop(Register dst) { pop(dst); }

  // Emit call to the code we are currently generating.
  void CallSelf() {
    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
    call(self, RelocInfo::CODE_TARGET);
  }

  // Move if the registers are not identical.
  void Move(Register target, Register source);

  // Move a constant into a destination using the most efficient encoding.
  void Move(Register dst, const Immediate& x);
  void Move(const Operand& dst, const Immediate& x);

  // Push a handle value.
  void Push(Handle<Object> handle) { push(Immediate(handle)); }
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Handle to the code object being generated; must have been set before use.
  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

  // Insert code to verify that the x87 stack has the specified depth (0-7)
  void VerifyX87StackDepth(uint32_t depth);

  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged, the result is in edx, and eax gets clobbered.
  void TruncatingDiv(Register dividend, int32_t divisor);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);
  void IncrementCounter(Condition cc, StatsCounter* counter, int value);
  void DecrementCounter(Condition cc, StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason);

  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason reason);

  // Check that the stack is aligned.
  void CheckStackAlignment();

  // Verify restrictions about code generated in stubs.
  // Accessors for the generating-stub flag, used to verify restrictions about
  // code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  // Accessors for the has-frame flag.
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  // Whether calling the given stub is allowed at this point in code
  // generation (see the implementation for the exact restriction).
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // String utilities.

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* not_found);

  // Check whether the instance type represents a flat one-byte string. Jump to
  // the label if not. If the instance type can be scratched specify same
  // register for both instance type and scratch.
  void JumpIfInstanceTypeIsNotSequentialOneByte(
      Register instance_type, Register scratch,
      Label* on_not_flat_one_byte_string);

  // Checks if both objects are sequential one-byte strings, and jumps to label
  // if either is not.
883 void JumpIfNotBothSequentialOneByteStrings( 884 Register object1, Register object2, Register scratch1, Register scratch2, 885 Label* on_not_flat_one_byte_strings); 886 887 // Checks if the given register or operand is a unique name 888 void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name, 889 Label::Distance distance = Label::kFar) { 890 JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance); 891 } 892 893 void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name, 894 Label::Distance distance = Label::kFar); 895 896 void EmitSeqStringSetCharCheck(Register string, 897 Register index, 898 Register value, 899 uint32_t encoding_mask); 900 901 static int SafepointRegisterStackIndex(Register reg) { 902 return SafepointRegisterStackIndex(reg.code()); 903 } 904 905 // Activation support. 906 void EnterFrame(StackFrame::Type type); 907 void LeaveFrame(StackFrame::Type type); 908 909 // Expects object in eax and returns map with validated enum cache 910 // in eax. Assumes that any other register can be used as a scratch. 911 void CheckEnumCache(Label* call_runtime); 912 913 // AllocationMemento support. Arrays may have an associated 914 // AllocationMemento object that can be checked for in order to pretransition 915 // to another type. 916 // On entry, receiver_reg should point to the array object. 917 // scratch_reg gets clobbered. 918 // If allocation info is present, conditional code is set to equal. 919 void TestJSArrayForAllocationMemento(Register receiver_reg, 920 Register scratch_reg, 921 Label* no_memento_found); 922 923 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, 924 Register scratch_reg, 925 Label* memento_found) { 926 Label no_memento_found; 927 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, 928 &no_memento_found); 929 j(equal, memento_found); 930 bind(&no_memento_found); 931 } 932 933 // Jumps to found label if a prototype map has dictionary elements. 
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

 private:
  bool generating_stub_;
  bool has_frame_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      const Operand& code_operand,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      Label::Distance done_distance,
                      const CallWrapper& call_wrapper = NullCallWrapper());

  // Pieces of exit-frame setup/teardown; presumably shared by the public
  // EnterExitFrame/LeaveExitFrame entry points -- confirm in implementation.
  void EnterExitFramePrologue();
  void EnterExitFrameEpilogue(int argc, bool save_doubles);

  void LeaveExitFrameEpilogue(bool restore_context);

  // Allocation support helpers.
  void LoadAllocationTopHelper(Register result,
                               Register scratch,
                               AllocationFlags flags);

  void UpdateAllocationTopHelper(Register result_end,
                                 Register scratch,
                                 AllocationFlags flags);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cc,
                  Label* condition_met,
                  Label::Distance condition_met_distance = Label::kFar);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // the position of the first bit.  Uses ecx as scratch and leaves addr_reg
  // unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Helper for throwing exceptions.  Compute a handler address and jump to
  // it.  See the implementation for register usage.
  void JumpToHandlerEntry();

  // Compute memory operands for safepoint stack slots.
  Operand SafepointRegisterSlot(Register reg);
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}


// Generate an Operand for loading an indexed field from an object.
1027inline Operand FieldOperand(Register object, 1028 Register index, 1029 ScaleFactor scale, 1030 int offset) { 1031 return Operand(object, index, scale, offset - kHeapObjectTag); 1032} 1033 1034 1035inline Operand FixedArrayElementOperand(Register array, 1036 Register index_as_smi, 1037 int additional_offset = 0) { 1038 int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize; 1039 return FieldOperand(array, index_as_smi, times_half_pointer_size, offset); 1040} 1041 1042 1043inline Operand ContextOperand(Register context, int index) { 1044 return Operand(context, Context::SlotOffset(index)); 1045} 1046 1047 1048inline Operand GlobalObjectOperand() { 1049 return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX); 1050} 1051 1052 1053// Generates an Operand for saving parameters after PrepareCallApiFunction. 1054Operand ApiParameterOperand(int index); 1055 1056 1057#ifdef GENERATED_CODE_COVERAGE 1058extern void LogGeneratedCodeCoverage(const char* file_line); 1059#define CODE_COVERAGE_STRINGIFY(x) #x 1060#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) 1061#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) 1062#define ACCESS_MASM(masm) { \ 1063 byte* ia32_coverage_function = \ 1064 reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \ 1065 masm->pushfd(); \ 1066 masm->pushad(); \ 1067 masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \ 1068 masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \ 1069 masm->pop(eax); \ 1070 masm->popad(); \ 1071 masm->popfd(); \ 1072 } \ 1073 masm-> 1074#else 1075#define ACCESS_MASM(masm) masm-> 1076#endif 1077 1078 1079} } // namespace v8::internal 1080 1081#endif // V8_X87_MACRO_ASSEMBLER_X87_H_ 1082