macro-assembler-arm.h revision 50ef84f5fad2def87d3fbc737bec4a32711fdef4
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_

#include "assembler.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
static inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}


// Give alias names to registers.
const Register cp = { 8 };  // JavaScript context pointer
const Register roots = { 10 };  // Roots array pointer.

enum InvokeJSFlags {
  CALL_JS,
  JUMP_JS
};


// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
  // No special flags.
  NO_ALLOCATION_FLAGS = 0,
  // Return the pointer to the allocated object already tagged as a heap
  // object.
  TAG_OBJECT = 1 << 0,
  // The content of the result register already contains the allocation top in
  // new space.
  RESULT_CONTAINS_TOP = 1 << 1,
  // The requested size of the space to allocate is specified in words
  // instead of bytes.
  SIZE_IN_WORDS = 1 << 2
};


// Flags used for the ObjectToDoubleVFPRegister function.
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non-smi.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities; branch to the non-number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};


// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(void* buffer, int size);
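
  // Usage sketch (illustrative only, not part of this interface): heap
  // object pointers carry a kHeapObjectTag tag, and FieldMemOperand (above)
  // folds the -kHeapObjectTag adjustment into the operand, so a field can
  // be loaded through the tagged pointer directly. Assuming a
  // MacroAssembler* masm and a tagged object in r0,
  //
  //   masm->ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  //
  // loads the object's map without untagging the pointer first.
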
  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target, Condition cond = al);
  void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Register target, Condition cond = al);
  void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Ret(Condition cond = al);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);


  // Swap two registers. If the scratch register is omitted then a slightly
  // less efficient form using eor instead of mov is emitted.
  void Swap(Register reg1,
            Register reg2,
            Register scratch = no_reg,
            Condition cond = al);


  void And(Register dst, Register src1, const Operand& src2,
           Condition cond = al);
  void Ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  void Bfc(Register dst, int lsb, int width, Condition cond = al);
  void Usat(Register dst, int satpos, const Operand& src,
            Condition cond = al);

  void Call(Label* target);
  void Move(Register dst, Handle<Object> value);
  // May do nothing if the registers are identical.
  void Move(Register dst, Register src);
  // Jumps to the label at the index given by the Smi in "index".
  void SmiJumpTable(Register index, Vector<Label*> targets);
  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond = al);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond = al);


  // Check if object is in new space.
  // scratch can be object itself, but it will be clobbered.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cc,  // eq for new space, ne otherwise
                  Label* branch);


  // For the page containing |object| mark the region covering [address]
  // dirty. The object address must be in the first 8K of an allocated page.
  void RecordWriteHelper(Register object,
                         Register address,
                         Register scratch);

  // For the page containing |object| mark the region covering
  // [object+offset] dirty. The object address must be in the first 8K
  // of an allocated page. The 'scratch' registers are used in the
  // implementation and all 3 registers are clobbered by the
  // operation, as well as the ip register. RecordWrite updates the
  // write barrier even when storing smis.
  void RecordWrite(Register object,
                   Operand offset,
                   Register scratch0,
                   Register scratch1);

  // For the page containing |object| mark the region covering
  // [address] dirty. The object address must be in the first 8K of an
  // allocated page. All 3 registers are clobbered by the operation,
  // as well as the ip register. RecordWrite updates the write barrier
  // even when storing smis.
  void RecordWrite(Register object,
                   Register address,
                   Register scratch);
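
  // Illustrative write-barrier sketch (assumed caller-side code, not a
  // pattern defined by this file; the offset shown includes the usual
  // -kHeapObjectTag adjustment and the register choices are arbitrary):
  // a pointer store into a heap object field is followed by RecordWrite
  // so the covering region is marked dirty:
  //
  //   masm->str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  //   masm->RecordWrite(r0,
  //                     Operand(JSObject::kPropertiesOffset - kHeapObjectTag),
  //                     r2, r3);
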
  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Condition cond = al) {
    ASSERT(!src1.is(src2));
    if (src1.code() > src2.code()) {
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    ASSERT(!src1.is(src2));
    ASSERT(!src2.is(src3));
    ASSERT(!src1.is(src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2,
            Register src3, Register src4, Condition cond = al) {
    ASSERT(!src1.is(src2));
    ASSERT(!src2.is(src3));
    ASSERT(!src1.is(src3));
    ASSERT(!src1.is(src4));
    ASSERT(!src2.is(src4));
    ASSERT(!src3.is(src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          stm(db_w,
              sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }

  // Load two consecutive registers with two consecutive memory locations.
  void Ldrd(Register dst1,
            Register dst2,
            const MemOperand& src,
            Condition cond = al);

  // Store two consecutive registers to two consecutive memory locations.
  void Strd(Register src1,
            Register src2,
            const MemOperand& dst,
            Condition cond = al);

  // ---------------------------------------------------------------------------
  // Stack limit support

  void StackLimitCheck(Label* on_stack_limit_hit);

  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }

  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

  // Enter specific kind of exit frame; either normal or debug mode.
  // Expects the number of arguments in register r0 and
  // the builtin function to call in register r1. Exits with argc in
  // r4, argv in r6, and the builtin function to call in r5.
  void EnterExitFrame(ExitFrame::Mode mode);

  // Leave the current exit frame. Expects the return value in r0.
  void LeaveExitFrame(ExitFrame::Mode mode);
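
  // Illustrative runtime-entry sketch (assumed caller-side code; the mode
  // value is an assumption about the ExitFrame::Mode enum): an exit frame
  // brackets a call out of JavaScript, with argc in r0 and the builtin in
  // r1 on entry, per the comments above:
  //
  //   masm->EnterExitFrame(ExitFrame::MODE_NORMAL);
  //   ...  // call out to C++; argc is now in r4, argv in r6, builtin in r5
  //   masm->LeaveExitFrame(ExitFrame::MODE_NORMAL);
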
  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void LoadContext(Register dst, int context_chain_length);

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag);

  void InvokeFunction(JSFunction* function,
                      const ParameterCount& actual,
                      InvokeFlag flag);


#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void SaveRegistersToMemory(RegList regs);
  void RestoreRegistersFromMemory(RegList regs);
  void CopyRegistersFromMemoryToStack(Register base, RegList regs);
  void CopyRegistersFromStackToMemory(Register base,
                                      Register scratch,
                                      RegList regs);
  void DebugBreak();
#endif

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain.
  // The return address must be passed in register lr.
  // On exit, r0 contains TOS (code slot).
  void PushTryHandler(CodeLocation try_location, HandlerType type);

  // Unlink the stack handler on top of the stack from the try handler chain.
  // Must preserve the result register.
  void PopTryHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space. The object_size is specified in words
  // (not bytes). If the new space is exhausted control continues at the
  // gc_required label. The allocated object is returned in result. If the
  // flag TAG_OBJECT is set the result is tagged as a heap object. All
  // registers are clobbered also when control continues at the gc_required
  // label.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required,
                          AllocationFlags flags);
  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required,
                          AllocationFlags flags);
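
  // Illustrative allocation sketch (assumed caller-side code; register,
  // size, and label choices are arbitrary): allocate a 4-word object,
  // receive a tagged pointer in r0, and fall back to a runtime path when
  // new space is exhausted:
  //
  //   Label gc_required;
  //   masm->AllocateInNewSpace(4, r0, r1, r2, &gc_required, TAG_OBJECT);
  //   ...
  //   masm->bind(&gc_required);
  //   ...  // slow-path allocation via the runtime
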
  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. The caller must make sure that no pointers
  // are left to the object(s) no longer allocated as they would be invalid when
  // allocation is undone.
  void UndoAllocationInNewSpace(Register object, Register scratch);


  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register length,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* gc_required);
  void AllocateHeapNumberWithValue(Register result,
                                   DwVfpRegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Register heap_number_map,
                                   Label* gc_required);


  // ---------------------------------------------------------------------------
  // Support functions.

  // Tries to get the function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Register scratch,
                               Label* miss);

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  void CompareObjectType(Register heap_object,
                         Register map,
                         Register type_reg,
                         InstanceType type);

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register. It
  // leaves the heap object in the heap_object register unless the heap_object
  // register is the same register as type_reg.
  void CompareInstanceType(Register map,
                           Register type_reg,
                           InstanceType type);


  // Check if the map of an object is equal to a specified map (either
  // given directly or as an index into the root list) and branch to
  // label if not. Skip the smi check if not required (object is known
  // to be a heap object).
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                bool is_heap_object);

  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                bool is_heap_object);
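
  // Illustrative type-check sketch (assumed caller-side code; label and
  // register choices are arbitrary): CompareObjectType above sets the
  // condition flags, so a conditional branch can follow it directly:
  //
  //   Label not_array;
  //   masm->CompareObjectType(r0, r1, r2, JS_ARRAY_TYPE);
  //   masm->b(ne, &not_array);
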
  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
  Condition IsObjectStringType(Register obj,
                               Register type) {
    ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    tst(type, Operand(kIsNotStringMask));
    ASSERT_EQ(0, kStringTag);
    return eq;
  }


  inline void BranchOnSmi(Register value, Label* smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(eq, smi_label);
  }

  inline void BranchOnNotSmi(Register value, Label* not_smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(ne, not_smi_label);
  }

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // Get the number of least significant bits from a register.
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);

  // Uses VFP instructions to convert a Smi to a double.
  void IntegerToDoubleConversionWithVFP3(Register inReg,
                                         Register outHighReg,
                                         Register outLowReg);

  // Load the value of a number object into a VFP double register. If the object
  // is not a number a jump to the label not_number is performed and the VFP
  // double register is unchanged.
  void ObjectToDoubleVFPRegister(
      Register object,
      DwVfpRegister value,
      Register scratch1,
      Register scratch2,
      Register heap_number_map,
      SwVfpRegister scratch3,
      Label* not_number,
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);

  // Load the value of a smi object into a VFP double register. The register
  // scratch1 can be the same register as smi in which case smi will hold the
  // untagged value afterwards.
  void SmiToDoubleVFPRegister(Register smi,
                              DwVfpRegister value,
                              Register scratch1,
                              SwVfpRegister scratch2);

  // Count leading zeros in a 32 bit word. On ARMv5 and later it uses the clz
  // instruction. On pre-ARMv5 hardware this routine gives the wrong answer
  // for 0 (31 instead of 32). Source and scratch can be the same in which case
  // the source is clobbered. Source and zeros can also be the same in which
  // case scratch should be a different register.
  void CountLeadingZeros(Register zeros,
                         Register source,
                         Register scratch);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub, Condition cond = al);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(Runtime::Function* f, int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments);

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);
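
  // Illustrative runtime-call sketch (assumed caller-side code; the runtime
  // id and register choices are examples only): arguments are pushed first
  // and the argument count is passed explicitly:
  //
  //   masm->Push(r1, r0);                         // two arguments
  //   masm->CallRuntime(Runtime::kNumberAdd, 2);  // result returned in r0
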
  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_arguments, Register scratch);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);

  // Store the code object for the given builtin in the target register and
  // set up the function in r1.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);

  Handle<Object> CodeObject() { return code_object_; }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);
  void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

  // ---------------------------------------------------------------------------
  // Smi utilities

  // Jump if either of the registers contains a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
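
  // Illustrative smi-test sketch (assumed caller-side code; the label is
  // hypothetical): smis carry a zero low tag bit, which is what the helpers
  // above test:
  //
  //   Label either_smi;
  //   masm->JumpIfEitherSmi(r0, r1, &either_smi);
  //   ...  // neither r0 nor r1 is a smi on this path
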
  // ---------------------------------------------------------------------------
  // String utilities

  // Checks if both objects are sequential ASCII strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
                                                  Register object2,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Label* failure);

  // Checks if both objects are sequential ASCII strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialAsciiStrings(Register first,
                                           Register second,
                                           Register scratch1,
                                           Register scratch2,
                                           Label* not_flat_ascii_strings);

  // Checks if both instance types are sequential ASCII strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialAscii(
      Register first_object_instance_type,
      Register second_object_instance_type,
      Register scratch1,
      Register scratch2,
      Label* failure);

  // Check if instance type is sequential ASCII string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                              Register scratch,
                                              Label* failure);


 private:
  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      InvokeFlag flag);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  bool generating_stub_;
  bool allow_stub_calls_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;
};


#ifdef ENABLE_DEBUGGER_SUPPORT
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  CodePatcher(byte* address, int instructions);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr x);

  // Emit an address directly.
  void Emit(Address addr);

 private:
  byte* address_;  // The address of the code being patched.
  int instructions_;  // Number of instructions of the expected patch size.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
#endif  // ENABLE_DEBUGGER_SUPPORT


// -----------------------------------------------------------------------------
// Static helper functions.

#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif


} }  // namespace v8::internal

#endif  // V8_ARM_MACRO_ASSEMBLER_ARM_H_