codegen-x64.h revision 7f4d5bd8c03935e2c0cd412e561b8fc5a6a880ae
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_

#include "ast.h"
#include "ic-inl.h"
#include "jump-target-heavy.h"

namespace v8 {
namespace internal {

// Forward declarations
class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;

enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };


// -------------------------------------------------------------------------
// Reference support

// A reference is a C++ stack-allocated object that puts a
// reference on the virtual frame.  The reference may be consumed
// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
// When the lifetime (scope) of a valid reference ends, it must have
// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
 public:
  // The values of the types are important; see size().
  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };

  Reference(CodeGenerator* cgen,
            Expression* expression,
            bool persist_after_get = false);
  ~Reference();

  Expression* expression() const { return expression_; }
  Type type() const { return type_; }
  void set_type(Type value) {
    ASSERT_EQ(ILLEGAL, type_);
    type_ = value;
  }

  void set_unloaded() {
    ASSERT_NE(ILLEGAL, type_);
    ASSERT_NE(UNLOADED, type_);
    type_ = UNLOADED;
  }
  // The size the reference takes up on the stack.
  int size() const {
    return (type_ < SLOT) ? 0 : type_;
  }

  bool is_illegal() const { return type_ == ILLEGAL; }
  bool is_slot() const { return type_ == SLOT; }
  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
  bool is_unloaded() const { return type_ == UNLOADED; }

  // Return the name.  Only valid for named property references.
  Handle<String> GetName();

  // Generate code to push the value of the reference on top of the
  // expression stack.  The reference is expected to be already on top of
  // the expression stack, and it is consumed by the call unless the
  // reference is for a compound assignment.
  // If the reference is not consumed, it is left in place under its value.
  void GetValue();

  // Like GetValue except that the slot is expected to be written to before
  // being read from again.  The value of the reference may be invalidated,
  // causing subsequent attempts to read it to fail.
  void TakeValue();

  // Generate code to store the value on top of the expression stack in the
  // reference.  The reference is expected to be immediately below the value
  // on the expression stack.  The value is stored in the location specified
  // by the reference, and is left on top of the stack, after the reference
  // is popped from beneath it (unloaded).
  void SetValue(InitState init_state);

 private:
  CodeGenerator* cgen_;
  Expression* expression_;
  Type type_;
  bool persist_after_get_;
};
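
// A minimal usage sketch (hypothetical; the real call sites live in
// codegen-x64.cc).  It mirrors how an assignment is typically compiled:
//
//   Reference target(this, node->target());
//   ...
//   Load(node->value());              // push the RHS value on the frame
//   target.SetValue(NOT_CONST_INIT);  // store TOS through the reference
//
// By the time 'target' goes out of scope it must have been consumed and
// be in state UNLOADED.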


// -------------------------------------------------------------------------
// Control destinations.

// A control destination encapsulates a pair of jump targets and a
// flag indicating which one is the preferred fall-through.  The
// preferred fall-through must be unbound, the other may be already
// bound (ie, a backward target).
//
// The true and false targets may be jumped to unconditionally or
// control may split conditionally.  Unconditional jumping and
// splitting should be emitted in tail position (as the last thing
// when compiling an expression) because they can cause either label
// to be bound or the non-fall through to be jumped to, leaving an
// invalid virtual frame.
//
// The labels in the control destination can be extracted and
// manipulated normally without affecting the state of the
// destination.

class ControlDestination BASE_EMBEDDED {
 public:
  ControlDestination(JumpTarget* true_target,
                     JumpTarget* false_target,
                     bool true_is_fall_through)
      : true_target_(true_target),
        false_target_(false_target),
        true_is_fall_through_(true_is_fall_through),
        is_used_(false) {
    ASSERT(true_is_fall_through ? !true_target->is_bound()
                                : !false_target->is_bound());
  }

  // Accessors for the jump targets.  Directly jumping or branching to
  // or binding the targets will not update the destination's state.
  JumpTarget* true_target() const { return true_target_; }
  JumpTarget* false_target() const { return false_target_; }

  // True if the destination has been jumped to unconditionally or
  // control has been split to both targets.  This predicate does not
  // test whether the targets have been extracted and manipulated as
  // raw jump targets.
  bool is_used() const { return is_used_; }

  // True if the destination is used and the true target (respectively
  // false target) was the fall through.  If the target is backward,
  // "fall through" includes jumping unconditionally to it.
  bool true_was_fall_through() const {
    return is_used_ && true_is_fall_through_;
  }

  bool false_was_fall_through() const {
    return is_used_ && !true_is_fall_through_;
  }

  // Emit a branch to one of the true or false targets, and bind the
  // other target.  Because this binds the fall-through target, it
  // should be emitted in tail position (as the last thing when
  // compiling an expression).
  void Split(Condition cc) {
    ASSERT(!is_used_);
    if (true_is_fall_through_) {
      false_target_->Branch(NegateCondition(cc));
      true_target_->Bind();
    } else {
      true_target_->Branch(cc);
      false_target_->Bind();
    }
    is_used_ = true;
  }

  // Emit an unconditional jump in tail position, to the true target
  // (if the argument is true) or the false target.  The "jump" will
  // actually bind the jump target if it is forward, jump to it if it
  // is backward.
  void Goto(bool where) {
    ASSERT(!is_used_);
    JumpTarget* target = where ? true_target_ : false_target_;
    if (target->is_bound()) {
      target->Jump();
    } else {
      target->Bind();
    }
    is_used_ = true;
    true_is_fall_through_ = where;
  }

  // Mark this jump target as used as if Goto had been called, but
  // without generating a jump or binding a label (the control effect
  // should have already happened).  This is used when the left
  // subexpression of the short-circuit boolean operators is
  // compiled.
  void Use(bool where) {
    ASSERT(!is_used_);
    ASSERT((where ? true_target_ : false_target_)->is_bound());
    is_used_ = true;
    true_is_fall_through_ = where;
  }

  // Swap the true and false targets but keep the same actual label as
  // the fall through.  This is used when compiling negated
  // expressions, where we want to swap the targets but preserve the
  // state.
  void Invert() {
    JumpTarget* temp_target = true_target_;
    true_target_ = false_target_;
    false_target_ = temp_target;

    true_is_fall_through_ = !true_is_fall_through_;
  }

 private:
  // True and false jump targets.
  JumpTarget* true_target_;
  JumpTarget* false_target_;

  // Before using the destination: true if the true target is the
  // preferred fall through, false if the false target is.  After
  // using the destination: true if the true target was actually used
  // as the fall through, false if the false target was.
  bool true_is_fall_through_;

  // True if the Split or Goto functions have been called.
  bool is_used_;
};
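
// A sketch of typical use (illustrative only; it mirrors how an if
// statement might be compiled):
//
//   JumpTarget then;
//   JumpTarget else_;
//   ControlDestination dest(&then, &else_, true);  // prefer true fall-through
//   LoadCondition(node->condition(), &dest, true);
//   // ... a comparison in tail position ends with dest.Split(cc),
//   // branching to 'else_' and binding 'then' as the fall-through.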


// -------------------------------------------------------------------------
// Code generation state

// The state is passed down the AST by the code generator (and back up, in
// the form of the state of the jump target pair).  It is threaded through
// the call stack.  Constructing a state implicitly pushes it on the owning
// code generator's stack of states, and destroying one implicitly pops it.
//
// The code generator state is only used for expressions, so statements have
// the initial state.

class CodeGenState BASE_EMBEDDED {
 public:
  // Create an initial code generator state.  Destroying the initial state
  // leaves the code generator with a NULL state.
  explicit CodeGenState(CodeGenerator* owner);

  // Create a code generator state based on a code generator's current
  // state.  The new state has its own control destination.
  CodeGenState(CodeGenerator* owner, ControlDestination* destination);

  // Destroy a code generator state and restore the owning code generator's
  // previous state.
  ~CodeGenState();

  // Accessors for the state.
  ControlDestination* destination() const { return destination_; }

 private:
  // The owning code generator.
  CodeGenerator* owner_;

  // A control destination in case the expression has a control-flow
  // effect.
  ControlDestination* destination_;

  // The previous state of the owning code generator, restored when
  // this state is destroyed.
  CodeGenState* previous_;
};
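
// Because construction pushes and destruction pops, states nest
// RAII-style with the C++ call stack (a hypothetical sketch):
//
//   {
//     CodeGenState new_state(this, destination);
//     Visit(expr);  // expr is compiled against 'destination' via state()
//   }               // the previous state is restored automatically here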


// -------------------------------------------------------------------------
// Arguments allocation mode

enum ArgumentsAllocationMode {
  NO_ARGUMENTS_ALLOCATION,
  EAGER_ARGUMENTS_ALLOCATION,
  LAZY_ARGUMENTS_ALLOCATION
};


// -------------------------------------------------------------------------
// CodeGenerator

class CodeGenerator: public AstVisitor {
 public:
  // Takes a function literal, generates code for it.  This function should
  // only be called by compiler.cc.
  static Handle<Code> MakeCode(CompilationInfo* info);

  // Printing of AST, etc. as requested by flags.
  static void MakeCodePrologue(CompilationInfo* info);

  // Allocate and install the code.
  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
                                       Code::Flags flags,
                                       CompilationInfo* info);

#ifdef ENABLE_LOGGING_AND_PROFILING
  static bool ShouldGenerateLog(Expression* type);
#endif

  static bool RecordPositions(MacroAssembler* masm,
                              int pos,
                              bool right_here = false);

  // Accessors
  MacroAssembler* masm() { return masm_; }
  VirtualFrame* frame() const { return frame_; }
  inline Handle<Script> script();

  bool has_valid_frame() const { return frame_ != NULL; }

  // Set the virtual frame to be new_frame, with non-frame register
  // reference counts given by non_frame_registers.  The non-frame
  // register reference counts of the old frame are returned in
  // non_frame_registers.
  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);

  void DeleteFrame();

  RegisterAllocator* allocator() const { return allocator_; }

  CodeGenState* state() { return state_; }
  void set_state(CodeGenState* state) { state_ = state; }

  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }

  bool in_spilled_code() const { return in_spilled_code_; }
  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }

  // If the name is an inline runtime function call, return the number of
  // expected arguments.  Otherwise return -1.
  static int InlineRuntimeCallArgumentsCount(Handle<String> name);

 private:
  // Construction/Destruction
  explicit CodeGenerator(MacroAssembler* masm);

  // Accessors
  inline bool is_eval();
  inline Scope* scope();

  // Generating deferred code.
  void ProcessDeferred();

  // State
  ControlDestination* destination() const { return state_->destination(); }

  // Track loop nesting level.
  int loop_nesting() const { return loop_nesting_; }
  void IncrementLoopNesting() { loop_nesting_++; }
  void DecrementLoopNesting() { loop_nesting_--; }


  // Node visitors.
  void VisitStatements(ZoneList<Statement*>* statements);

#define DEF_VISIT(type) \
  void Visit##type(type* node);
  AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

  // Visit a statement and then spill the virtual frame if control flow can
  // reach the end of the statement (ie, it does not exit via break,
  // continue, return, or throw).  This function is used temporarily while
  // the code generator is being transformed.
  void VisitAndSpill(Statement* statement);

  // Visit a list of statements and then spill the virtual frame if control
  // flow can reach the end of the list.
  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);

  // Main code generation function
  void Generate(CompilationInfo* info);

  // Generate the return sequence code.  Should be called no more than
  // once per compiled function, immediately after binding the return
  // target (which cannot be done more than once).
  void GenerateReturnSequence(Result* return_value);

  // Returns the arguments allocation mode.
  ArgumentsAllocationMode ArgumentsMode();

  // Store the arguments object and allocate it if necessary.
  Result StoreArgumentsObject(bool initial);

  // The following are used by class Reference.
  void LoadReference(Reference* ref);
  void UnloadReference(Reference* ref);

  static Operand ContextOperand(Register context, int index) {
    return Operand(context, Context::SlotOffset(index));
  }

  Operand SlotOperand(Slot* slot, Register tmp);

  Operand ContextSlotOperandCheckExtensions(Slot* slot,
                                            Result tmp,
                                            JumpTarget* slow);

  // Expressions
  static Operand GlobalObject() {
    return ContextOperand(rsi, Context::GLOBAL_INDEX);
  }

  void LoadCondition(Expression* x,
                     ControlDestination* destination,
                     bool force_control);
  void Load(Expression* expr);
  void LoadGlobal();
  void LoadGlobalReceiver();

  // Generate code to push the value of an expression on top of the frame
  // and then spill the frame fully to memory.  This function is used
  // temporarily while the code generator is being transformed.
  void LoadAndSpill(Expression* expression);

  // Read a value from a slot and leave it on top of the expression stack.
  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                           TypeofState typeof_state,
                                           JumpTarget* slow);

  // Support for loading from local/global variables and arguments
  // whose location is known unless they are shadowed by
  // eval-introduced bindings.  Generates no code for unsupported slot
  // types and therefore expects to fall through to the slow jump target.
  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
                                       TypeofState typeof_state,
                                       Result* result,
                                       JumpTarget* slow,
                                       JumpTarget* done);

  // Store the value on top of the expression stack into a slot, leaving the
  // value in place.
  void StoreToSlot(Slot* slot, InitState init_state);

  // Receiver is passed on the frame and not consumed.
  Result EmitNamedLoad(Handle<String> name, bool is_contextual);

  // Load a property of an object, returning it in a Result.
  // The object and the property name are passed on the stack, and
  // not changed.
  Result EmitKeyedLoad();

  // Special code for typeof expressions: Unfortunately, we must
  // be careful when loading the expression in 'typeof'
  // expressions.  We are not allowed to throw reference errors for
  // non-existing properties of the global object, so we must make it
  // look like an explicit property access, instead of an access
  // through the context chain.
  void LoadTypeofExpression(Expression* x);
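
  // For example, in JavaScript (illustrative):
  //
  //   typeof not_declared  // evaluates to "undefined"; must not throw
  //   not_declared         // throws a ReferenceError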

  // Translate the value on top of the frame into control flow to the
  // control destination.
  void ToBoolean(ControlDestination* destination);

  // Generate code that computes a shortcutting logical operation.
  void GenerateLogicalBooleanOperation(BinaryOperation* node);

  void GenericBinaryOperation(BinaryOperation* expr,
                              OverwriteMode overwrite_mode);

  // Emits code sequence that jumps to deferred code if the input
  // is not a smi.  Cannot be in MacroAssembler because it takes
  // advantage of TypeInfo to skip unneeded checks.
  void JumpIfNotSmiUsingTypeInfo(Register reg,
                                 TypeInfo type,
                                 DeferredCode* deferred);

  // Emits code sequence that jumps to deferred code if the inputs
  // are not both smis.  Cannot be in MacroAssembler because it takes
  // advantage of TypeInfo to skip unneeded checks.
  void JumpIfNotBothSmiUsingTypeInfo(Register left,
                                     Register right,
                                     TypeInfo left_info,
                                     TypeInfo right_info,
                                     DeferredCode* deferred);

  // If possible, combine two constant smi values using op to produce
  // a smi result, and push it on the virtual frame, all at compile time.
  // Returns true if it succeeds.  Otherwise it has no effect.
  bool FoldConstantSmis(Token::Value op, int left, int right);

  // Emit code to perform a binary operation on a constant
  // smi and a likely smi.  Consumes the Result *operand.
  Result ConstantSmiBinaryOperation(BinaryOperation* expr,
                                    Result* operand,
                                    Handle<Object> constant_operand,
                                    bool reversed,
                                    OverwriteMode overwrite_mode);

  // Emit code to perform a binary operation on two likely smis.
  // The code to handle smi arguments is produced inline.
  // Consumes the Results *left and *right.
  Result LikelySmiBinaryOperation(BinaryOperation* expr,
                                  Result* left,
                                  Result* right,
                                  OverwriteMode overwrite_mode);

  void Comparison(AstNode* node,
                  Condition cc,
                  bool strict,
                  ControlDestination* destination);
  void GenerateInlineNumberComparison(Result* left_side,
                                      Result* right_side,
                                      Condition cc,
                                      ControlDestination* dest);

  // To prevent long attacker-controlled byte sequences, integer constants
  // from the JavaScript source are loaded in two parts if they are larger
  // than 16 bits.
  static const int kMaxSmiInlinedBits = 16;
  bool IsUnsafeSmi(Handle<Object> value);
  // Load an integer constant x into a register target using
  // at most 16 bits of user-controlled data per assembly operation.
  void LoadUnsafeSmi(Register target, Handle<Object> value);
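
  // Conceptually the split works like this (a sketch only; the actual
  // instruction sequence is defined in codegen-x64.cc):
  //
  //   target  = bits & 0x0000FFFF;  // at most 16 attacker-chosen bits
  //   target ^= bits & 0xFFFF0000;  // remaining bits, low half zeroed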

  void CallWithArguments(ZoneList<Expression*>* arguments,
                         CallFunctionFlags flags,
                         int position);

  // An optimized implementation of expressions of the form
  // x.apply(y, arguments).  We call x the applicand and y the receiver.
  // The optimization avoids allocating an arguments object if possible.
  void CallApplyLazy(Expression* applicand,
                     Expression* receiver,
                     VariableProxy* arguments,
                     int position);

  void CheckStack();

  struct InlineRuntimeLUT {
    void (CodeGenerator::*method)(ZoneList<Expression*>*);
    const char* name;
    int nargs;
  };
  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
  bool CheckForInlineRuntimeCall(CallRuntime* node);
  static bool PatchInlineRuntimeEntry(Handle<String> name,
                                      const InlineRuntimeLUT& new_entry,
                                      InlineRuntimeLUT* old_entry);
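
  // A typical table entry (hypothetical values; the real table,
  // kInlineRuntimeLUT, is defined in the .cc file) might look like:
  //
  //   { &CodeGenerator::GenerateIsSmi, "_IsSmi", 1 }
  //
  // so that a runtime call written as %_IsSmi(x) is expanded inline.
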
  void ProcessDeclarations(ZoneList<Declaration*>* declarations);

  static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);

  static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);

  // Declare global variables and functions in the given array of
  // name/value pairs.
  void DeclareGlobals(Handle<FixedArray> pairs);

  // Instantiate the function based on the shared function info.
  void InstantiateFunction(Handle<SharedFunctionInfo> function_info);

  // Support for type checks.
  void GenerateIsSmi(ZoneList<Expression*>* args);
  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
  void GenerateIsArray(ZoneList<Expression*>* args);
  void GenerateIsRegExp(ZoneList<Expression*>* args);
  void GenerateIsObject(ZoneList<Expression*>* args);
  void GenerateIsFunction(ZoneList<Expression*>* args);
  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);

  // Support for construct call checks.
  void GenerateIsConstructCall(ZoneList<Expression*>* args);

  // Support for arguments.length and arguments[?].
  void GenerateArgumentsLength(ZoneList<Expression*>* args);
  void GenerateArguments(ZoneList<Expression*>* args);

  // Support for accessing the class and value fields of an object.
  void GenerateClassOf(ZoneList<Expression*>* args);
  void GenerateValueOf(ZoneList<Expression*>* args);
  void GenerateSetValueOf(ZoneList<Expression*>* args);

  // Fast support for charCodeAt(n).
  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);

  // Fast support for String.fromCharCode(code).
  void GenerateStringCharFromCode(ZoneList<Expression*>* args);

  // Fast support for string.charAt(n) and string[n].
  void GenerateStringCharAt(ZoneList<Expression*>* args);

  // Fast support for object equality testing.
  void GenerateObjectEquals(ZoneList<Expression*>* args);

  void GenerateLog(ZoneList<Expression*>* args);

  void GenerateGetFramePointer(ZoneList<Expression*>* args);

  // Fast support for Math.random().
  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);

  // Fast support for StringAdd.
  void GenerateStringAdd(ZoneList<Expression*>* args);

  // Fast support for SubString.
  void GenerateSubString(ZoneList<Expression*>* args);

  // Fast support for StringCompare.
  void GenerateStringCompare(ZoneList<Expression*>* args);

  // Support for direct calls from JavaScript to native RegExp code.
  void GenerateRegExpExec(ZoneList<Expression*>* args);

  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);

  // Support for fast native caches.
  void GenerateGetFromCache(ZoneList<Expression*>* args);

  // Fast support for number to string.
  void GenerateNumberToString(ZoneList<Expression*>* args);

  // Fast swapping of elements.  Takes three expressions, the object and two
  // indices.  This should only be used if the indices are known to be
  // non-negative and within bounds of the elements array at the call site.
  void GenerateSwapElements(ZoneList<Expression*>* args);

  // Fast call for custom callbacks.
  void GenerateCallFunction(ZoneList<Expression*>* args);

  // Fast call to math functions.
  void GenerateMathPow(ZoneList<Expression*>* args);
  void GenerateMathSin(ZoneList<Expression*>* args);
  void GenerateMathCos(ZoneList<Expression*>* args);
  void GenerateMathSqrt(ZoneList<Expression*>* args);

  // Simple condition analysis.
  enum ConditionAnalysis {
    ALWAYS_TRUE,
    ALWAYS_FALSE,
    DONT_KNOW
  };
  ConditionAnalysis AnalyzeCondition(Expression* cond);

  // Methods used to record the source positions the code is generated for.
  // Source positions are collected by the assembler and emitted with the
  // relocation information.
  void CodeForFunctionPosition(FunctionLiteral* fun);
  void CodeForReturnPosition(FunctionLiteral* fun);
  void CodeForStatementPosition(Statement* node);
  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
  void CodeForSourcePosition(int pos);

  void SetTypeForStackSlot(Slot* slot, TypeInfo info);

#ifdef DEBUG
  // True if the registers are valid for entry to a block.  There should
  // be no frame-external references to (non-reserved) registers.
  bool HasValidEntryRegisters();
#endif

  ZoneList<DeferredCode*> deferred_;

  // Assembler
  MacroAssembler* masm_;  // to generate code

  CompilationInfo* info_;

  // Code generation state
  VirtualFrame* frame_;
  RegisterAllocator* allocator_;
  CodeGenState* state_;
  int loop_nesting_;

  // Jump targets.
  // The target of the return from the function.
  BreakTarget function_return_;

  // True if the function return is shadowed (ie, jumping to the target
  // function_return_ does not jump to the true function return, but rather
  // to some unlinking code).
  bool function_return_is_shadowed_;

  // True when we are in code that expects the virtual frame to be fully
  // spilled.  Some virtual frame functions are disabled in DEBUG builds
  // when called from spilled code, because they do not leave the virtual
  // frame in a spilled state.
  bool in_spilled_code_;

  static InlineRuntimeLUT kInlineRuntimeLUT[];

  friend class VirtualFrame;
  friend class JumpTarget;
  friend class Reference;
  friend class Result;
  friend class FastCodeGenerator;
  friend class FullCodeGenerator;
  friend class FullCodeGenSyntaxChecker;

  friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc

  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};


// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
 public:
  explicit TranscendentalCacheStub(TranscendentalCache::Type type)
      : type_(type) {}
  void Generate(MacroAssembler* masm);
 private:
  TranscendentalCache::Type type_;
  Major MajorKey() { return TranscendentalCache; }
  int MinorKey() { return type_; }
  Runtime::FunctionId RuntimeFunction();
  void GenerateOperation(MacroAssembler* masm, Label* on_nan_result);
};


// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
  NO_GENERIC_BINARY_FLAGS = 0,
  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
};


class GenericBinaryOpStub: public CodeStub {
 public:
  GenericBinaryOpStub(Token::Value op,
                      OverwriteMode mode,
                      GenericBinaryFlags flags,
                      TypeInfo operands_type = TypeInfo::Unknown())
      : op_(op),
        mode_(mode),
        flags_(flags),
        args_in_registers_(false),
        args_reversed_(false),
        static_operands_type_(operands_type),
        runtime_operands_type_(BinaryOpIC::DEFAULT),
        name_(NULL) {
    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
  }

  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
      : op_(OpBits::decode(key)),
        mode_(ModeBits::decode(key)),
        flags_(FlagBits::decode(key)),
        args_in_registers_(ArgsInRegistersBits::decode(key)),
        args_reversed_(ArgsReversedBits::decode(key)),
        static_operands_type_(TypeInfo::ExpandedRepresentation(
            StaticTypeInfoBits::decode(key))),
        runtime_operands_type_(type_info),
        name_(NULL) {
  }

  // Generate code to call the stub with the supplied arguments.  This will
  // add code at the call site to prepare arguments either in registers or
  // on the stack, together with the actual call.
  void GenerateCall(MacroAssembler* masm, Register left, Register right);
  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);

  Result GenerateCall(MacroAssembler* masm,
                      VirtualFrame* frame,
                      Result* left,
                      Result* right);

 private:
  Token::Value op_;
  OverwriteMode mode_;
  GenericBinaryFlags flags_;
  bool args_in_registers_;  // Arguments passed in registers, not on the stack.
  bool args_reversed_;  // Left and right argument are swapped.

  // Number type information of operands, determined by code generator.
  TypeInfo static_operands_type_;

  // Operand type information determined at runtime.
  BinaryOpIC::TypeInfo runtime_operands_type_;

  char* name_;

  const char* GetName();

#ifdef DEBUG
  void Print() {
    PrintF("GenericBinaryOpStub %d (op %s), "
           "(mode %d, flags %d, registers %d, reversed %d, only_numbers %s)\n",
           MinorKey(),
           Token::String(op_),
           static_cast<int>(mode_),
           static_cast<int>(flags_),
           static_cast<int>(args_in_registers_),
           static_cast<int>(args_reversed_),
           static_operands_type_.ToString());
  }
#endif

  // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 7> {};
  class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
  class ArgsReversedBits: public BitField<bool, 10, 1> {};
  class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
  class StaticTypeInfoBits: public BitField<int, 12, 3> {};
  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 2> {};

  Major MajorKey() { return GenericBinaryOp; }
  int MinorKey() {
    // Encode the parameters in a unique 17 bit value.
    return OpBits::encode(op_)
           | ModeBits::encode(mode_)
           | FlagBits::encode(flags_)
           | ArgsInRegistersBits::encode(args_in_registers_)
           | ArgsReversedBits::encode(args_reversed_)
           | StaticTypeInfoBits::encode(
                 static_operands_type_.ThreeBitRepresentation())
           | RuntimeTypeInfoBits::encode(runtime_operands_type_);
  }

  void Generate(MacroAssembler* masm);
  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
  void GenerateLoadArguments(MacroAssembler* masm);
  void GenerateReturn(MacroAssembler* masm);
  void GenerateRegisterArgsPush(MacroAssembler* masm);
  void GenerateTypeTransition(MacroAssembler* masm);

  bool ArgsInRegistersSupported() {
    return (op_ == Token::ADD) || (op_ == Token::SUB)
        || (op_ == Token::MUL) || (op_ == Token::DIV);
  }
  bool IsOperationCommutative() {
    return (op_ == Token::ADD) || (op_ == Token::MUL);
  }

  void SetArgsInRegisters() { args_in_registers_ = true; }
  void SetArgsReversed() { args_reversed_ = true; }
  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
  bool HasArgsInRegisters() { return args_in_registers_; }
  bool HasArgsReversed() { return args_reversed_; }

  bool ShouldGenerateSmiCode() {
    return HasSmiCodeInStub() &&
           runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
           runtime_operands_type_ != BinaryOpIC::STRINGS;
  }

  bool ShouldGenerateFPCode() {
    return runtime_operands_type_ != BinaryOpIC::STRINGS;
  }

  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }

  virtual InlineCacheState GetICState() {
    return BinaryOpIC::ToState(runtime_operands_type_);
  }
};
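
// The BitField helpers make the key round-trip mechanical.  An
// illustrative sketch (hypothetical values):
//
//   int key = OpBits::encode(Token::ADD)
//             | ModeBits::encode(OVERWRITE_LEFT)
//             | FlagBits::encode(NO_GENERIC_BINARY_FLAGS);
//   ASSERT(OpBits::decode(key) == Token::ADD);
//   ASSERT(ModeBits::decode(key) == OVERWRITE_LEFT);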

class StringHelper : public AllStatic {
 public:
  // Generate code for copying characters using a simple loop.  This should
  // only be used in places where the number of characters is small and the
  // additional setup and checking in GenerateCopyCharactersREP adds too much
  // overhead.  Copying of overlapping regions is not supported.
  static void GenerateCopyCharacters(MacroAssembler* masm,
                                     Register dest,
                                     Register src,
                                     Register count,
                                     bool ascii);

  // Generate code for copying characters using the rep movs instruction.
  // Copies rcx characters from rsi to rdi.  Copying of overlapping regions
  // is not supported.
  static void GenerateCopyCharactersREP(MacroAssembler* masm,
                                        Register dest,    // Must be rdi.
                                        Register src,     // Must be rsi.
                                        Register count,   // Must be rcx.
                                        bool ascii);


  // Probe the symbol table for a two character string.  If the string is
  // not found by probing, a jump to the label not_found is performed.  This
  // jump does not guarantee that the string is not in the symbol table.  If
  // the string is found, the code falls through with the string in register
  // rax.
  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
                                                   Register c1,
                                                   Register c2,
                                                   Register scratch1,
                                                   Register scratch2,
                                                   Register scratch3,
                                                   Register scratch4,
                                                   Label* not_found);

  // Generate string hash.
  static void GenerateHashInit(MacroAssembler* masm,
                               Register hash,
                               Register character,
                               Register scratch);
  static void GenerateHashAddCharacter(MacroAssembler* masm,
                                       Register hash,
                                       Register character,
                                       Register scratch);
  static void GenerateHashGetHash(MacroAssembler* masm,
                                  Register hash,
                                  Register scratch);

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
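
// For reference, these helpers compute a Jenkins-style one-at-a-time
// hash.  A C++ sketch of the intended computation (an assumption; the
// generated code in codegen-x64.cc is authoritative):
//
//   uint32_t hash = c0 + (c0 << 10);  // GenerateHashInit, first character
//   hash ^= hash >> 6;
//   ...                               // GenerateHashAddCharacter, per char:
//   hash += c;
//   hash += hash << 10;
//   hash ^= hash >> 6;
//   ...                               // GenerateHashGetHash, finalization:
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;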


// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
  NO_STRING_ADD_FLAGS = 0,
  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
};


class StringAddStub: public CodeStub {
 public:
  explicit StringAddStub(StringAddFlags flags) {
    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
  }

 private:
  Major MajorKey() { return StringAdd; }
  int MinorKey() { return string_check_ ? 0 : 1; }

  void Generate(MacroAssembler* masm);

  // Should the stub check whether arguments are strings?
  bool string_check_;
};


class SubStringStub: public CodeStub {
 public:
  SubStringStub() {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};


class StringCompareStub: public CodeStub {
 public:
  explicit StringCompareStub() {}

  // Compare two flat ascii strings and return the result in rax after
  // popping two arguments from the stack.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left,
                                              Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4);

 private:
  Major MajorKey() { return StringCompare; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};


class NumberToStringStub: public CodeStub {
 public:
  NumberToStringStub() { }

  // Generate code to do a lookup in the number string cache.  If the number
  // in the register object is found in the cache, the generated code falls
  // through with the result in the result register.  The object and the
  // result register can be the same.  If the number is not found in the
  // cache, the code jumps to the label not_found with only the content of
  // register object unchanged.
  static void GenerateLookupNumberStringCache(MacroAssembler* masm,
                                              Register object,
                                              Register result,
                                              Register scratch1,
                                              Register scratch2,
                                              bool object_is_smi,
                                              Label* not_found);

 private:
  static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
                                             Register hash,
                                             Register mask);

  Major MajorKey() { return NumberToString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);

  const char* GetName() { return "NumberToStringStub"; }

#ifdef DEBUG
  void Print() {
    PrintF("NumberToStringStub\n");
  }
#endif
};


class RecordWriteStub : public CodeStub {
 public:
  RecordWriteStub(Register object, Register addr, Register scratch)
      : object_(object), addr_(addr), scratch_(scratch) { }

  void Generate(MacroAssembler* masm);

 private:
  Register object_;
  Register addr_;
  Register scratch_;

#ifdef DEBUG
  void Print() {
    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
           object_.code(), addr_.code(), scratch_.code());
  }
#endif

  // Minor key encoding in 12 bits.  4 bits for each of the three
  // registers (object, address and scratch): OOOOAAAASSSS.
  class ScratchBits : public BitField<uint32_t, 0, 4> {};
  class AddressBits : public BitField<uint32_t, 4, 4> {};
  class ObjectBits : public BitField<uint32_t, 8, 4> {};

  Major MajorKey() { return RecordWrite; }

  int MinorKey() {
    // Encode the registers.
    return ObjectBits::encode(object_.code()) |
           AddressBits::encode(addr_.code()) |
           ScratchBits::encode(scratch_.code());
  }
};


} }  // namespace v8::internal

#endif  // V8_X64_CODEGEN_X64_H_