1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/v8.h"
6
7#include "src/code-factory.h"
8#include "src/code-stubs.h"
9#include "src/hydrogen-osr.h"
10#include "src/ic/ic.h"
11#include "src/ic/stub-cache.h"
12#include "src/mips64/lithium-codegen-mips64.h"
13#include "src/mips64/lithium-gap-resolver-mips64.h"
14
15namespace v8 {
16namespace internal {
17
18
19class SafepointGenerator FINAL  : public CallWrapper {
20 public:
21  SafepointGenerator(LCodeGen* codegen,
22                     LPointerMap* pointers,
23                     Safepoint::DeoptMode mode)
24      : codegen_(codegen),
25        pointers_(pointers),
26        deopt_mode_(mode) { }
27  virtual ~SafepointGenerator() {}
28
29  virtual void BeforeCall(int call_size) const OVERRIDE {}
30
31  virtual void AfterCall() const OVERRIDE {
32    codegen_->RecordSafepoint(pointers_, deopt_mode_);
33  }
34
35 private:
36  LCodeGen* codegen_;
37  LPointerMap* pointers_;
38  Safepoint::DeoptMode deopt_mode_;
39};
40
41
42#define __ masm()->
43
44bool LCodeGen::GenerateCode() {
45  LPhase phase("Z_Code generation", chunk());
46  DCHECK(is_unused());
47  status_ = GENERATING;
48
49  // Open a frame scope to indicate that there is a frame on the stack.  The
50  // NONE indicates that the scope shouldn't actually generate code to set up
51  // the frame (that is done in GeneratePrologue).
52  FrameScope frame_scope(masm_, StackFrame::NONE);
53
54  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
55         GenerateJumpTable() && GenerateSafepointTable();
56}
57
58
59void LCodeGen::FinishCode(Handle<Code> code) {
60  DCHECK(is_done());
61  code->set_stack_slots(GetStackSlotCount());
62  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
63  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
64  PopulateDeoptimizationData(code);
65}
66
67
68void LCodeGen::SaveCallerDoubles() {
69  DCHECK(info()->saves_caller_doubles());
70  DCHECK(NeedsEagerFrame());
71  Comment(";;; Save clobbered callee double registers");
72  int count = 0;
73  BitVector* doubles = chunk()->allocated_double_registers();
74  BitVector::Iterator save_iterator(doubles);
75  while (!save_iterator.Done()) {
76    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
77            MemOperand(sp, count * kDoubleSize));
78    save_iterator.Advance();
79    count++;
80  }
81}
82
83
84void LCodeGen::RestoreCallerDoubles() {
85  DCHECK(info()->saves_caller_doubles());
86  DCHECK(NeedsEagerFrame());
87  Comment(";;; Restore clobbered callee double registers");
88  BitVector* doubles = chunk()->allocated_double_registers();
89  BitVector::Iterator save_iterator(doubles);
90  int count = 0;
91  while (!save_iterator.Done()) {
92    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
93            MemOperand(sp, count * kDoubleSize));
94    save_iterator.Advance();
95    count++;
96  }
97}
98
99
100bool LCodeGen::GeneratePrologue() {
101  DCHECK(is_generating());
102
103  if (info()->IsOptimizing()) {
104    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
105
106#ifdef DEBUG
107    if (strlen(FLAG_stop_at) > 0 &&
108        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
109      __ stop("stop_at");
110    }
111#endif
112
113    // a1: Callee's JS function.
114    // cp: Callee's context.
115    // fp: Caller's frame pointer.
116    // lr: Caller's pc.
117
118    // Sloppy mode functions and builtins need to replace the receiver with the
119    // global proxy when called as functions (without an explicit receiver
120    // object).
121    if (info_->this_has_uses() &&
122        info_->strict_mode() == SLOPPY &&
123        !info_->is_native()) {
124      Label ok;
125      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
126      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
127      __ ld(a2, MemOperand(sp, receiver_offset));
128      __ Branch(&ok, ne, a2, Operand(at));
129
130      __ ld(a2, GlobalObjectOperand());
131      __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
132
133      __ sd(a2, MemOperand(sp, receiver_offset));
134
135      __ bind(&ok);
136    }
137  }
138
139  info()->set_prologue_offset(masm_->pc_offset());
140  if (NeedsEagerFrame()) {
141    if (info()->IsStub()) {
142      __ StubPrologue();
143    } else {
144      __ Prologue(info()->IsCodePreAgingActive());
145    }
146    frame_is_built_ = true;
147    info_->AddNoFrameRange(0, masm_->pc_offset());
148  }
149
150  // Reserve space for the stack slots needed by the code.
151  int slots = GetStackSlotCount();
152  if (slots > 0) {
153    if (FLAG_debug_code) {
154      __ Dsubu(sp,  sp, Operand(slots * kPointerSize));
155      __ Push(a0, a1);
156      __ Daddu(a0, sp, Operand(slots *  kPointerSize));
157      __ li(a1, Operand(kSlotsZapValue));
158      Label loop;
159      __ bind(&loop);
160      __ Dsubu(a0, a0, Operand(kPointerSize));
161      __ sd(a1, MemOperand(a0, 2 * kPointerSize));
162      __ Branch(&loop, ne, a0, Operand(sp));
163      __ Pop(a0, a1);
164    } else {
165      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
166    }
167  }
168
169  if (info()->saves_caller_doubles()) {
170    SaveCallerDoubles();
171  }
172
173  // Possibly allocate a local context.
174  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
175  if (heap_slots > 0) {
176    Comment(";;; Allocate local context");
177    bool need_write_barrier = true;
178    // Argument to NewContext is the function, which is in a1.
179    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
180      FastNewContextStub stub(isolate(), heap_slots);
181      __ CallStub(&stub);
182      // Result of FastNewContextStub is always in new space.
183      need_write_barrier = false;
184    } else {
185      __ push(a1);
186      __ CallRuntime(Runtime::kNewFunctionContext, 1);
187    }
188    RecordSafepoint(Safepoint::kNoLazyDeopt);
189    // Context is returned in both v0. It replaces the context passed to us.
190    // It's saved in the stack and kept live in cp.
191    __ mov(cp, v0);
192    __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
193    // Copy any necessary parameters into the context.
194    int num_parameters = scope()->num_parameters();
195    for (int i = 0; i < num_parameters; i++) {
196      Variable* var = scope()->parameter(i);
197      if (var->IsContextSlot()) {
198        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
199            (num_parameters - 1 - i) * kPointerSize;
200        // Load parameter from stack.
201        __ ld(a0, MemOperand(fp, parameter_offset));
202        // Store it in the context.
203        MemOperand target = ContextOperand(cp, var->index());
204        __ sd(a0, target);
205        // Update the write barrier. This clobbers a3 and a0.
206        if (need_write_barrier) {
207          __ RecordWriteContextSlot(
208              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
209        } else if (FLAG_debug_code) {
210          Label done;
211          __ JumpIfInNewSpace(cp, a0, &done);
212          __ Abort(kExpectedNewSpaceObject);
213          __ bind(&done);
214        }
215      }
216    }
217    Comment(";;; End allocate local context");
218  }
219
220  // Trace the call.
221  if (FLAG_trace && info()->IsOptimizing()) {
222    // We have not executed any compiled code yet, so cp still holds the
223    // incoming context.
224    __ CallRuntime(Runtime::kTraceEnter, 0);
225  }
226  return !is_aborted();
227}
228
229
230void LCodeGen::GenerateOsrPrologue() {
231  // Generate the OSR entry prologue at the first unknown OSR value, or if there
232  // are none, at the OSR entrypoint instruction.
233  if (osr_pc_offset_ >= 0) return;
234
235  osr_pc_offset_ = masm()->pc_offset();
236
237  // Adjust the frame size, subsuming the unoptimized frame into the
238  // optimized frame.
239  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
240  DCHECK(slots >= 0);
241  __ Dsubu(sp, sp, Operand(slots * kPointerSize));
242}
243
244
245void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
246  if (instr->IsCall()) {
247    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
248  }
249  if (!instr->IsLazyBailout() && !instr->IsGap()) {
250    safepoints_.BumpLastLazySafepointIndex();
251  }
252}
253
254
255bool LCodeGen::GenerateDeferredCode() {
256  DCHECK(is_generating());
257  if (deferred_.length() > 0) {
258    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
259      LDeferredCode* code = deferred_[i];
260
261      HValue* value =
262          instructions_->at(code->instruction_index())->hydrogen_value();
263      RecordAndWritePosition(
264          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
265
266      Comment(";;; <@%d,#%d> "
267              "-------------------- Deferred %s --------------------",
268              code->instruction_index(),
269              code->instr()->hydrogen_value()->id(),
270              code->instr()->Mnemonic());
271      __ bind(code->entry());
272      if (NeedsDeferredFrame()) {
273        Comment(";;; Build frame");
274        DCHECK(!frame_is_built_);
275        DCHECK(info()->IsStub());
276        frame_is_built_ = true;
277        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
278        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
279        __ push(scratch0());
280        __ Daddu(fp, sp,
281            Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
282        Comment(";;; Deferred code");
283      }
284      code->Generate();
285      if (NeedsDeferredFrame()) {
286        Comment(";;; Destroy frame");
287        DCHECK(frame_is_built_);
288        __ pop(at);
289        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
290        frame_is_built_ = false;
291      }
292      __ jmp(code->exit());
293    }
294  }
295  // Deferred code is the last part of the instruction sequence. Mark
296  // the generated code as done unless we bailed out.
297  if (!is_aborted()) status_ = DONE;
298  return !is_aborted();
299}
300
301
302bool LCodeGen::GenerateJumpTable() {
303  if (jump_table_.length() > 0) {
304    Comment(";;; -------------------- Jump table --------------------");
305  }
306  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
307  Label table_start;
308  __ bind(&table_start);
309  Label needs_frame;
310  for (int i = 0; i < jump_table_.length(); i++) {
311    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
312    __ bind(&table_entry->label);
313    Address entry = table_entry->address;
314    DeoptComment(table_entry->reason);
315    __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
316    if (table_entry->needs_frame) {
317      DCHECK(!info()->saves_caller_doubles());
318      if (needs_frame.is_bound()) {
319        __ Branch(&needs_frame);
320      } else {
321        __ bind(&needs_frame);
322        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
323        // This variant of deopt can only be used with stubs. Since we don't
324        // have a function pointer to install in the stack frame that we're
325        // building, install a special marker there instead.
326        DCHECK(info()->IsStub());
327        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
328        __ push(scratch0());
329        __ Daddu(fp, sp,
330            Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
331        __ Call(t9);
332      }
333    } else {
334      if (info()->saves_caller_doubles()) {
335        DCHECK(info()->IsStub());
336        RestoreCallerDoubles();
337      }
338      __ Call(t9);
339    }
340  }
341  __ RecordComment("]");
342
343  // The deoptimization jump table is the last part of the instruction
344  // sequence. Mark the generated code as done unless we bailed out.
345  if (!is_aborted()) status_ = DONE;
346  return !is_aborted();
347}
348
349
350bool LCodeGen::GenerateSafepointTable() {
351  DCHECK(is_done());
352  safepoints_.Emit(masm(), GetStackSlotCount());
353  return !is_aborted();
354}
355
356
357Register LCodeGen::ToRegister(int index) const {
358  return Register::FromAllocationIndex(index);
359}
360
361
362DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
363  return DoubleRegister::FromAllocationIndex(index);
364}
365
366
367Register LCodeGen::ToRegister(LOperand* op) const {
368  DCHECK(op->IsRegister());
369  return ToRegister(op->index());
370}
371
372
373Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
374  if (op->IsRegister()) {
375    return ToRegister(op->index());
376  } else if (op->IsConstantOperand()) {
377    LConstantOperand* const_op = LConstantOperand::cast(op);
378    HConstant* constant = chunk_->LookupConstant(const_op);
379    Handle<Object> literal = constant->handle(isolate());
380    Representation r = chunk_->LookupLiteralRepresentation(const_op);
381    if (r.IsInteger32()) {
382      DCHECK(literal->IsNumber());
383      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
384    } else if (r.IsSmi()) {
385      DCHECK(constant->HasSmiValue());
386      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
387    } else if (r.IsDouble()) {
388      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
389    } else {
390      DCHECK(r.IsSmiOrTagged());
391      __ li(scratch, literal);
392    }
393    return scratch;
394  } else if (op->IsStackSlot()) {
395    __ ld(scratch, ToMemOperand(op));
396    return scratch;
397  }
398  UNREACHABLE();
399  return scratch;
400}
401
402
403DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
404  DCHECK(op->IsDoubleRegister());
405  return ToDoubleRegister(op->index());
406}
407
408
409DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
410                                                FloatRegister flt_scratch,
411                                                DoubleRegister dbl_scratch) {
412  if (op->IsDoubleRegister()) {
413    return ToDoubleRegister(op->index());
414  } else if (op->IsConstantOperand()) {
415    LConstantOperand* const_op = LConstantOperand::cast(op);
416    HConstant* constant = chunk_->LookupConstant(const_op);
417    Handle<Object> literal = constant->handle(isolate());
418    Representation r = chunk_->LookupLiteralRepresentation(const_op);
419    if (r.IsInteger32()) {
420      DCHECK(literal->IsNumber());
421      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
422      __ mtc1(at, flt_scratch);
423      __ cvt_d_w(dbl_scratch, flt_scratch);
424      return dbl_scratch;
425    } else if (r.IsDouble()) {
426      Abort(kUnsupportedDoubleImmediate);
427    } else if (r.IsTagged()) {
428      Abort(kUnsupportedTaggedImmediate);
429    }
430  } else if (op->IsStackSlot()) {
431    MemOperand mem_op = ToMemOperand(op);
432    __ ldc1(dbl_scratch, mem_op);
433    return dbl_scratch;
434  }
435  UNREACHABLE();
436  return dbl_scratch;
437}
438
439
440Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
441  HConstant* constant = chunk_->LookupConstant(op);
442  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
443  return constant->handle(isolate());
444}
445
446
447bool LCodeGen::IsInteger32(LConstantOperand* op) const {
448  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
449}
450
451
452bool LCodeGen::IsSmi(LConstantOperand* op) const {
453  return chunk_->LookupLiteralRepresentation(op).IsSmi();
454}
455
456
457int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
458  // return ToRepresentation(op, Representation::Integer32());
459  HConstant* constant = chunk_->LookupConstant(op);
460  return constant->Integer32Value();
461}
462
463
464int32_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
465                                   const Representation& r) const {
466  HConstant* constant = chunk_->LookupConstant(op);
467  int32_t value = constant->Integer32Value();
468  if (r.IsInteger32()) return value;
469  DCHECK(r.IsSmiOrTagged());
470  return reinterpret_cast<int64_t>(Smi::FromInt(value));
471}
472
473
474Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
475  HConstant* constant = chunk_->LookupConstant(op);
476  return Smi::FromInt(constant->Integer32Value());
477}
478
479
480double LCodeGen::ToDouble(LConstantOperand* op) const {
481  HConstant* constant = chunk_->LookupConstant(op);
482  DCHECK(constant->HasDoubleValue());
483  return constant->DoubleValue();
484}
485
486
487Operand LCodeGen::ToOperand(LOperand* op) {
488  if (op->IsConstantOperand()) {
489    LConstantOperand* const_op = LConstantOperand::cast(op);
490    HConstant* constant = chunk()->LookupConstant(const_op);
491    Representation r = chunk_->LookupLiteralRepresentation(const_op);
492    if (r.IsSmi()) {
493      DCHECK(constant->HasSmiValue());
494      return Operand(Smi::FromInt(constant->Integer32Value()));
495    } else if (r.IsInteger32()) {
496      DCHECK(constant->HasInteger32Value());
497      return Operand(constant->Integer32Value());
498    } else if (r.IsDouble()) {
499      Abort(kToOperandUnsupportedDoubleImmediate);
500    }
501    DCHECK(r.IsTagged());
502    return Operand(constant->handle(isolate()));
503  } else if (op->IsRegister()) {
504    return Operand(ToRegister(op));
505  } else if (op->IsDoubleRegister()) {
506    Abort(kToOperandIsDoubleRegisterUnimplemented);
507    return Operand((int64_t)0);
508  }
509  // Stack slots not implemented, use ToMemOperand instead.
510  UNREACHABLE();
511  return Operand((int64_t)0);
512}
513
514
515static int ArgumentsOffsetWithoutFrame(int index) {
516  DCHECK(index < 0);
517  return -(index + 1) * kPointerSize;
518}
519
520
521MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
522  DCHECK(!op->IsRegister());
523  DCHECK(!op->IsDoubleRegister());
524  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
525  if (NeedsEagerFrame()) {
526    return MemOperand(fp, StackSlotOffset(op->index()));
527  } else {
528    // Retrieve parameter without eager stack-frame relative to the
529    // stack-pointer.
530    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
531  }
532}
533
534
535MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
536  DCHECK(op->IsDoubleStackSlot());
537  if (NeedsEagerFrame()) {
538    // return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
539    return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize);
540  } else {
541    // Retrieve parameter without eager stack-frame relative to the
542    // stack-pointer.
543    // return MemOperand(
544    //    sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
545    return MemOperand(
546        sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
547  }
548}
549
550
551void LCodeGen::WriteTranslation(LEnvironment* environment,
552                                Translation* translation) {
553  if (environment == NULL) return;
554
555  // The translation includes one command per value in the environment.
556  int translation_size = environment->translation_size();
557  // The output frame height does not include the parameters.
558  int height = translation_size - environment->parameter_count();
559
560  WriteTranslation(environment->outer(), translation);
561  bool has_closure_id = !info()->closure().is_null() &&
562      !info()->closure().is_identical_to(environment->closure());
563  int closure_id = has_closure_id
564      ? DefineDeoptimizationLiteral(environment->closure())
565      : Translation::kSelfLiteralId;
566
567  switch (environment->frame_type()) {
568    case JS_FUNCTION:
569      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
570      break;
571    case JS_CONSTRUCT:
572      translation->BeginConstructStubFrame(closure_id, translation_size);
573      break;
574    case JS_GETTER:
575      DCHECK(translation_size == 1);
576      DCHECK(height == 0);
577      translation->BeginGetterStubFrame(closure_id);
578      break;
579    case JS_SETTER:
580      DCHECK(translation_size == 2);
581      DCHECK(height == 0);
582      translation->BeginSetterStubFrame(closure_id);
583      break;
584    case STUB:
585      translation->BeginCompiledStubFrame();
586      break;
587    case ARGUMENTS_ADAPTOR:
588      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
589      break;
590  }
591
592  int object_index = 0;
593  int dematerialized_index = 0;
594  for (int i = 0; i < translation_size; ++i) {
595    LOperand* value = environment->values()->at(i);
596    AddToTranslation(environment,
597                     translation,
598                     value,
599                     environment->HasTaggedValueAt(i),
600                     environment->HasUint32ValueAt(i),
601                     &object_index,
602                     &dematerialized_index);
603  }
604}
605
606
607void LCodeGen::AddToTranslation(LEnvironment* environment,
608                                Translation* translation,
609                                LOperand* op,
610                                bool is_tagged,
611                                bool is_uint32,
612                                int* object_index_pointer,
613                                int* dematerialized_index_pointer) {
614  if (op == LEnvironment::materialization_marker()) {
615    int object_index = (*object_index_pointer)++;
616    if (environment->ObjectIsDuplicateAt(object_index)) {
617      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
618      translation->DuplicateObject(dupe_of);
619      return;
620    }
621    int object_length = environment->ObjectLengthAt(object_index);
622    if (environment->ObjectIsArgumentsAt(object_index)) {
623      translation->BeginArgumentsObject(object_length);
624    } else {
625      translation->BeginCapturedObject(object_length);
626    }
627    int dematerialized_index = *dematerialized_index_pointer;
628    int env_offset = environment->translation_size() + dematerialized_index;
629    *dematerialized_index_pointer += object_length;
630    for (int i = 0; i < object_length; ++i) {
631      LOperand* value = environment->values()->at(env_offset + i);
632      AddToTranslation(environment,
633                       translation,
634                       value,
635                       environment->HasTaggedValueAt(env_offset + i),
636                       environment->HasUint32ValueAt(env_offset + i),
637                       object_index_pointer,
638                       dematerialized_index_pointer);
639    }
640    return;
641  }
642
643  if (op->IsStackSlot()) {
644    if (is_tagged) {
645      translation->StoreStackSlot(op->index());
646    } else if (is_uint32) {
647      translation->StoreUint32StackSlot(op->index());
648    } else {
649      translation->StoreInt32StackSlot(op->index());
650    }
651  } else if (op->IsDoubleStackSlot()) {
652    translation->StoreDoubleStackSlot(op->index());
653  } else if (op->IsRegister()) {
654    Register reg = ToRegister(op);
655    if (is_tagged) {
656      translation->StoreRegister(reg);
657    } else if (is_uint32) {
658      translation->StoreUint32Register(reg);
659    } else {
660      translation->StoreInt32Register(reg);
661    }
662  } else if (op->IsDoubleRegister()) {
663    DoubleRegister reg = ToDoubleRegister(op);
664    translation->StoreDoubleRegister(reg);
665  } else if (op->IsConstantOperand()) {
666    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
667    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
668    translation->StoreLiteral(src_index);
669  } else {
670    UNREACHABLE();
671  }
672}
673
674
675void LCodeGen::CallCode(Handle<Code> code,
676                        RelocInfo::Mode mode,
677                        LInstruction* instr) {
678  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
679}
680
681
682void LCodeGen::CallCodeGeneric(Handle<Code> code,
683                               RelocInfo::Mode mode,
684                               LInstruction* instr,
685                               SafepointMode safepoint_mode) {
686  DCHECK(instr != NULL);
687  __ Call(code, mode);
688  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
689}
690
691
692void LCodeGen::CallRuntime(const Runtime::Function* function,
693                           int num_arguments,
694                           LInstruction* instr,
695                           SaveFPRegsMode save_doubles) {
696  DCHECK(instr != NULL);
697
698  __ CallRuntime(function, num_arguments, save_doubles);
699
700  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
701}
702
703
704void LCodeGen::LoadContextFromDeferred(LOperand* context) {
705  if (context->IsRegister()) {
706    __ Move(cp, ToRegister(context));
707  } else if (context->IsStackSlot()) {
708    __ ld(cp, ToMemOperand(context));
709  } else if (context->IsConstantOperand()) {
710    HConstant* constant =
711        chunk_->LookupConstant(LConstantOperand::cast(context));
712    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
713  } else {
714    UNREACHABLE();
715  }
716}
717
718
719void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
720                                       int argc,
721                                       LInstruction* instr,
722                                       LOperand* context) {
723  LoadContextFromDeferred(context);
724  __ CallRuntimeSaveDoubles(id);
725  RecordSafepointWithRegisters(
726      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
727}
728
729
730void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
731                                                    Safepoint::DeoptMode mode) {
732  environment->set_has_been_used();
733  if (!environment->HasBeenRegistered()) {
734    // Physical stack frame layout:
735    // -x ............. -4  0 ..................................... y
736    // [incoming arguments] [spill slots] [pushed outgoing arguments]
737
738    // Layout of the environment:
739    // 0 ..................................................... size-1
740    // [parameters] [locals] [expression stack including arguments]
741
742    // Layout of the translation:
743    // 0 ........................................................ size - 1 + 4
744    // [expression stack including arguments] [locals] [4 words] [parameters]
745    // |>------------  translation_size ------------<|
746
747    int frame_count = 0;
748    int jsframe_count = 0;
749    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
750      ++frame_count;
751      if (e->frame_type() == JS_FUNCTION) {
752        ++jsframe_count;
753      }
754    }
755    Translation translation(&translations_, frame_count, jsframe_count, zone());
756    WriteTranslation(environment, &translation);
757    int deoptimization_index = deoptimizations_.length();
758    int pc_offset = masm()->pc_offset();
759    environment->Register(deoptimization_index,
760                          translation.index(),
761                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
762    deoptimizations_.Add(environment, zone());
763  }
764}
765
766
767void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
768                            Deoptimizer::BailoutType bailout_type,
769                            Register src1, const Operand& src2,
770                            const char* detail) {
771  LEnvironment* environment = instr->environment();
772  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
773  DCHECK(environment->HasBeenRegistered());
774  int id = environment->deoptimization_index();
775  DCHECK(info()->IsOptimizing() || info()->IsStub());
776  Address entry =
777      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
778  if (entry == NULL) {
779    Abort(kBailoutWasNotPrepared);
780    return;
781  }
782
783  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
784    Register scratch = scratch0();
785    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
786    Label no_deopt;
787    __ Push(a1, scratch);
788    __ li(scratch, Operand(count));
789    __ lw(a1, MemOperand(scratch));
790    __ Subu(a1, a1, Operand(1));
791    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
792    __ li(a1, Operand(FLAG_deopt_every_n_times));
793    __ sw(a1, MemOperand(scratch));
794    __ Pop(a1, scratch);
795
796    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
797    __ bind(&no_deopt);
798    __ sw(a1, MemOperand(scratch));
799    __ Pop(a1, scratch);
800  }
801
802  if (info()->ShouldTrapOnDeopt()) {
803    Label skip;
804    if (condition != al) {
805      __ Branch(&skip, NegateCondition(condition), src1, src2);
806    }
807    __ stop("trap_on_deopt");
808    __ bind(&skip);
809  }
810
811  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
812                             instr->Mnemonic(), detail);
813  DCHECK(info()->IsStub() || frame_is_built_);
814  // Go through jump table if we need to handle condition, build frame, or
815  // restore caller doubles.
816  if (condition == al && frame_is_built_ &&
817      !info()->saves_caller_doubles()) {
818    DeoptComment(reason);
819    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
820  } else {
821    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
822                                            !frame_is_built_);
823    // We often have several deopts to the same entry, reuse the last
824    // jump entry if this is the case.
825    if (jump_table_.is_empty() ||
826        !table_entry.IsEquivalentTo(jump_table_.last())) {
827      jump_table_.Add(table_entry, zone());
828    }
829    __ Branch(&jump_table_.last().label, condition, src1, src2);
830  }
831}
832
833
834void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
835                            Register src1, const Operand& src2,
836                            const char* detail) {
837  Deoptimizer::BailoutType bailout_type = info()->IsStub()
838      ? Deoptimizer::LAZY
839      : Deoptimizer::EAGER;
840  DeoptimizeIf(condition, instr, bailout_type, src1, src2, detail);
841}
842
843
844void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
845  int length = deoptimizations_.length();
846  if (length == 0) return;
847  Handle<DeoptimizationInputData> data =
848      DeoptimizationInputData::New(isolate(), length, TENURED);
849
850  Handle<ByteArray> translations =
851      translations_.CreateByteArray(isolate()->factory());
852  data->SetTranslationByteArray(*translations);
853  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
854  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
855  if (info_->IsOptimizing()) {
856    // Reference to shared function info does not change between phases.
857    AllowDeferredHandleDereference allow_handle_dereference;
858    data->SetSharedFunctionInfo(*info_->shared_info());
859  } else {
860    data->SetSharedFunctionInfo(Smi::FromInt(0));
861  }
862
863  Handle<FixedArray> literals =
864      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
865  { AllowDeferredHandleDereference copy_handles;
866    for (int i = 0; i < deoptimization_literals_.length(); i++) {
867      literals->set(i, *deoptimization_literals_[i]);
868    }
869    data->SetLiteralArray(*literals);
870  }
871
872  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
873  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
874
875  // Populate the deoptimization entries.
876  for (int i = 0; i < length; i++) {
877    LEnvironment* env = deoptimizations_[i];
878    data->SetAstId(i, env->ast_id());
879    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
880    data->SetArgumentsStackHeight(i,
881                                  Smi::FromInt(env->arguments_stack_height()));
882    data->SetPc(i, Smi::FromInt(env->pc_offset()));
883  }
884  code->set_deoptimization_data(*data);
885}
886
887
888int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
889  int result = deoptimization_literals_.length();
890  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
891    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
892  }
893  deoptimization_literals_.Add(literal, zone());
894  return result;
895}
896
897
898void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
899  DCHECK(deoptimization_literals_.length() == 0);
900
901  const ZoneList<Handle<JSFunction> >* inlined_closures =
902      chunk()->inlined_closures();
903
904  for (int i = 0, length = inlined_closures->length();
905       i < length;
906       i++) {
907    DefineDeoptimizationLiteral(inlined_closures->at(i));
908  }
909
910  inlined_function_count_ = deoptimization_literals_.length();
911}
912
913
914void LCodeGen::RecordSafepointWithLazyDeopt(
915    LInstruction* instr, SafepointMode safepoint_mode) {
916  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
917    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
918  } else {
919    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
920    RecordSafepointWithRegisters(
921        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
922  }
923}
924
925
926void LCodeGen::RecordSafepoint(
927    LPointerMap* pointers,
928    Safepoint::Kind kind,
929    int arguments,
930    Safepoint::DeoptMode deopt_mode) {
931  DCHECK(expected_safepoint_kind_ == kind);
932
933  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
934  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
935      kind, arguments, deopt_mode);
936  for (int i = 0; i < operands->length(); i++) {
937    LOperand* pointer = operands->at(i);
938    if (pointer->IsStackSlot()) {
939      safepoint.DefinePointerSlot(pointer->index(), zone());
940    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
941      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
942    }
943  }
944}
945
946
947void LCodeGen::RecordSafepoint(LPointerMap* pointers,
948                               Safepoint::DeoptMode deopt_mode) {
949  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
950}
951
952
953void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
954  LPointerMap empty_pointers(zone());
955  RecordSafepoint(&empty_pointers, deopt_mode);
956}
957
958
959void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
960                                            int arguments,
961                                            Safepoint::DeoptMode deopt_mode) {
962  RecordSafepoint(
963      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
964}
965
966
967void LCodeGen::RecordAndWritePosition(int position) {
968  if (position == RelocInfo::kNoPosition) return;
969  masm()->positions_recorder()->RecordPosition(position);
970  masm()->positions_recorder()->WriteRecordedPositions();
971}
972
973
974static const char* LabelType(LLabel* label) {
975  if (label->is_loop_header()) return " (loop header)";
976  if (label->is_osr_entry()) return " (OSR entry)";
977  return "";
978}
979
980
981void LCodeGen::DoLabel(LLabel* label) {
982  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
983          current_instruction_,
984          label->hydrogen_value()->id(),
985          label->block_id(),
986          LabelType(label));
987  __ bind(label->label());
988  current_block_ = label->block_id();
989  DoGap(label);
990}
991
992
993void LCodeGen::DoParallelMove(LParallelMove* move) {
994  resolver_.Resolve(move);
995}
996
997
998void LCodeGen::DoGap(LGap* gap) {
999  for (int i = LGap::FIRST_INNER_POSITION;
1000       i <= LGap::LAST_INNER_POSITION;
1001       i++) {
1002    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1003    LParallelMove* move = gap->GetParallelMove(inner_pos);
1004    if (move != NULL) DoParallelMove(move);
1005  }
1006}
1007
1008
1009void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1010  DoGap(instr);
1011}
1012
1013
1014void LCodeGen::DoParameter(LParameter* instr) {
1015  // Nothing to do.
1016}
1017
1018
1019void LCodeGen::DoCallStub(LCallStub* instr) {
1020  DCHECK(ToRegister(instr->context()).is(cp));
1021  DCHECK(ToRegister(instr->result()).is(v0));
1022  switch (instr->hydrogen()->major_key()) {
1023    case CodeStub::RegExpExec: {
1024      RegExpExecStub stub(isolate());
1025      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1026      break;
1027    }
1028    case CodeStub::SubString: {
1029      SubStringStub stub(isolate());
1030      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1031      break;
1032    }
1033    case CodeStub::StringCompare: {
1034      StringCompareStub stub(isolate());
1035      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1036      break;
1037    }
1038    default:
1039      UNREACHABLE();
1040  }
1041}
1042
1043
1044void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1045  GenerateOsrPrologue();
1046}
1047
1048
1049void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1050  Register dividend = ToRegister(instr->dividend());
1051  int32_t divisor = instr->divisor();
1052  DCHECK(dividend.is(ToRegister(instr->result())));
1053
1054  // Theoretically, a variation of the branch-free code for integer division by
1055  // a power of 2 (calculating the remainder via an additional multiplication
1056  // (which gets simplified to an 'and') and subtraction) should be faster, and
1057  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1058  // indicate that positive dividends are heavily favored, so the branching
1059  // version performs better.
1060  HMod* hmod = instr->hydrogen();
1061  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1062  Label dividend_is_not_negative, done;
1063
1064  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1065    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
1066    // Note: The code below even works when right contains kMinInt.
1067    __ dsubu(dividend, zero_reg, dividend);
1068    __ And(dividend, dividend, Operand(mask));
1069    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1070      DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
1071    }
1072    __ Branch(USE_DELAY_SLOT, &done);
1073    __ dsubu(dividend, zero_reg, dividend);
1074  }
1075
1076  __ bind(&dividend_is_not_negative);
1077  __ And(dividend, dividend, Operand(mask));
1078  __ bind(&done);
1079}
1080
1081
1082void LCodeGen::DoModByConstI(LModByConstI* instr) {
1083  Register dividend = ToRegister(instr->dividend());
1084  int32_t divisor = instr->divisor();
1085  Register result = ToRegister(instr->result());
1086  DCHECK(!dividend.is(result));
1087
1088  if (divisor == 0) {
1089    DeoptimizeIf(al, instr);
1090    return;
1091  }
1092
1093  __ TruncatingDiv(result, dividend, Abs(divisor));
1094  __ Dmul(result, result, Operand(Abs(divisor)));
1095  __ Dsubu(result, dividend, Operand(result));
1096
1097  // Check for negative zero.
1098  HMod* hmod = instr->hydrogen();
1099  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1100    Label remainder_not_zero;
1101    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
1102    DeoptimizeIf(lt, instr, dividend, Operand(zero_reg));
1103    __ bind(&remainder_not_zero);
1104  }
1105}
1106
1107
1108void LCodeGen::DoModI(LModI* instr) {
1109  HMod* hmod = instr->hydrogen();
1110  const Register left_reg = ToRegister(instr->left());
1111  const Register right_reg = ToRegister(instr->right());
1112  const Register result_reg = ToRegister(instr->result());
1113
1114  // div runs in the background while we check for special cases.
1115  __ Dmod(result_reg, left_reg, right_reg);
1116
1117  Label done;
1118  // Check for x % 0, we have to deopt in this case because we can't return a
1119  // NaN.
1120  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1121    DeoptimizeIf(eq, instr, right_reg, Operand(zero_reg));
1122  }
1123
1124  // Check for kMinInt % -1, div will return kMinInt, which is not what we
1125  // want. We have to deopt if we care about -0, because we can't return that.
1126  if (hmod->CheckFlag(HValue::kCanOverflow)) {
1127    Label no_overflow_possible;
1128    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
1129    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1130      DeoptimizeIf(eq, instr, right_reg, Operand(-1));
1131    } else {
1132      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
1133      __ Branch(USE_DELAY_SLOT, &done);
1134      __ mov(result_reg, zero_reg);
1135    }
1136    __ bind(&no_overflow_possible);
1137  }
1138
1139  // If we care about -0, test if the dividend is <0 and the result is 0.
1140  __ Branch(&done, ge, left_reg, Operand(zero_reg));
1141
1142  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1143    DeoptimizeIf(eq, instr, result_reg, Operand(zero_reg));
1144  }
1145  __ bind(&done);
1146}
1147
1148
1149void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1150  Register dividend = ToRegister(instr->dividend());
1151  int32_t divisor = instr->divisor();
1152  Register result = ToRegister(instr->result());
1153  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1154  DCHECK(!result.is(dividend));
1155
1156  // Check for (0 / -x) that will produce negative zero.
1157  HDiv* hdiv = instr->hydrogen();
1158  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1159    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
1160  }
1161  // Check for (kMinInt / -1).
1162  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1163    DeoptimizeIf(eq, instr, dividend, Operand(kMinInt));
1164  }
1165  // Deoptimize if remainder will not be 0.
1166  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1167      divisor != 1 && divisor != -1) {
1168    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1169    __ And(at, dividend, Operand(mask));
1170    DeoptimizeIf(ne, instr, at, Operand(zero_reg));
1171  }
1172
1173  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
1174    __ Dsubu(result, zero_reg, dividend);
1175    return;
1176  }
1177  uint16_t shift = WhichPowerOf2Abs(divisor);
1178  if (shift == 0) {
1179    __ Move(result, dividend);
1180  } else if (shift == 1) {
1181    __ dsrl32(result, dividend, 31);
1182    __ Daddu(result, dividend, Operand(result));
1183  } else {
1184    __ dsra32(result, dividend, 31);
1185    __ dsrl32(result, result, 32 - shift);
1186    __ Daddu(result, dividend, Operand(result));
1187  }
1188  if (shift > 0) __ dsra(result, result, shift);
1189  if (divisor < 0) __ Dsubu(result, zero_reg, result);
1190}
1191
1192
1193void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1194  Register dividend = ToRegister(instr->dividend());
1195  int32_t divisor = instr->divisor();
1196  Register result = ToRegister(instr->result());
1197  DCHECK(!dividend.is(result));
1198
1199  if (divisor == 0) {
1200    DeoptimizeIf(al, instr);
1201    return;
1202  }
1203
1204  // Check for (0 / -x) that will produce negative zero.
1205  HDiv* hdiv = instr->hydrogen();
1206  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1207    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
1208  }
1209
1210  __ TruncatingDiv(result, dividend, Abs(divisor));
1211  if (divisor < 0) __ Subu(result, zero_reg, result);
1212
1213  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1214    __ Dmul(scratch0(), result, Operand(divisor));
1215    __ Dsubu(scratch0(), scratch0(), dividend);
1216    DeoptimizeIf(ne, instr, scratch0(), Operand(zero_reg));
1217  }
1218}
1219
1220
1221// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1222void LCodeGen::DoDivI(LDivI* instr) {
1223  HBinaryOperation* hdiv = instr->hydrogen();
1224  Register dividend = ToRegister(instr->dividend());
1225  Register divisor = ToRegister(instr->divisor());
1226  const Register result = ToRegister(instr->result());
1227
1228  // On MIPS div is asynchronous - it will run in the background while we
1229  // check for special cases.
1230  __ Ddiv(result, dividend, divisor);
1231
1232  // Check for x / 0.
1233  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1234    DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
1235  }
1236
1237  // Check for (0 / -x) that will produce negative zero.
1238  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1239    Label left_not_zero;
1240    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
1241    DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
1242    __ bind(&left_not_zero);
1243  }
1244
1245  // Check for (kMinInt / -1).
1246  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1247      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1248    Label left_not_min_int;
1249    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
1250    DeoptimizeIf(eq, instr, divisor, Operand(-1));
1251    __ bind(&left_not_min_int);
1252  }
1253
1254  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1255    // Calculate remainder.
1256    Register remainder = ToRegister(instr->temp());
1257    if (kArchVariant != kMips64r6) {
1258      __ mfhi(remainder);
1259    } else {
1260      __ dmod(remainder, dividend, divisor);
1261    }
1262    DeoptimizeIf(ne, instr, remainder, Operand(zero_reg));
1263  }
1264}
1265
1266
1267void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1268  DoubleRegister addend = ToDoubleRegister(instr->addend());
1269  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1270  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1271
1272  // This is computed in-place.
1273  DCHECK(addend.is(ToDoubleRegister(instr->result())));
1274
1275  __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
1276}
1277
1278
1279void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1280  Register dividend = ToRegister(instr->dividend());
1281  Register result = ToRegister(instr->result());
1282  int32_t divisor = instr->divisor();
1283  Register scratch = result.is(dividend) ? scratch0() : dividend;
1284  DCHECK(!result.is(dividend) || !scratch.is(dividend));
1285
1286  // If the divisor is 1, return the dividend.
1287  if (divisor == 1) {
1288    __ Move(result, dividend);
1289    return;
1290  }
1291
1292  // If the divisor is positive, things are easy: There can be no deopts and we
1293  // can simply do an arithmetic right shift.
1294  uint16_t shift = WhichPowerOf2Abs(divisor);
1295  if (divisor > 1) {
1296    __ dsra(result, dividend, shift);
1297    return;
1298  }
1299
1300  // If the divisor is negative, we have to negate and handle edge cases.
1301  // Dividend can be the same register as result so save the value of it
1302  // for checking overflow.
1303  __ Move(scratch, dividend);
1304
1305  __ Dsubu(result, zero_reg, dividend);
1306  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1307    DeoptimizeIf(eq, instr, result, Operand(zero_reg));
1308  }
1309
1310  __ Xor(scratch, scratch, result);
1311  // Dividing by -1 is basically negation, unless we overflow.
1312  if (divisor == -1) {
1313    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1314      DeoptimizeIf(gt, instr, result, Operand(kMaxInt));
1315    }
1316    return;
1317  }
1318
1319  // If the negation could not overflow, simply shifting is OK.
1320  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1321    __ dsra(result, result, shift);
1322    return;
1323  }
1324
1325  Label no_overflow, done;
1326  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
1327  __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
1328  __ Branch(&done);
1329  __ bind(&no_overflow);
1330  __ dsra(result, result, shift);
1331  __ bind(&done);
1332}
1333
1334
1335void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1336  Register dividend = ToRegister(instr->dividend());
1337  int32_t divisor = instr->divisor();
1338  Register result = ToRegister(instr->result());
1339  DCHECK(!dividend.is(result));
1340
1341  if (divisor == 0) {
1342    DeoptimizeIf(al, instr);
1343    return;
1344  }
1345
1346  // Check for (0 / -x) that will produce negative zero.
1347  HMathFloorOfDiv* hdiv = instr->hydrogen();
1348  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1349    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
1350  }
1351
1352  // Easy case: We need no dynamic check for the dividend and the flooring
1353  // division is the same as the truncating division.
1354  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1355      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1356    __ TruncatingDiv(result, dividend, Abs(divisor));
1357    if (divisor < 0) __ Dsubu(result, zero_reg, result);
1358    return;
1359  }
1360
1361  // In the general case we may need to adjust before and after the truncating
1362  // division to get a flooring division.
1363  Register temp = ToRegister(instr->temp());
1364  DCHECK(!temp.is(dividend) && !temp.is(result));
1365  Label needs_adjustment, done;
1366  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
1367            dividend, Operand(zero_reg));
1368  __ TruncatingDiv(result, dividend, Abs(divisor));
1369  if (divisor < 0) __ Dsubu(result, zero_reg, result);
1370  __ jmp(&done);
1371  __ bind(&needs_adjustment);
1372  __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1373  __ TruncatingDiv(result, temp, Abs(divisor));
1374  if (divisor < 0) __ Dsubu(result, zero_reg, result);
1375  __ Dsubu(result, result, Operand(1));
1376  __ bind(&done);
1377}
1378
1379
1380// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1381void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1382  HBinaryOperation* hdiv = instr->hydrogen();
1383  Register dividend = ToRegister(instr->dividend());
1384  Register divisor = ToRegister(instr->divisor());
1385  const Register result = ToRegister(instr->result());
1386
1387  // On MIPS div is asynchronous - it will run in the background while we
1388  // check for special cases.
1389  __ Ddiv(result, dividend, divisor);
1390
1391  // Check for x / 0.
1392  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1393    DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
1394  }
1395
1396  // Check for (0 / -x) that will produce negative zero.
1397  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1398    Label left_not_zero;
1399    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
1400    DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
1401    __ bind(&left_not_zero);
1402  }
1403
1404  // Check for (kMinInt / -1).
1405  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1406      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1407    Label left_not_min_int;
1408    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
1409    DeoptimizeIf(eq, instr, divisor, Operand(-1));
1410    __ bind(&left_not_min_int);
1411  }
1412
1413  // We performed a truncating division. Correct the result if necessary.
1414  Label done;
1415  Register remainder = scratch0();
1416  if (kArchVariant != kMips64r6) {
1417    __ mfhi(remainder);
1418  } else {
1419    __ dmod(remainder, dividend, divisor);
1420  }
1421  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
1422  __ Xor(remainder, remainder, Operand(divisor));
1423  __ Branch(&done, ge, remainder, Operand(zero_reg));
1424  __ Dsubu(result, result, Operand(1));
1425  __ bind(&done);
1426}
1427
1428
1429void LCodeGen::DoMulI(LMulI* instr) {
1430  Register scratch = scratch0();
1431  Register result = ToRegister(instr->result());
1432  // Note that result may alias left.
1433  Register left = ToRegister(instr->left());
1434  LOperand* right_op = instr->right();
1435
1436  bool bailout_on_minus_zero =
1437    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1438  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1439
1440  if (right_op->IsConstantOperand()) {
1441    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1442
1443    if (bailout_on_minus_zero && (constant < 0)) {
1444      // The case of a null constant will be handled separately.
1445      // If constant is negative and left is null, the result should be -0.
1446      DeoptimizeIf(eq, instr, left, Operand(zero_reg));
1447    }
1448
1449    switch (constant) {
1450      case -1:
1451        if (overflow) {
1452          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
1453          DeoptimizeIf(gt, instr, scratch, Operand(kMaxInt));
1454        } else {
1455          __ Dsubu(result, zero_reg, left);
1456        }
1457        break;
1458      case 0:
1459        if (bailout_on_minus_zero) {
1460          // If left is strictly negative and the constant is null, the
1461          // result is -0. Deoptimize if required, otherwise return 0.
1462          DeoptimizeIf(lt, instr, left, Operand(zero_reg));
1463        }
1464        __ mov(result, zero_reg);
1465        break;
1466      case 1:
1467        // Nothing to do.
1468        __ Move(result, left);
1469        break;
1470      default:
1471        // Multiplying by powers of two and powers of two plus or minus
1472        // one can be done faster with shifted operands.
1473        // For other constants we emit standard code.
1474        int32_t mask = constant >> 31;
1475        uint32_t constant_abs = (constant + mask) ^ mask;
1476
1477        if (base::bits::IsPowerOfTwo32(constant_abs)) {
1478          int32_t shift = WhichPowerOf2(constant_abs);
1479          __ dsll(result, left, shift);
1480          // Correct the sign of the result if the constant is negative.
1481          if (constant < 0)  __ Dsubu(result, zero_reg, result);
1482        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1483          int32_t shift = WhichPowerOf2(constant_abs - 1);
1484          __ dsll(scratch, left, shift);
1485          __ Daddu(result, scratch, left);
1486          // Correct the sign of the result if the constant is negative.
1487          if (constant < 0)  __ Dsubu(result, zero_reg, result);
1488        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1489          int32_t shift = WhichPowerOf2(constant_abs + 1);
1490          __ dsll(scratch, left, shift);
1491          __ Dsubu(result, scratch, left);
1492          // Correct the sign of the result if the constant is negative.
1493          if (constant < 0)  __ Dsubu(result, zero_reg, result);
1494        } else {
1495          // Generate standard code.
1496          __ li(at, constant);
1497          __ Dmul(result, left, at);
1498        }
1499    }
1500
1501  } else {
1502    DCHECK(right_op->IsRegister());
1503    Register right = ToRegister(right_op);
1504
1505    if (overflow) {
1506      // hi:lo = left * right.
1507      if (instr->hydrogen()->representation().IsSmi()) {
1508        __ Dmulh(result, left, right);
1509      } else {
1510        __ Dmul(result, left, right);
1511      }
1512      __ dsra32(scratch, result, 0);
1513      __ sra(at, result, 31);
1514      if (instr->hydrogen()->representation().IsSmi()) {
1515        __ SmiTag(result);
1516      }
1517      DeoptimizeIf(ne, instr, scratch, Operand(at));
1518    } else {
1519      if (instr->hydrogen()->representation().IsSmi()) {
1520        __ SmiUntag(result, left);
1521        __ Dmul(result, result, right);
1522      } else {
1523        __ Dmul(result, left, right);
1524      }
1525    }
1526
1527    if (bailout_on_minus_zero) {
1528      Label done;
1529      __ Xor(at, left, right);
1530      __ Branch(&done, ge, at, Operand(zero_reg));
1531      // Bail out if the result is minus zero.
1532      DeoptimizeIf(eq, instr, result, Operand(zero_reg));
1533      __ bind(&done);
1534    }
1535  }
1536}
1537
1538
1539void LCodeGen::DoBitI(LBitI* instr) {
1540  LOperand* left_op = instr->left();
1541  LOperand* right_op = instr->right();
1542  DCHECK(left_op->IsRegister());
1543  Register left = ToRegister(left_op);
1544  Register result = ToRegister(instr->result());
1545  Operand right(no_reg);
1546
1547  if (right_op->IsStackSlot()) {
1548    right = Operand(EmitLoadRegister(right_op, at));
1549  } else {
1550    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1551    right = ToOperand(right_op);
1552  }
1553
1554  switch (instr->op()) {
1555    case Token::BIT_AND:
1556      __ And(result, left, right);
1557      break;
1558    case Token::BIT_OR:
1559      __ Or(result, left, right);
1560      break;
1561    case Token::BIT_XOR:
1562      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1563        __ Nor(result, zero_reg, left);
1564      } else {
1565        __ Xor(result, left, right);
1566      }
1567      break;
1568    default:
1569      UNREACHABLE();
1570      break;
1571  }
1572}
1573
1574
1575void LCodeGen::DoShiftI(LShiftI* instr) {
1576  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1577  // result may alias either of them.
1578  LOperand* right_op = instr->right();
1579  Register left = ToRegister(instr->left());
1580  Register result = ToRegister(instr->result());
1581
1582  if (right_op->IsRegister()) {
1583    // No need to mask the right operand on MIPS, it is built into the variable
1584    // shift instructions.
1585    switch (instr->op()) {
1586      case Token::ROR:
1587        __ Ror(result, left, Operand(ToRegister(right_op)));
1588        break;
1589      case Token::SAR:
1590        __ srav(result, left, ToRegister(right_op));
1591        break;
1592      case Token::SHR:
1593        __ srlv(result, left, ToRegister(right_op));
1594        if (instr->can_deopt()) {
1595           // TODO(yy): (-1) >>> 0. anything else?
1596          DeoptimizeIf(lt, instr, result, Operand(zero_reg));
1597          DeoptimizeIf(gt, instr, result, Operand(kMaxInt));
1598        }
1599        break;
1600      case Token::SHL:
1601        __ sllv(result, left, ToRegister(right_op));
1602        break;
1603      default:
1604        UNREACHABLE();
1605        break;
1606    }
1607  } else {
1608    // Mask the right_op operand.
1609    int value = ToInteger32(LConstantOperand::cast(right_op));
1610    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1611    switch (instr->op()) {
1612      case Token::ROR:
1613        if (shift_count != 0) {
1614          __ Ror(result, left, Operand(shift_count));
1615        } else {
1616          __ Move(result, left);
1617        }
1618        break;
1619      case Token::SAR:
1620        if (shift_count != 0) {
1621          __ sra(result, left, shift_count);
1622        } else {
1623          __ Move(result, left);
1624        }
1625        break;
1626      case Token::SHR:
1627        if (shift_count != 0) {
1628          __ srl(result, left, shift_count);
1629        } else {
1630          if (instr->can_deopt()) {
1631            __ And(at, left, Operand(0x80000000));
1632            DeoptimizeIf(ne, instr, at, Operand(zero_reg));
1633          }
1634          __ Move(result, left);
1635        }
1636        break;
1637      case Token::SHL:
1638        if (shift_count != 0) {
1639          if (instr->hydrogen_value()->representation().IsSmi()) {
1640            __ dsll(result, left, shift_count);
1641          } else {
1642            __ sll(result, left, shift_count);
1643          }
1644        } else {
1645          __ Move(result, left);
1646        }
1647        break;
1648      default:
1649        UNREACHABLE();
1650        break;
1651    }
1652  }
1653}
1654
1655
1656void LCodeGen::DoSubI(LSubI* instr) {
1657  LOperand* left = instr->left();
1658  LOperand* right = instr->right();
1659  LOperand* result = instr->result();
1660  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1661
1662  if (!can_overflow) {
1663    if (right->IsStackSlot()) {
1664      Register right_reg = EmitLoadRegister(right, at);
1665      __ Dsubu(ToRegister(result), ToRegister(left), Operand(right_reg));
1666    } else {
1667      DCHECK(right->IsRegister() || right->IsConstantOperand());
1668      __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
1669    }
1670  } else {  // can_overflow.
1671    Register overflow = scratch0();
1672    Register scratch = scratch1();
1673    if (right->IsStackSlot() || right->IsConstantOperand()) {
1674      Register right_reg = EmitLoadRegister(right, scratch);
1675      __ SubuAndCheckForOverflow(ToRegister(result),
1676                                 ToRegister(left),
1677                                 right_reg,
1678                                 overflow);  // Reg at also used as scratch.
1679    } else {
1680      DCHECK(right->IsRegister());
1681      // Due to overflow check macros not supporting constant operands,
1682      // handling the IsConstantOperand case was moved to prev if clause.
1683      __ SubuAndCheckForOverflow(ToRegister(result),
1684                                 ToRegister(left),
1685                                 ToRegister(right),
1686                                 overflow);  // Reg at also used as scratch.
1687    }
1688    DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
1689    if (!instr->hydrogen()->representation().IsSmi()) {
1690      DeoptimizeIf(gt, instr, ToRegister(result), Operand(kMaxInt));
1691      DeoptimizeIf(lt, instr, ToRegister(result), Operand(kMinInt));
1692    }
1693  }
1694}
1695
1696
1697void LCodeGen::DoConstantI(LConstantI* instr) {
1698  __ li(ToRegister(instr->result()), Operand(instr->value()));
1699}
1700
1701
1702void LCodeGen::DoConstantS(LConstantS* instr) {
1703  __ li(ToRegister(instr->result()), Operand(instr->value()));
1704}
1705
1706
1707void LCodeGen::DoConstantD(LConstantD* instr) {
1708  DCHECK(instr->result()->IsDoubleRegister());
1709  DoubleRegister result = ToDoubleRegister(instr->result());
1710  double v = instr->value();
1711  __ Move(result, v);
1712}
1713
1714
1715void LCodeGen::DoConstantE(LConstantE* instr) {
1716  __ li(ToRegister(instr->result()), Operand(instr->value()));
1717}
1718
1719
1720void LCodeGen::DoConstantT(LConstantT* instr) {
1721  Handle<Object> object = instr->value(isolate());
1722  AllowDeferredHandleDereference smi_check;
1723  __ li(ToRegister(instr->result()), object);
1724}
1725
1726
1727void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1728  Register result = ToRegister(instr->result());
1729  Register map = ToRegister(instr->value());
1730  __ EnumLength(result, map);
1731}
1732
1733
1734void LCodeGen::DoDateField(LDateField* instr) {
1735  Register object = ToRegister(instr->date());
1736  Register result = ToRegister(instr->result());
1737  Register scratch = ToRegister(instr->temp());
1738  Smi* index = instr->index();
1739  Label runtime, done;
1740  DCHECK(object.is(a0));
1741  DCHECK(result.is(v0));
1742  DCHECK(!scratch.is(scratch0()));
1743  DCHECK(!scratch.is(object));
1744
1745  __ SmiTst(object, at);
1746  DeoptimizeIf(eq, instr, at, Operand(zero_reg));
1747  __ GetObjectType(object, scratch, scratch);
1748  DeoptimizeIf(ne, instr, scratch, Operand(JS_DATE_TYPE));
1749
1750  if (index->value() == 0) {
1751    __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
1752  } else {
1753    if (index->value() < JSDate::kFirstUncachedField) {
1754      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1755      __ li(scratch, Operand(stamp));
1756      __ ld(scratch, MemOperand(scratch));
1757      __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1758      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
1759      __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
1760                                            kPointerSize * index->value()));
1761      __ jmp(&done);
1762    }
1763    __ bind(&runtime);
1764    __ PrepareCallCFunction(2, scratch);
1765    __ li(a1, Operand(index));
1766    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1767    __ bind(&done);
1768  }
1769}
1770
1771
1772MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1773                                           LOperand* index,
1774                                           String::Encoding encoding) {
1775  if (index->IsConstantOperand()) {
1776    int offset = ToInteger32(LConstantOperand::cast(index));
1777    if (encoding == String::TWO_BYTE_ENCODING) {
1778      offset *= kUC16Size;
1779    }
1780    STATIC_ASSERT(kCharSize == 1);
1781    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1782  }
1783  Register scratch = scratch0();
1784  DCHECK(!scratch.is(string));
1785  DCHECK(!scratch.is(ToRegister(index)));
1786  if (encoding == String::ONE_BYTE_ENCODING) {
1787    __ Daddu(scratch, string, ToRegister(index));
1788  } else {
1789    STATIC_ASSERT(kUC16Size == 2);
1790    __ dsll(scratch, ToRegister(index), 1);
1791    __ Daddu(scratch, string, scratch);
1792  }
1793  return FieldMemOperand(scratch, SeqString::kHeaderSize);
1794}
1795
1796
1797void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1798  String::Encoding encoding = instr->hydrogen()->encoding();
1799  Register string = ToRegister(instr->string());
1800  Register result = ToRegister(instr->result());
1801
1802  if (FLAG_debug_code) {
1803    Register scratch = scratch0();
1804    __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1805    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1806
1807    __ And(scratch, scratch,
1808           Operand(kStringRepresentationMask | kStringEncodingMask));
1809    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1810    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1811    __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1812                                ? one_byte_seq_type : two_byte_seq_type));
1813    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
1814  }
1815
1816  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1817  if (encoding == String::ONE_BYTE_ENCODING) {
1818    __ lbu(result, operand);
1819  } else {
1820    __ lhu(result, operand);
1821  }
1822}
1823
1824
1825void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1826  String::Encoding encoding = instr->hydrogen()->encoding();
1827  Register string = ToRegister(instr->string());
1828  Register value = ToRegister(instr->value());
1829
1830  if (FLAG_debug_code) {
1831    Register scratch = scratch0();
1832    Register index = ToRegister(instr->index());
1833    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1834    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1835    int encoding_mask =
1836        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1837        ? one_byte_seq_type : two_byte_seq_type;
1838    __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
1839  }
1840
1841  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1842  if (encoding == String::ONE_BYTE_ENCODING) {
1843    __ sb(value, operand);
1844  } else {
1845    __ sh(value, operand);
1846  }
1847}
1848
1849
1850void LCodeGen::DoAddI(LAddI* instr) {
1851  LOperand* left = instr->left();
1852  LOperand* right = instr->right();
1853  LOperand* result = instr->result();
1854  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1855
1856  if (!can_overflow) {
1857    if (right->IsStackSlot()) {
1858      Register right_reg = EmitLoadRegister(right, at);
1859      __ Daddu(ToRegister(result), ToRegister(left), Operand(right_reg));
1860    } else {
1861      DCHECK(right->IsRegister() || right->IsConstantOperand());
1862      __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
1863    }
1864  } else {  // can_overflow.
1865    Register overflow = scratch0();
1866    Register scratch = scratch1();
1867    if (right->IsStackSlot() ||
1868        right->IsConstantOperand()) {
1869      Register right_reg = EmitLoadRegister(right, scratch);
1870      __ AdduAndCheckForOverflow(ToRegister(result),
1871                                 ToRegister(left),
1872                                 right_reg,
1873                                 overflow);  // Reg at also used as scratch.
1874    } else {
1875      DCHECK(right->IsRegister());
1876      // Due to overflow check macros not supporting constant operands,
1877      // handling the IsConstantOperand case was moved to prev if clause.
1878      __ AdduAndCheckForOverflow(ToRegister(result),
1879                                 ToRegister(left),
1880                                 ToRegister(right),
1881                                 overflow);  // Reg at also used as scratch.
1882    }
1883    DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
1884    // if not smi, it must int32.
1885    if (!instr->hydrogen()->representation().IsSmi()) {
1886      DeoptimizeIf(gt, instr, ToRegister(result), Operand(kMaxInt));
1887      DeoptimizeIf(lt, instr, ToRegister(result), Operand(kMinInt));
1888    }
1889  }
1890}
1891
1892
1893void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1894  LOperand* left = instr->left();
1895  LOperand* right = instr->right();
1896  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1897  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1898  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1899    Register left_reg = ToRegister(left);
1900    Register right_reg = EmitLoadRegister(right, scratch0());
1901    Register result_reg = ToRegister(instr->result());
1902    Label return_right, done;
1903    Register scratch = scratch1();
1904    __ Slt(scratch, left_reg, Operand(right_reg));
1905    if (condition == ge) {
1906     __  Movz(result_reg, left_reg, scratch);
1907     __  Movn(result_reg, right_reg, scratch);
1908    } else {
1909     DCHECK(condition == le);
1910     __  Movn(result_reg, left_reg, scratch);
1911     __  Movz(result_reg, right_reg, scratch);
1912    }
1913  } else {
1914    DCHECK(instr->hydrogen()->representation().IsDouble());
1915    FPURegister left_reg = ToDoubleRegister(left);
1916    FPURegister right_reg = ToDoubleRegister(right);
1917    FPURegister result_reg = ToDoubleRegister(instr->result());
1918    Label check_nan_left, check_zero, return_left, return_right, done;
1919    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
1920    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
1921    __ Branch(&return_right);
1922
1923    __ bind(&check_zero);
1924    // left == right != 0.
1925    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
1926    // At this point, both left and right are either 0 or -0.
1927    if (operation == HMathMinMax::kMathMin) {
1928      __ neg_d(left_reg, left_reg);
1929      __ sub_d(result_reg, left_reg, right_reg);
1930      __ neg_d(result_reg, result_reg);
1931    } else {
1932      __ add_d(result_reg, left_reg, right_reg);
1933    }
1934    __ Branch(&done);
1935
1936    __ bind(&check_nan_left);
1937    // left == NaN.
1938    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
1939    __ bind(&return_right);
1940    if (!right_reg.is(result_reg)) {
1941      __ mov_d(result_reg, right_reg);
1942    }
1943    __ Branch(&done);
1944
1945    __ bind(&return_left);
1946    if (!left_reg.is(result_reg)) {
1947      __ mov_d(result_reg, left_reg);
1948    }
1949    __ bind(&done);
1950  }
1951}
1952
1953
1954void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1955  DoubleRegister left = ToDoubleRegister(instr->left());
1956  DoubleRegister right = ToDoubleRegister(instr->right());
1957  DoubleRegister result = ToDoubleRegister(instr->result());
1958  switch (instr->op()) {
1959    case Token::ADD:
1960      __ add_d(result, left, right);
1961      break;
1962    case Token::SUB:
1963      __ sub_d(result, left, right);
1964      break;
1965    case Token::MUL:
1966      __ mul_d(result, left, right);
1967      break;
1968    case Token::DIV:
1969      __ div_d(result, left, right);
1970      break;
1971    case Token::MOD: {
1972      // Save a0-a3 on the stack.
1973      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1974      __ MultiPush(saved_regs);
1975
1976      __ PrepareCallCFunction(0, 2, scratch0());
1977      __ MovToFloatParameters(left, right);
1978      __ CallCFunction(
1979          ExternalReference::mod_two_doubles_operation(isolate()),
1980          0, 2);
1981      // Move the result in the double result register.
1982      __ MovFromFloatResult(result);
1983
1984      // Restore saved register.
1985      __ MultiPop(saved_regs);
1986      break;
1987    }
1988    default:
1989      UNREACHABLE();
1990      break;
1991  }
1992}
1993
1994
1995void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1996  DCHECK(ToRegister(instr->context()).is(cp));
1997  DCHECK(ToRegister(instr->left()).is(a1));
1998  DCHECK(ToRegister(instr->right()).is(a0));
1999  DCHECK(ToRegister(instr->result()).is(v0));
2000
2001  Handle<Code> code =
2002      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2003  CallCode(code, RelocInfo::CODE_TARGET, instr);
2004  // Other arch use a nop here, to signal that there is no inlined
2005  // patchable code. Mips does not need the nop, since our marker
2006  // instruction (andi zero_reg) will never be used in normal code.
2007}
2008
2009
2010template<class InstrType>
2011void LCodeGen::EmitBranch(InstrType instr,
2012                          Condition condition,
2013                          Register src1,
2014                          const Operand& src2) {
2015  int left_block = instr->TrueDestination(chunk_);
2016  int right_block = instr->FalseDestination(chunk_);
2017
2018  int next_block = GetNextEmittedBlock();
2019  if (right_block == left_block || condition == al) {
2020    EmitGoto(left_block);
2021  } else if (left_block == next_block) {
2022    __ Branch(chunk_->GetAssemblyLabel(right_block),
2023              NegateCondition(condition), src1, src2);
2024  } else if (right_block == next_block) {
2025    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2026  } else {
2027    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2028    __ Branch(chunk_->GetAssemblyLabel(right_block));
2029  }
2030}
2031
2032
2033template<class InstrType>
2034void LCodeGen::EmitBranchF(InstrType instr,
2035                           Condition condition,
2036                           FPURegister src1,
2037                           FPURegister src2) {
2038  int right_block = instr->FalseDestination(chunk_);
2039  int left_block = instr->TrueDestination(chunk_);
2040
2041  int next_block = GetNextEmittedBlock();
2042  if (right_block == left_block) {
2043    EmitGoto(left_block);
2044  } else if (left_block == next_block) {
2045    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
2046               NegateCondition(condition), src1, src2);
2047  } else if (right_block == next_block) {
2048    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2049               condition, src1, src2);
2050  } else {
2051    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2052               condition, src1, src2);
2053    __ Branch(chunk_->GetAssemblyLabel(right_block));
2054  }
2055}
2056
2057
2058template<class InstrType>
2059void LCodeGen::EmitFalseBranch(InstrType instr,
2060                               Condition condition,
2061                               Register src1,
2062                               const Operand& src2) {
2063  int false_block = instr->FalseDestination(chunk_);
2064  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
2065}
2066
2067
2068template<class InstrType>
2069void LCodeGen::EmitFalseBranchF(InstrType instr,
2070                                Condition condition,
2071                                FPURegister src1,
2072                                FPURegister src2) {
2073  int false_block = instr->FalseDestination(chunk_);
2074  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
2075             condition, src1, src2);
2076}
2077
2078
2079void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2080  __ stop("LDebugBreak");
2081}
2082
2083
2084void LCodeGen::DoBranch(LBranch* instr) {
2085  Representation r = instr->hydrogen()->value()->representation();
2086  if (r.IsInteger32() || r.IsSmi()) {
2087    DCHECK(!info()->IsStub());
2088    Register reg = ToRegister(instr->value());
2089    EmitBranch(instr, ne, reg, Operand(zero_reg));
2090  } else if (r.IsDouble()) {
2091    DCHECK(!info()->IsStub());
2092    DoubleRegister reg = ToDoubleRegister(instr->value());
2093    // Test the double value. Zero and NaN are false.
2094    EmitBranchF(instr, nue, reg, kDoubleRegZero);
2095  } else {
2096    DCHECK(r.IsTagged());
2097    Register reg = ToRegister(instr->value());
2098    HType type = instr->hydrogen()->value()->type();
2099    if (type.IsBoolean()) {
2100      DCHECK(!info()->IsStub());
2101      __ LoadRoot(at, Heap::kTrueValueRootIndex);
2102      EmitBranch(instr, eq, reg, Operand(at));
2103    } else if (type.IsSmi()) {
2104      DCHECK(!info()->IsStub());
2105      EmitBranch(instr, ne, reg, Operand(zero_reg));
2106    } else if (type.IsJSArray()) {
2107      DCHECK(!info()->IsStub());
2108      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
2109    } else if (type.IsHeapNumber()) {
2110      DCHECK(!info()->IsStub());
2111      DoubleRegister dbl_scratch = double_scratch0();
2112      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2113      // Test the double value. Zero and NaN are false.
2114      EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
2115    } else if (type.IsString()) {
2116      DCHECK(!info()->IsStub());
2117      __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
2118      EmitBranch(instr, ne, at, Operand(zero_reg));
2119    } else {
2120      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2121      // Avoid deopts in the case where we've never executed this path before.
2122      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2123
2124      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2125        // undefined -> false.
2126        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2127        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2128      }
2129      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2130        // Boolean -> its value.
2131        __ LoadRoot(at, Heap::kTrueValueRootIndex);
2132        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
2133        __ LoadRoot(at, Heap::kFalseValueRootIndex);
2134        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2135      }
2136      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2137        // 'null' -> false.
2138        __ LoadRoot(at, Heap::kNullValueRootIndex);
2139        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2140      }
2141
2142      if (expected.Contains(ToBooleanStub::SMI)) {
2143        // Smis: 0 -> false, all other -> true.
2144        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2145        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2146      } else if (expected.NeedsMap()) {
2147        // If we need a map later and have a Smi -> deopt.
2148        __ SmiTst(reg, at);
2149        DeoptimizeIf(eq, instr, at, Operand(zero_reg));
2150      }
2151
2152      const Register map = scratch0();
2153      if (expected.NeedsMap()) {
2154        __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2155        if (expected.CanBeUndetectable()) {
2156          // Undetectable -> false.
2157          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2158          __ And(at, at, Operand(1 << Map::kIsUndetectable));
2159          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
2160        }
2161      }
2162
2163      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2164        // spec object -> true.
2165        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2166        __ Branch(instr->TrueLabel(chunk_),
2167                  ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
2168      }
2169
2170      if (expected.Contains(ToBooleanStub::STRING)) {
2171        // String value -> false iff empty.
2172        Label not_string;
2173        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2174        __ Branch(&not_string, ge , at, Operand(FIRST_NONSTRING_TYPE));
2175        __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
2176        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
2177        __ Branch(instr->FalseLabel(chunk_));
2178        __ bind(&not_string);
2179      }
2180
2181      if (expected.Contains(ToBooleanStub::SYMBOL)) {
2182        // Symbol value -> true.
2183        const Register scratch = scratch1();
2184        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2185        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
2186      }
2187
2188      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2189        // heap number -> false iff +0, -0, or NaN.
2190        DoubleRegister dbl_scratch = double_scratch0();
2191        Label not_heap_number;
2192        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2193        __ Branch(&not_heap_number, ne, map, Operand(at));
2194        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2195        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2196                   ne, dbl_scratch, kDoubleRegZero);
2197        // Falls through if dbl_scratch == 0.
2198        __ Branch(instr->FalseLabel(chunk_));
2199        __ bind(&not_heap_number);
2200      }
2201
2202      if (!expected.IsGeneric()) {
2203        // We've seen something for the first time -> deopt.
2204        // This can only happen if we are not generic already.
2205        DeoptimizeIf(al, instr, zero_reg, Operand(zero_reg));
2206      }
2207    }
2208  }
2209}
2210
2211
2212void LCodeGen::EmitGoto(int block) {
2213  if (!IsNextEmittedBlock(block)) {
2214    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2215  }
2216}
2217
2218
2219void LCodeGen::DoGoto(LGoto* instr) {
2220  EmitGoto(instr->block_id());
2221}
2222
2223
2224Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2225  Condition cond = kNoCondition;
2226  switch (op) {
2227    case Token::EQ:
2228    case Token::EQ_STRICT:
2229      cond = eq;
2230      break;
2231    case Token::NE:
2232    case Token::NE_STRICT:
2233      cond = ne;
2234      break;
2235    case Token::LT:
2236      cond = is_unsigned ? lo : lt;
2237      break;
2238    case Token::GT:
2239      cond = is_unsigned ? hi : gt;
2240      break;
2241    case Token::LTE:
2242      cond = is_unsigned ? ls : le;
2243      break;
2244    case Token::GTE:
2245      cond = is_unsigned ? hs : ge;
2246      break;
2247    case Token::IN:
2248    case Token::INSTANCEOF:
2249    default:
2250      UNREACHABLE();
2251  }
2252  return cond;
2253}
2254
2255
2256void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2257  LOperand* left = instr->left();
2258  LOperand* right = instr->right();
2259  bool is_unsigned =
2260      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2261      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2262  Condition cond = TokenToCondition(instr->op(), is_unsigned);
2263
2264  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2265    // We can statically evaluate the comparison.
2266    double left_val = ToDouble(LConstantOperand::cast(left));
2267    double right_val = ToDouble(LConstantOperand::cast(right));
2268    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2269        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2270    EmitGoto(next_block);
2271  } else {
2272    if (instr->is_double()) {
2273      // Compare left and right as doubles and load the
2274      // resulting flags into the normal status register.
2275      FPURegister left_reg = ToDoubleRegister(left);
2276      FPURegister right_reg = ToDoubleRegister(right);
2277
2278      // If a NaN is involved, i.e. the result is unordered,
2279      // jump to false block label.
2280      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2281                 left_reg, right_reg);
2282
2283      EmitBranchF(instr, cond, left_reg, right_reg);
2284    } else {
2285      Register cmp_left;
2286      Operand cmp_right = Operand((int64_t)0);
2287      if (right->IsConstantOperand()) {
2288        int32_t value = ToInteger32(LConstantOperand::cast(right));
2289        if (instr->hydrogen_value()->representation().IsSmi()) {
2290          cmp_left = ToRegister(left);
2291          cmp_right = Operand(Smi::FromInt(value));
2292        } else {
2293          cmp_left = ToRegister(left);
2294          cmp_right = Operand(value);
2295        }
2296      } else if (left->IsConstantOperand()) {
2297        int32_t value = ToInteger32(LConstantOperand::cast(left));
2298        if (instr->hydrogen_value()->representation().IsSmi()) {
2299          cmp_left = ToRegister(right);
2300          cmp_right = Operand(Smi::FromInt(value));
2301        } else {
2302          cmp_left = ToRegister(right);
2303          cmp_right = Operand(value);
2304        }
2305        // We commuted the operands, so commute the condition.
2306        cond = CommuteCondition(cond);
2307      } else {
2308        cmp_left = ToRegister(left);
2309        cmp_right = Operand(ToRegister(right));
2310      }
2311
2312      EmitBranch(instr, cond, cmp_left, cmp_right);
2313    }
2314  }
2315}
2316
2317
2318void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2319  Register left = ToRegister(instr->left());
2320  Register right = ToRegister(instr->right());
2321
2322  EmitBranch(instr, eq, left, Operand(right));
2323}
2324
2325
2326void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2327  if (instr->hydrogen()->representation().IsTagged()) {
2328    Register input_reg = ToRegister(instr->object());
2329    __ li(at, Operand(factory()->the_hole_value()));
2330    EmitBranch(instr, eq, input_reg, Operand(at));
2331    return;
2332  }
2333
2334  DoubleRegister input_reg = ToDoubleRegister(instr->object());
2335  EmitFalseBranchF(instr, eq, input_reg, input_reg);
2336
2337  Register scratch = scratch0();
2338  __ FmoveHigh(scratch, input_reg);
2339  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
2340}
2341
2342
2343void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2344  Representation rep = instr->hydrogen()->value()->representation();
2345  DCHECK(!rep.IsInteger32());
2346  Register scratch = ToRegister(instr->temp());
2347
2348  if (rep.IsDouble()) {
2349    DoubleRegister value = ToDoubleRegister(instr->value());
2350    EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
2351    __ FmoveHigh(scratch, value);
2352    // Only use low 32-bits of value.
2353    __ dsll32(scratch, scratch, 0);
2354    __ dsrl32(scratch, scratch, 0);
2355    __ li(at, 0x80000000);
2356  } else {
2357    Register value = ToRegister(instr->value());
2358    __ CheckMap(value,
2359                scratch,
2360                Heap::kHeapNumberMapRootIndex,
2361                instr->FalseLabel(chunk()),
2362                DO_SMI_CHECK);
2363    __ lwu(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2364    EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
2365    __ lwu(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2366    __ mov(at, zero_reg);
2367  }
2368  EmitBranch(instr, eq, scratch, Operand(at));
2369}
2370
2371
2372Condition LCodeGen::EmitIsObject(Register input,
2373                                 Register temp1,
2374                                 Register temp2,
2375                                 Label* is_not_object,
2376                                 Label* is_object) {
2377  __ JumpIfSmi(input, is_not_object);
2378
2379  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2380  __ Branch(is_object, eq, input, Operand(temp2));
2381
2382  // Load map.
2383  __ ld(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2384  // Undetectable objects behave like undefined.
2385  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2386  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
2387  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
2388
2389  // Load instance type and check that it is in object type range.
2390  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2391  __ Branch(is_not_object,
2392            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2393
2394  return le;
2395}
2396
2397
2398void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2399  Register reg = ToRegister(instr->value());
2400  Register temp1 = ToRegister(instr->temp());
2401  Register temp2 = scratch0();
2402
2403  Condition true_cond =
2404      EmitIsObject(reg, temp1, temp2,
2405          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2406
2407  EmitBranch(instr, true_cond, temp2,
2408             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2409}
2410
2411
2412Condition LCodeGen::EmitIsString(Register input,
2413                                 Register temp1,
2414                                 Label* is_not_string,
2415                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
2416  if (check_needed == INLINE_SMI_CHECK) {
2417    __ JumpIfSmi(input, is_not_string);
2418  }
2419  __ GetObjectType(input, temp1, temp1);
2420
2421  return lt;
2422}
2423
2424
2425void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2426  Register reg = ToRegister(instr->value());
2427  Register temp1 = ToRegister(instr->temp());
2428
2429  SmiCheck check_needed =
2430      instr->hydrogen()->value()->type().IsHeapObject()
2431          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2432  Condition true_cond =
2433      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2434
2435  EmitBranch(instr, true_cond, temp1,
2436             Operand(FIRST_NONSTRING_TYPE));
2437}
2438
2439
2440void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2441  Register input_reg = EmitLoadRegister(instr->value(), at);
2442  __ And(at, input_reg, kSmiTagMask);
2443  EmitBranch(instr, eq, at, Operand(zero_reg));
2444}
2445
2446
2447void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2448  Register input = ToRegister(instr->value());
2449  Register temp = ToRegister(instr->temp());
2450
2451  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2452    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2453  }
2454  __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2455  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2456  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2457  EmitBranch(instr, ne, at, Operand(zero_reg));
2458}
2459
2460
2461static Condition ComputeCompareCondition(Token::Value op) {
2462  switch (op) {
2463    case Token::EQ_STRICT:
2464    case Token::EQ:
2465      return eq;
2466    case Token::LT:
2467      return lt;
2468    case Token::GT:
2469      return gt;
2470    case Token::LTE:
2471      return le;
2472    case Token::GTE:
2473      return ge;
2474    default:
2475      UNREACHABLE();
2476      return kNoCondition;
2477  }
2478}
2479
2480
2481void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2482  DCHECK(ToRegister(instr->context()).is(cp));
2483  Token::Value op = instr->op();
2484
2485  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2486  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2487
2488  Condition condition = ComputeCompareCondition(op);
2489
2490  EmitBranch(instr, condition, v0, Operand(zero_reg));
2491}
2492
2493
2494static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2495  InstanceType from = instr->from();
2496  InstanceType to = instr->to();
2497  if (from == FIRST_TYPE) return to;
2498  DCHECK(from == to || to == LAST_TYPE);
2499  return from;
2500}
2501
2502
2503static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2504  InstanceType from = instr->from();
2505  InstanceType to = instr->to();
2506  if (from == to) return eq;
2507  if (to == LAST_TYPE) return hs;
2508  if (from == FIRST_TYPE) return ls;
2509  UNREACHABLE();
2510  return eq;
2511}
2512
2513
2514void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2515  Register scratch = scratch0();
2516  Register input = ToRegister(instr->value());
2517
2518  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2519    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2520  }
2521
2522  __ GetObjectType(input, scratch, scratch);
2523  EmitBranch(instr,
2524             BranchCondition(instr->hydrogen()),
2525             scratch,
2526             Operand(TestType(instr->hydrogen())));
2527}
2528
2529
2530void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2531  Register input = ToRegister(instr->value());
2532  Register result = ToRegister(instr->result());
2533
2534  __ AssertString(input);
2535
2536  __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
2537  __ IndexFromHash(result, result);
2538}
2539
2540
2541void LCodeGen::DoHasCachedArrayIndexAndBranch(
2542    LHasCachedArrayIndexAndBranch* instr) {
2543  Register input = ToRegister(instr->value());
2544  Register scratch = scratch0();
2545
2546  __ lwu(scratch,
2547         FieldMemOperand(input, String::kHashFieldOffset));
2548  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2549  EmitBranch(instr, eq, at, Operand(zero_reg));
2550}
2551
2552
2553// Branches to a label or falls through with the answer in flags.  Trashes
2554// the temp registers, but not the input.
2555void LCodeGen::EmitClassOfTest(Label* is_true,
2556                               Label* is_false,
2557                               Handle<String>class_name,
2558                               Register input,
2559                               Register temp,
2560                               Register temp2) {
2561  DCHECK(!input.is(temp));
2562  DCHECK(!input.is(temp2));
2563  DCHECK(!temp.is(temp2));
2564
2565  __ JumpIfSmi(input, is_false);
2566
2567  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2568    // Assuming the following assertions, we can use the same compares to test
2569    // for both being a function type and being in the object type range.
2570    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2571    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2572                  FIRST_SPEC_OBJECT_TYPE + 1);
2573    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2574                  LAST_SPEC_OBJECT_TYPE - 1);
2575    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2576
2577    __ GetObjectType(input, temp, temp2);
2578    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2579    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2580    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2581  } else {
2582    // Faster code path to avoid two compares: subtract lower bound from the
2583    // actual type and do a signed compare with the width of the type range.
2584    __ GetObjectType(input, temp, temp2);
2585    __ Dsubu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2586    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2587                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2588  }
2589
2590  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2591  // Check if the constructor in the map is a function.
2592  __ ld(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2593
2594  // Objects with a non-function constructor have class 'Object'.
2595  __ GetObjectType(temp, temp2, temp2);
2596  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2597    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
2598  } else {
2599    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
2600  }
2601
2602  // temp now contains the constructor function. Grab the
2603  // instance class name from there.
2604  __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2605  __ ld(temp, FieldMemOperand(temp,
2606                               SharedFunctionInfo::kInstanceClassNameOffset));
2607  // The class name we are testing against is internalized since it's a literal.
2608  // The name in the constructor is internalized because of the way the context
2609  // is booted.  This routine isn't expected to work for random API-created
2610  // classes and it doesn't have to because you can't access it with natives
2611  // syntax.  Since both sides are internalized it is sufficient to use an
2612  // identity comparison.
2613
2614  // End with the address of this class_name instance in temp register.
2615  // On MIPS, the caller must do the comparison with Handle<String>class_name.
2616}
2617
2618
2619void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2620  Register input = ToRegister(instr->value());
2621  Register temp = scratch0();
2622  Register temp2 = ToRegister(instr->temp());
2623  Handle<String> class_name = instr->hydrogen()->class_name();
2624
2625  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2626                  class_name, input, temp, temp2);
2627
2628  EmitBranch(instr, eq, temp, Operand(class_name));
2629}
2630
2631
2632void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2633  Register reg = ToRegister(instr->value());
2634  Register temp = ToRegister(instr->temp());
2635
2636  __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2637  EmitBranch(instr, eq, temp, Operand(instr->map()));
2638}
2639
2640
2641void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2642  DCHECK(ToRegister(instr->context()).is(cp));
2643  Label true_label, done;
2644  DCHECK(ToRegister(instr->left()).is(a0));  // Object is in a0.
2645  DCHECK(ToRegister(instr->right()).is(a1));  // Function is in a1.
2646  Register result = ToRegister(instr->result());
2647  DCHECK(result.is(v0));
2648
2649  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2650  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2651
2652  __ Branch(&true_label, eq, result, Operand(zero_reg));
2653  __ li(result, Operand(factory()->false_value()));
2654  __ Branch(&done);
2655  __ bind(&true_label);
2656  __ li(result, Operand(factory()->true_value()));
2657  __ bind(&done);
2658}
2659
2660
2661void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2662  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2663   public:
2664    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2665                                  LInstanceOfKnownGlobal* instr)
2666        : LDeferredCode(codegen), instr_(instr) { }
2667    virtual void Generate() OVERRIDE {
2668      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2669    }
2670    virtual LInstruction* instr() OVERRIDE { return instr_; }
2671    Label* map_check() { return &map_check_; }
2672
2673   private:
2674    LInstanceOfKnownGlobal* instr_;
2675    Label map_check_;
2676  };
2677
2678  DeferredInstanceOfKnownGlobal* deferred;
2679  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2680
2681  Label done, false_result;
2682  Register object = ToRegister(instr->value());
2683  Register temp = ToRegister(instr->temp());
2684  Register result = ToRegister(instr->result());
2685
2686  DCHECK(object.is(a0));
2687  DCHECK(result.is(v0));
2688
2689  // A Smi is not instance of anything.
2690  __ JumpIfSmi(object, &false_result);
2691
2692  // This is the inlined call site instanceof cache. The two occurences of the
2693  // hole value will be patched to the last map/result pair generated by the
2694  // instanceof stub.
2695  Label cache_miss;
2696  Register map = temp;
2697  __ ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
2698
2699  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2700  __ bind(deferred->map_check());  // Label for calculating code patching.
2701  // We use Factory::the_hole_value() on purpose instead of loading from the
2702  // root array to force relocation to be able to later patch with
2703  // the cached map.
2704  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2705  __ li(at, Operand(Handle<Object>(cell)));
2706  __ ld(at, FieldMemOperand(at, PropertyCell::kValueOffset));
2707  __ BranchShort(&cache_miss, ne, map, Operand(at));
2708  // We use Factory::the_hole_value() on purpose instead of loading from the
2709  // root array to force relocation to be able to later patch
2710  // with true or false. The distance from map check has to be constant.
2711  __ li(result, Operand(factory()->the_hole_value()));
2712  __ Branch(&done);
2713
2714  // The inlined call site cache did not match. Check null and string before
2715  // calling the deferred code.
2716  __ bind(&cache_miss);
2717  // Null is not instance of anything.
2718  __ LoadRoot(temp, Heap::kNullValueRootIndex);
2719  __ Branch(&false_result, eq, object, Operand(temp));
2720
2721  // String values is not instance of anything.
2722  Condition cc = __ IsObjectStringType(object, temp, temp);
2723  __ Branch(&false_result, cc, temp, Operand(zero_reg));
2724
2725  // Go to the deferred code.
2726  __ Branch(deferred->entry());
2727
2728  __ bind(&false_result);
2729  __ LoadRoot(result, Heap::kFalseValueRootIndex);
2730
2731  // Here result has either true or false. Deferred code also produces true or
2732  // false object.
2733  __ bind(deferred->exit());
2734  __ bind(&done);
2735}
2736
2737
2738void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2739                                               Label* map_check) {
2740  Register result = ToRegister(instr->result());
2741  DCHECK(result.is(v0));
2742
2743  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2744  flags = static_cast<InstanceofStub::Flags>(
2745      flags | InstanceofStub::kArgsInRegisters);
2746  flags = static_cast<InstanceofStub::Flags>(
2747      flags | InstanceofStub::kCallSiteInlineCheck);
2748  flags = static_cast<InstanceofStub::Flags>(
2749      flags | InstanceofStub::kReturnTrueFalseObject);
2750  InstanceofStub stub(isolate(), flags);
2751
2752  PushSafepointRegistersScope scope(this);
2753  LoadContextFromDeferred(instr->context());
2754
2755  // Get the temp register reserved by the instruction. This needs to be a4 as
2756  // its slot of the pushing of safepoint registers is used to communicate the
2757  // offset to the location of the map check.
2758  Register temp = ToRegister(instr->temp());
2759  DCHECK(temp.is(a4));
2760  __ li(InstanceofStub::right(), instr->function());
2761  static const int kAdditionalDelta = 13;
2762  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2763  Label before_push_delta;
2764  __ bind(&before_push_delta);
2765  {
2766    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2767    __ li(temp, Operand(delta * kIntSize), CONSTANT_SIZE);
2768    __ StoreToSafepointRegisterSlot(temp, temp);
2769  }
2770  CallCodeGeneric(stub.GetCode(),
2771                  RelocInfo::CODE_TARGET,
2772                  instr,
2773                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2774  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2775  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2776  // Put the result value into the result register slot and
2777  // restore all registers.
2778  __ StoreToSafepointRegisterSlot(result, result);
2779}
2780
2781
2782void LCodeGen::DoCmpT(LCmpT* instr) {
2783  DCHECK(ToRegister(instr->context()).is(cp));
2784  Token::Value op = instr->op();
2785
2786  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2787  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2788  // On MIPS there is no need for a "no inlined smi code" marker (nop).
2789
2790  Condition condition = ComputeCompareCondition(op);
2791  // A minor optimization that relies on LoadRoot always emitting one
2792  // instruction.
2793  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2794  Label done, check;
2795  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2796  __ bind(&check);
2797  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2798  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
2799  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2800  __ bind(&done);
2801}
2802
2803
2804void LCodeGen::DoReturn(LReturn* instr) {
2805  if (FLAG_trace && info()->IsOptimizing()) {
2806    // Push the return value on the stack as the parameter.
2807    // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2808    // managed by the register allocator and tearing down the frame, it's
2809    // safe to write to the context register.
2810    __ push(v0);
2811    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2812    __ CallRuntime(Runtime::kTraceExit, 1);
2813  }
2814  if (info()->saves_caller_doubles()) {
2815    RestoreCallerDoubles();
2816  }
2817  int no_frame_start = -1;
2818  if (NeedsEagerFrame()) {
2819    __ mov(sp, fp);
2820    no_frame_start = masm_->pc_offset();
2821    __ Pop(ra, fp);
2822  }
2823  if (instr->has_constant_parameter_count()) {
2824    int parameter_count = ToInteger32(instr->constant_parameter_count());
2825    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2826    if (sp_delta != 0) {
2827      __ Daddu(sp, sp, Operand(sp_delta));
2828    }
2829  } else {
2830    Register reg = ToRegister(instr->parameter_count());
2831    // The argument count parameter is a smi
2832    __ SmiUntag(reg);
2833    __ dsll(at, reg, kPointerSizeLog2);
2834    __ Daddu(sp, sp, at);
2835  }
2836
2837  __ Jump(ra);
2838
2839  if (no_frame_start != -1) {
2840    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2841  }
2842}
2843
2844
2845void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2846  Register result = ToRegister(instr->result());
2847  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2848  __ ld(result, FieldMemOperand(at, Cell::kValueOffset));
2849  if (instr->hydrogen()->RequiresHoleCheck()) {
2850    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2851    DeoptimizeIf(eq, instr, result, Operand(at));
2852  }
2853}
2854
2855
2856template <class T>
2857void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2858  DCHECK(FLAG_vector_ics);
2859  Register vector = ToRegister(instr->temp_vector());
2860  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
2861  __ li(vector, instr->hydrogen()->feedback_vector());
2862  // No need to allocate this register.
2863  DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
2864  __ li(VectorLoadICDescriptor::SlotRegister(),
2865        Operand(Smi::FromInt(instr->hydrogen()->slot())));
2866}
2867
2868
2869void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2870  DCHECK(ToRegister(instr->context()).is(cp));
2871  DCHECK(ToRegister(instr->global_object())
2872            .is(LoadDescriptor::ReceiverRegister()));
2873  DCHECK(ToRegister(instr->result()).is(v0));
2874
2875  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
2876  if (FLAG_vector_ics) {
2877    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2878  }
2879  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2880  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
2881  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2882}
2883
2884
2885void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2886  Register value = ToRegister(instr->value());
2887  Register cell = scratch0();
2888
2889  // Load the cell.
2890  __ li(cell, Operand(instr->hydrogen()->cell().handle()));
2891
2892  // If the cell we are storing to contains the hole it could have
2893  // been deleted from the property dictionary. In that case, we need
2894  // to update the property details in the property dictionary to mark
2895  // it as no longer deleted.
2896  if (instr->hydrogen()->RequiresHoleCheck()) {
2897    // We use a temp to check the payload.
2898    Register payload = ToRegister(instr->temp());
2899    __ ld(payload, FieldMemOperand(cell, Cell::kValueOffset));
2900    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2901    DeoptimizeIf(eq, instr, payload, Operand(at));
2902  }
2903
2904  // Store the value.
2905  __ sd(value, FieldMemOperand(cell, Cell::kValueOffset));
2906  // Cells are always rescanned, so no write barrier here.
2907}
2908
2909
2910void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2911  Register context = ToRegister(instr->context());
2912  Register result = ToRegister(instr->result());
2913
2914  __ ld(result, ContextOperand(context, instr->slot_index()));
2915  if (instr->hydrogen()->RequiresHoleCheck()) {
2916    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2917
2918    if (instr->hydrogen()->DeoptimizesOnHole()) {
2919      DeoptimizeIf(eq, instr, result, Operand(at));
2920    } else {
2921      Label is_not_hole;
2922      __ Branch(&is_not_hole, ne, result, Operand(at));
2923      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2924      __ bind(&is_not_hole);
2925    }
2926  }
2927}
2928
2929
2930void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2931  Register context = ToRegister(instr->context());
2932  Register value = ToRegister(instr->value());
2933  Register scratch = scratch0();
2934  MemOperand target = ContextOperand(context, instr->slot_index());
2935
2936  Label skip_assignment;
2937
2938  if (instr->hydrogen()->RequiresHoleCheck()) {
2939    __ ld(scratch, target);
2940    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2941
2942    if (instr->hydrogen()->DeoptimizesOnHole()) {
2943      DeoptimizeIf(eq, instr, scratch, Operand(at));
2944    } else {
2945      __ Branch(&skip_assignment, ne, scratch, Operand(at));
2946    }
2947  }
2948
2949  __ sd(value, target);
2950  if (instr->hydrogen()->NeedsWriteBarrier()) {
2951    SmiCheck check_needed =
2952        instr->hydrogen()->value()->type().IsHeapObject()
2953            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2954    __ RecordWriteContextSlot(context,
2955                              target.offset(),
2956                              value,
2957                              scratch0(),
2958                              GetRAState(),
2959                              kSaveFPRegs,
2960                              EMIT_REMEMBERED_SET,
2961                              check_needed);
2962  }
2963
2964  __ bind(&skip_assignment);
2965}
2966
2967
2968void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2969  HObjectAccess access = instr->hydrogen()->access();
2970  int offset = access.offset();
2971  Register object = ToRegister(instr->object());
2972  if (access.IsExternalMemory()) {
2973    Register result = ToRegister(instr->result());
2974    MemOperand operand = MemOperand(object, offset);
2975    __ Load(result, operand, access.representation());
2976    return;
2977  }
2978
2979  if (instr->hydrogen()->representation().IsDouble()) {
2980    DoubleRegister result = ToDoubleRegister(instr->result());
2981    __ ldc1(result, FieldMemOperand(object, offset));
2982    return;
2983  }
2984
2985  Register result = ToRegister(instr->result());
2986  if (!access.IsInobject()) {
2987    __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2988    object = result;
2989  }
2990
2991  Representation representation = access.representation();
2992  if (representation.IsSmi() && SmiValuesAre32Bits() &&
2993      instr->hydrogen()->representation().IsInteger32()) {
2994    if (FLAG_debug_code) {
2995      // Verify this is really an Smi.
2996      Register scratch = scratch0();
2997      __ Load(scratch, FieldMemOperand(object, offset), representation);
2998      __ AssertSmi(scratch);
2999    }
3000
3001    // Read int value directly from upper half of the smi.
3002    STATIC_ASSERT(kSmiTag == 0);
3003    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3004    offset += kPointerSize / 2;
3005    representation = Representation::Integer32();
3006  }
3007  __ Load(result, FieldMemOperand(object, offset), representation);
3008}
3009
3010
3011void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3012  DCHECK(ToRegister(instr->context()).is(cp));
3013  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3014  DCHECK(ToRegister(instr->result()).is(v0));
3015
3016  // Name is always in a2.
3017  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
3018  if (FLAG_vector_ics) {
3019    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3020  }
3021  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
3022  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3023}
3024
3025
3026void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3027  Register scratch = scratch0();
3028  Register function = ToRegister(instr->function());
3029  Register result = ToRegister(instr->result());
3030
3031  // Get the prototype or initial map from the function.
3032  __ ld(result,
3033         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3034
3035  // Check that the function has a prototype or an initial map.
3036  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3037  DeoptimizeIf(eq, instr, result, Operand(at));
3038
3039  // If the function does not have an initial map, we're done.
3040  Label done;
3041  __ GetObjectType(result, scratch, scratch);
3042  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
3043
3044  // Get the prototype from the initial map.
3045  __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
3046
3047  // All done.
3048  __ bind(&done);
3049}
3050
3051
3052void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3053  Register result = ToRegister(instr->result());
3054  __ LoadRoot(result, instr->index());
3055}
3056
3057
3058void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3059  Register arguments = ToRegister(instr->arguments());
3060  Register result = ToRegister(instr->result());
3061  // There are two words between the frame pointer and the last argument.
3062  // Subtracting from length accounts for one of them add one more.
3063  if (instr->length()->IsConstantOperand()) {
3064    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3065    if (instr->index()->IsConstantOperand()) {
3066      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3067      int index = (const_length - const_index) + 1;
3068      __ ld(result, MemOperand(arguments, index * kPointerSize));
3069    } else {
3070      Register index = ToRegister(instr->index());
3071      __ li(at, Operand(const_length + 1));
3072      __ Dsubu(result, at, index);
3073      __ dsll(at, result, kPointerSizeLog2);
3074      __ Daddu(at, arguments, at);
3075      __ ld(result, MemOperand(at));
3076    }
3077  } else if (instr->index()->IsConstantOperand()) {
3078    Register length = ToRegister(instr->length());
3079    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3080    int loc = const_index - 1;
3081    if (loc != 0) {
3082      __ Dsubu(result, length, Operand(loc));
3083      __ dsll(at, result, kPointerSizeLog2);
3084      __ Daddu(at, arguments, at);
3085      __ ld(result, MemOperand(at));
3086    } else {
3087      __ dsll(at, length, kPointerSizeLog2);
3088      __ Daddu(at, arguments, at);
3089      __ ld(result, MemOperand(at));
3090    }
3091  } else {
3092    Register length = ToRegister(instr->length());
3093    Register index = ToRegister(instr->index());
3094    __ Dsubu(result, length, index);
3095    __ Daddu(result, result, 1);
3096    __ dsll(at, result, kPointerSizeLog2);
3097    __ Daddu(at, arguments, at);
3098    __ ld(result, MemOperand(at));
3099  }
3100}
3101
3102
3103void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3104  Register external_pointer = ToRegister(instr->elements());
3105  Register key = no_reg;
3106  ElementsKind elements_kind = instr->elements_kind();
3107  bool key_is_constant = instr->key()->IsConstantOperand();
3108  int constant_key = 0;
3109  if (key_is_constant) {
3110    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3111    if (constant_key & 0xF0000000) {
3112      Abort(kArrayIndexConstantValueTooBig);
3113    }
3114  } else {
3115    key = ToRegister(instr->key());
3116  }
3117  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3118  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3119      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
3120      : element_size_shift;
3121  int base_offset = instr->base_offset();
3122
3123  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3124      elements_kind == FLOAT32_ELEMENTS ||
3125      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3126      elements_kind == FLOAT64_ELEMENTS) {
3127    int base_offset = instr->base_offset();
3128    FPURegister result = ToDoubleRegister(instr->result());
3129    if (key_is_constant) {
3130      __ Daddu(scratch0(), external_pointer,
3131          constant_key << element_size_shift);
3132    } else {
3133      if (shift_size < 0) {
3134         if (shift_size == -32) {
3135           __ dsra32(scratch0(), key, 0);
3136         } else {
3137           __ dsra(scratch0(), key, -shift_size);
3138         }
3139      } else {
3140        __ dsll(scratch0(), key, shift_size);
3141      }
3142      __ Daddu(scratch0(), scratch0(), external_pointer);
3143    }
3144    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3145        elements_kind == FLOAT32_ELEMENTS) {
3146      __ lwc1(result, MemOperand(scratch0(), base_offset));
3147      __ cvt_d_s(result, result);
3148    } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3149      __ ldc1(result, MemOperand(scratch0(), base_offset));
3150    }
3151  } else {
3152    Register result = ToRegister(instr->result());
3153    MemOperand mem_operand = PrepareKeyedOperand(
3154        key, external_pointer, key_is_constant, constant_key,
3155        element_size_shift, shift_size, base_offset);
3156    switch (elements_kind) {
3157      case EXTERNAL_INT8_ELEMENTS:
3158      case INT8_ELEMENTS:
3159        __ lb(result, mem_operand);
3160        break;
3161      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3162      case EXTERNAL_UINT8_ELEMENTS:
3163      case UINT8_ELEMENTS:
3164      case UINT8_CLAMPED_ELEMENTS:
3165        __ lbu(result, mem_operand);
3166        break;
3167      case EXTERNAL_INT16_ELEMENTS:
3168      case INT16_ELEMENTS:
3169        __ lh(result, mem_operand);
3170        break;
3171      case EXTERNAL_UINT16_ELEMENTS:
3172      case UINT16_ELEMENTS:
3173        __ lhu(result, mem_operand);
3174        break;
3175      case EXTERNAL_INT32_ELEMENTS:
3176      case INT32_ELEMENTS:
3177        __ lw(result, mem_operand);
3178        break;
3179      case EXTERNAL_UINT32_ELEMENTS:
3180      case UINT32_ELEMENTS:
3181        __ lw(result, mem_operand);
3182        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3183          DeoptimizeIf(Ugreater_equal, instr, result, Operand(0x80000000));
3184        }
3185        break;
3186      case FLOAT32_ELEMENTS:
3187      case FLOAT64_ELEMENTS:
3188      case EXTERNAL_FLOAT32_ELEMENTS:
3189      case EXTERNAL_FLOAT64_ELEMENTS:
3190      case FAST_DOUBLE_ELEMENTS:
3191      case FAST_ELEMENTS:
3192      case FAST_SMI_ELEMENTS:
3193      case FAST_HOLEY_DOUBLE_ELEMENTS:
3194      case FAST_HOLEY_ELEMENTS:
3195      case FAST_HOLEY_SMI_ELEMENTS:
3196      case DICTIONARY_ELEMENTS:
3197      case SLOPPY_ARGUMENTS_ELEMENTS:
3198        UNREACHABLE();
3199        break;
3200    }
3201  }
3202}
3203
3204
3205void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3206  Register elements = ToRegister(instr->elements());
3207  bool key_is_constant = instr->key()->IsConstantOperand();
3208  Register key = no_reg;
3209  DoubleRegister result = ToDoubleRegister(instr->result());
3210  Register scratch = scratch0();
3211
3212  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3213
3214  int base_offset = instr->base_offset();
3215  if (key_is_constant) {
3216    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3217    if (constant_key & 0xF0000000) {
3218      Abort(kArrayIndexConstantValueTooBig);
3219    }
3220    base_offset += constant_key * kDoubleSize;
3221  }
3222  __ Daddu(scratch, elements, Operand(base_offset));
3223
3224  if (!key_is_constant) {
3225    key = ToRegister(instr->key());
3226    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3227        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
3228        : element_size_shift;
3229    if (shift_size > 0) {
3230      __ dsll(at, key, shift_size);
3231    } else if (shift_size == -32) {
3232      __ dsra32(at, key, 0);
3233    } else {
3234      __ dsra(at, key, -shift_size);
3235    }
3236    __ Daddu(scratch, scratch, at);
3237  }
3238
3239  __ ldc1(result, MemOperand(scratch));
3240
3241  if (instr->hydrogen()->RequiresHoleCheck()) {
3242    __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
3243    DeoptimizeIf(eq, instr, scratch, Operand(kHoleNanUpper32));
3244  }
3245}
3246
3247
3248void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3249  HLoadKeyed* hinstr = instr->hydrogen();
3250  Register elements = ToRegister(instr->elements());
3251  Register result = ToRegister(instr->result());
3252  Register scratch = scratch0();
3253  Register store_base = scratch;
3254  int offset = instr->base_offset();
3255
3256  if (instr->key()->IsConstantOperand()) {
3257    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3258    offset += ToInteger32(const_operand) * kPointerSize;
3259    store_base = elements;
3260  } else {
3261    Register key = ToRegister(instr->key());
3262    // Even though the HLoadKeyed instruction forces the input
3263    // representation for the key to be an integer, the input gets replaced
3264    // during bound check elimination with the index argument to the bounds
3265    // check, which can be tagged, so that case must be handled here, too.
3266    if (instr->hydrogen()->key()->representation().IsSmi()) {
3267    __ SmiScale(scratch, key, kPointerSizeLog2);
3268    __ daddu(scratch, elements, scratch);
3269    } else {
3270      __ dsll(scratch, key, kPointerSizeLog2);
3271      __ daddu(scratch, elements, scratch);
3272    }
3273  }
3274
3275  Representation representation = hinstr->representation();
3276  if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3277      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3278    DCHECK(!hinstr->RequiresHoleCheck());
3279    if (FLAG_debug_code) {
3280      Register temp = scratch1();
3281      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
3282      __ AssertSmi(temp);
3283    }
3284
3285    // Read int value directly from upper half of the smi.
3286    STATIC_ASSERT(kSmiTag == 0);
3287    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3288    offset += kPointerSize / 2;
3289  }
3290
3291  __ Load(result, MemOperand(store_base, offset), representation);
3292
3293  // Check for the hole value.
3294  if (hinstr->RequiresHoleCheck()) {
3295    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3296      __ SmiTst(result, scratch);
3297      DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
3298    } else {
3299      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3300      DeoptimizeIf(eq, instr, result, Operand(scratch));
3301    }
3302  }
3303}
3304
3305
3306void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3307  if (instr->is_typed_elements()) {
3308    DoLoadKeyedExternalArray(instr);
3309  } else if (instr->hydrogen()->representation().IsDouble()) {
3310    DoLoadKeyedFixedDoubleArray(instr);
3311  } else {
3312    DoLoadKeyedFixedArray(instr);
3313  }
3314}
3315
3316
3317MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3318                                         Register base,
3319                                         bool key_is_constant,
3320                                         int constant_key,
3321                                         int element_size,
3322                                         int shift_size,
3323                                         int base_offset) {
3324  if (key_is_constant) {
3325    return MemOperand(base, (constant_key << element_size) + base_offset);
3326  }
3327
3328  if (base_offset == 0) {
3329    if (shift_size >= 0) {
3330      __ dsll(scratch0(), key, shift_size);
3331      __ Daddu(scratch0(), base, scratch0());
3332      return MemOperand(scratch0());
3333    } else {
3334      if (shift_size == -32) {
3335        __ dsra32(scratch0(), key, 0);
3336      } else {
3337        __ dsra(scratch0(), key, -shift_size);
3338      }
3339      __ Daddu(scratch0(), base, scratch0());
3340      return MemOperand(scratch0());
3341    }
3342  }
3343
3344  if (shift_size >= 0) {
3345    __ dsll(scratch0(), key, shift_size);
3346    __ Daddu(scratch0(), base, scratch0());
3347    return MemOperand(scratch0(), base_offset);
3348  } else {
3349    if (shift_size == -32) {
3350       __ dsra32(scratch0(), key, 0);
3351    } else {
3352      __ dsra(scratch0(), key, -shift_size);
3353    }
3354    __ Daddu(scratch0(), base, scratch0());
3355    return MemOperand(scratch0(), base_offset);
3356  }
3357}
3358
3359
3360void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3361  DCHECK(ToRegister(instr->context()).is(cp));
3362  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3363  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3364
3365  if (FLAG_vector_ics) {
3366    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3367  }
3368
3369  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
3370  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3371}
3372
3373
3374void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3375  Register scratch = scratch0();
3376  Register temp = scratch1();
3377  Register result = ToRegister(instr->result());
3378
3379  if (instr->hydrogen()->from_inlined()) {
3380    __ Dsubu(result, sp, 2 * kPointerSize);
3381  } else {
3382    // Check if the calling frame is an arguments adaptor frame.
3383    Label done, adapted;
3384    __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3385    __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3386    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3387
3388    // Result is the frame pointer for the frame if not adapted and for the real
3389    // frame below the adaptor frame if adapted.
3390    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
3391    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
3392  }
3393}
3394
3395
3396void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3397  Register elem = ToRegister(instr->elements());
3398  Register result = ToRegister(instr->result());
3399
3400  Label done;
3401
3402  // If no arguments adaptor frame the number of arguments is fixed.
3403  __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
3404  __ Branch(&done, eq, fp, Operand(elem));
3405
3406  // Arguments adaptor frame present. Get argument length from there.
3407  __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3408  __ ld(result,
3409        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3410  __ SmiUntag(result);
3411
3412  // Argument length is in result register.
3413  __ bind(&done);
3414}
3415
3416
3417void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3418  Register receiver = ToRegister(instr->receiver());
3419  Register function = ToRegister(instr->function());
3420  Register result = ToRegister(instr->result());
3421  Register scratch = scratch0();
3422
3423  // If the receiver is null or undefined, we have to pass the global
3424  // object as a receiver to normal functions. Values have to be
3425  // passed unchanged to builtins and strict-mode functions.
3426  Label global_object, result_in_receiver;
3427
3428  if (!instr->hydrogen()->known_function()) {
3429    // Do not transform the receiver to object for strict mode functions.
3430    __ ld(scratch,
3431           FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3432
3433    // Do not transform the receiver to object for builtins.
3434    int32_t strict_mode_function_mask =
3435        1 <<  SharedFunctionInfo::kStrictModeBitWithinByte;
3436    int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
3437
3438    __ lbu(at,
3439           FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
3440    __ And(at, at, Operand(strict_mode_function_mask));
3441    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
3442    __ lbu(at,
3443           FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
3444    __ And(at, at, Operand(native_mask));
3445    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
3446  }
3447
3448  // Normal function. Replace undefined or null with global receiver.
3449  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3450  __ Branch(&global_object, eq, receiver, Operand(scratch));
3451  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3452  __ Branch(&global_object, eq, receiver, Operand(scratch));
3453
3454  // Deoptimize if the receiver is not a JS object.
3455  __ SmiTst(receiver, scratch);
3456  DeoptimizeIf(eq, instr, scratch, Operand(zero_reg));
3457
3458  __ GetObjectType(receiver, scratch, scratch);
3459  DeoptimizeIf(lt, instr, scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
3460  __ Branch(&result_in_receiver);
3461
3462  __ bind(&global_object);
3463  __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
3464  __ ld(result,
3465        ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3466  __ ld(result,
3467        FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3468
3469  if (result.is(receiver)) {
3470    __ bind(&result_in_receiver);
3471  } else {
3472    Label result_ok;
3473    __ Branch(&result_ok);
3474    __ bind(&result_in_receiver);
3475    __ mov(result, receiver);
3476    __ bind(&result_ok);
3477  }
3478}
3479
3480
3481void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3482  Register receiver = ToRegister(instr->receiver());
3483  Register function = ToRegister(instr->function());
3484  Register length = ToRegister(instr->length());
3485  Register elements = ToRegister(instr->elements());
3486  Register scratch = scratch0();
3487  DCHECK(receiver.is(a0));  // Used for parameter count.
3488  DCHECK(function.is(a1));  // Required by InvokeFunction.
3489  DCHECK(ToRegister(instr->result()).is(v0));
3490
3491  // Copy the arguments to this function possibly from the
3492  // adaptor frame below it.
3493  const uint32_t kArgumentsLimit = 1 * KB;
3494  DeoptimizeIf(hi, instr, length, Operand(kArgumentsLimit));
3495
3496  // Push the receiver and use the register to keep the original
3497  // number of arguments.
3498  __ push(receiver);
3499  __ Move(receiver, length);
3500  // The arguments are at a one pointer size offset from elements.
3501  __ Daddu(elements, elements, Operand(1 * kPointerSize));
3502
3503  // Loop through the arguments pushing them onto the execution
3504  // stack.
3505  Label invoke, loop;
3506  // length is a small non-negative integer, due to the test above.
3507  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3508  __ dsll(scratch, length, kPointerSizeLog2);
3509  __ bind(&loop);
3510  __ Daddu(scratch, elements, scratch);
3511  __ ld(scratch, MemOperand(scratch));
3512  __ push(scratch);
3513  __ Dsubu(length, length, Operand(1));
3514  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3515  __ dsll(scratch, length, kPointerSizeLog2);
3516
3517  __ bind(&invoke);
3518  DCHECK(instr->HasPointerMap());
3519  LPointerMap* pointers = instr->pointer_map();
3520  SafepointGenerator safepoint_generator(
3521      this, pointers, Safepoint::kLazyDeopt);
3522  // The number of arguments is stored in receiver which is a0, as expected
3523  // by InvokeFunction.
3524  ParameterCount actual(receiver);
3525  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3526}
3527
3528
3529void LCodeGen::DoPushArgument(LPushArgument* instr) {
3530  LOperand* argument = instr->value();
3531  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3532    Abort(kDoPushArgumentNotImplementedForDoubleType);
3533  } else {
3534    Register argument_reg = EmitLoadRegister(argument, at);
3535    __ push(argument_reg);
3536  }
3537}
3538
3539
3540void LCodeGen::DoDrop(LDrop* instr) {
3541  __ Drop(instr->count());
3542}
3543
3544
3545void LCodeGen::DoThisFunction(LThisFunction* instr) {
3546  Register result = ToRegister(instr->result());
3547  __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3548}
3549
3550
3551void LCodeGen::DoContext(LContext* instr) {
3552  // If there is a non-return use, the context must be moved to a register.
3553  Register result = ToRegister(instr->result());
3554  if (info()->IsOptimizing()) {
3555    __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3556  } else {
3557    // If there is no frame, the context must be in cp.
3558    DCHECK(result.is(cp));
3559  }
3560}
3561
3562
3563void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3564  DCHECK(ToRegister(instr->context()).is(cp));
3565  __ li(scratch0(), instr->hydrogen()->pairs());
3566  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3567  // The context is the first argument.
3568  __ Push(cp, scratch0(), scratch1());
3569  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3570}
3571
3572
3573void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3574                                 int formal_parameter_count,
3575                                 int arity,
3576                                 LInstruction* instr,
3577                                 A1State a1_state) {
3578  bool dont_adapt_arguments =
3579      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3580  bool can_invoke_directly =
3581      dont_adapt_arguments || formal_parameter_count == arity;
3582
3583  LPointerMap* pointers = instr->pointer_map();
3584
3585  if (can_invoke_directly) {
3586    if (a1_state == A1_UNINITIALIZED) {
3587      __ li(a1, function);
3588    }
3589
3590    // Change context.
3591    __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3592
3593    // Set r0 to arguments count if adaption is not needed. Assumes that r0
3594    // is available to write to at this point.
3595    if (dont_adapt_arguments) {
3596      __ li(a0, Operand(arity));
3597    }
3598
3599    // Invoke function.
3600    __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3601    __ Call(at);
3602
3603    // Set up deoptimization.
3604    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3605  } else {
3606    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3607    ParameterCount count(arity);
3608    ParameterCount expected(formal_parameter_count);
3609    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3610  }
3611}
3612
3613
3614void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3615  DCHECK(instr->context() != NULL);
3616  DCHECK(ToRegister(instr->context()).is(cp));
3617  Register input = ToRegister(instr->value());
3618  Register result = ToRegister(instr->result());
3619  Register scratch = scratch0();
3620
3621  // Deoptimize if not a heap number.
3622  __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3623  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3624  DeoptimizeIf(ne, instr, scratch, Operand(at));
3625
3626  Label done;
3627  Register exponent = scratch0();
3628  scratch = no_reg;
3629  __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3630  // Check the sign of the argument. If the argument is positive, just
3631  // return it.
3632  __ Move(result, input);
3633  __ And(at, exponent, Operand(HeapNumber::kSignMask));
3634  __ Branch(&done, eq, at, Operand(zero_reg));
3635
3636  // Input is negative. Reverse its sign.
3637  // Preserve the value of all registers.
3638  {
3639    PushSafepointRegistersScope scope(this);
3640
3641    // Registers were saved at the safepoint, so we can use
3642    // many scratch registers.
3643    Register tmp1 = input.is(a1) ? a0 : a1;
3644    Register tmp2 = input.is(a2) ? a0 : a2;
3645    Register tmp3 = input.is(a3) ? a0 : a3;
3646    Register tmp4 = input.is(a4) ? a0 : a4;
3647
3648    // exponent: floating point exponent value.
3649
3650    Label allocated, slow;
3651    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3652    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3653    __ Branch(&allocated);
3654
3655    // Slow case: Call the runtime system to do the number allocation.
3656    __ bind(&slow);
3657
3658    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3659                            instr->context());
3660    // Set the pointer to the new heap number in tmp.
3661    if (!tmp1.is(v0))
3662      __ mov(tmp1, v0);
3663    // Restore input_reg after call to runtime.
3664    __ LoadFromSafepointRegisterSlot(input, input);
3665    __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3666
3667    __ bind(&allocated);
3668    // exponent: floating point exponent value.
3669    // tmp1: allocated heap number.
3670    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3671    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3672    __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3673    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3674
3675    __ StoreToSafepointRegisterSlot(tmp1, result);
3676  }
3677
3678  __ bind(&done);
3679}
3680
3681
3682void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3683  Register input = ToRegister(instr->value());
3684  Register result = ToRegister(instr->result());
3685  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3686  Label done;
3687  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3688  __ mov(result, input);
3689  __ dsubu(result, zero_reg, input);
3690  // Overflow if result is still negative, i.e. 0x80000000.
3691  DeoptimizeIf(lt, instr, result, Operand(zero_reg));
3692  __ bind(&done);
3693}
3694
3695
3696void LCodeGen::DoMathAbs(LMathAbs* instr) {
3697  // Class for deferred case.
3698  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3699   public:
3700    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3701        : LDeferredCode(codegen), instr_(instr) { }
3702    virtual void Generate() OVERRIDE {
3703      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3704    }
3705    virtual LInstruction* instr() OVERRIDE { return instr_; }
3706   private:
3707    LMathAbs* instr_;
3708  };
3709
3710  Representation r = instr->hydrogen()->value()->representation();
3711  if (r.IsDouble()) {
3712    FPURegister input = ToDoubleRegister(instr->value());
3713    FPURegister result = ToDoubleRegister(instr->result());
3714    __ abs_d(result, input);
3715  } else if (r.IsSmiOrInteger32()) {
3716    EmitIntegerMathAbs(instr);
3717  } else {
3718    // Representation is tagged.
3719    DeferredMathAbsTaggedHeapNumber* deferred =
3720        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3721    Register input = ToRegister(instr->value());
3722    // Smi check.
3723    __ JumpIfNotSmi(input, deferred->entry());
3724    // If smi, handle it directly.
3725    EmitIntegerMathAbs(instr);
3726    __ bind(deferred->exit());
3727  }
3728}
3729
3730
3731void LCodeGen::DoMathFloor(LMathFloor* instr) {
3732  DoubleRegister input = ToDoubleRegister(instr->value());
3733  Register result = ToRegister(instr->result());
3734  Register scratch1 = scratch0();
3735  Register except_flag = ToRegister(instr->temp());
3736
3737  __ EmitFPUTruncate(kRoundToMinusInf,
3738                     result,
3739                     input,
3740                     scratch1,
3741                     double_scratch0(),
3742                     except_flag);
3743
3744  // Deopt if the operation did not succeed.
3745  DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
3746
3747  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3748    // Test for -0.
3749    Label done;
3750    __ Branch(&done, ne, result, Operand(zero_reg));
3751    __ mfhc1(scratch1, input);  // Get exponent/sign bits.
3752    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3753    DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
3754    __ bind(&done);
3755  }
3756}
3757
3758
3759void LCodeGen::DoMathRound(LMathRound* instr) {
3760  DoubleRegister input = ToDoubleRegister(instr->value());
3761  Register result = ToRegister(instr->result());
3762  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3763  Register scratch = scratch0();
3764  Label done, check_sign_on_zero;
3765
3766  // Extract exponent bits.
3767  __ mfhc1(result, input);
3768  __ Ext(scratch,
3769         result,
3770         HeapNumber::kExponentShift,
3771         HeapNumber::kExponentBits);
3772
3773  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3774  Label skip1;
3775  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3776  __ mov(result, zero_reg);
3777  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3778    __ Branch(&check_sign_on_zero);
3779  } else {
3780    __ Branch(&done);
3781  }
3782  __ bind(&skip1);
3783
3784  // The following conversion will not work with numbers
3785  // outside of ]-2^32, 2^32[.
3786  DeoptimizeIf(ge, instr, scratch, Operand(HeapNumber::kExponentBias + 32));
3787
3788  // Save the original sign for later comparison.
3789  __ And(scratch, result, Operand(HeapNumber::kSignMask));
3790
3791  __ Move(double_scratch0(), 0.5);
3792  __ add_d(double_scratch0(), input, double_scratch0());
3793
3794  // Check sign of the result: if the sign changed, the input
3795  // value was in ]0.5, 0[ and the result should be -0.
3796  __ mfhc1(result, double_scratch0());
3797  // mfhc1 sign-extends, clear the upper bits.
3798  __ dsll32(result, result, 0);
3799  __ dsrl32(result, result, 0);
3800  __ Xor(result, result, Operand(scratch));
3801  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3802    // ARM uses 'mi' here, which is 'lt'
3803    DeoptimizeIf(lt, instr, result, Operand(zero_reg));
3804  } else {
3805    Label skip2;
3806    // ARM uses 'mi' here, which is 'lt'
3807    // Negating it results in 'ge'
3808    __ Branch(&skip2, ge, result, Operand(zero_reg));
3809    __ mov(result, zero_reg);
3810    __ Branch(&done);
3811    __ bind(&skip2);
3812  }
3813
3814  Register except_flag = scratch;
3815  __ EmitFPUTruncate(kRoundToMinusInf,
3816                     result,
3817                     double_scratch0(),
3818                     at,
3819                     double_scratch1,
3820                     except_flag);
3821
3822  DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
3823
3824  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3825    // Test for -0.
3826    __ Branch(&done, ne, result, Operand(zero_reg));
3827    __ bind(&check_sign_on_zero);
3828    __ mfhc1(scratch, input);  // Get exponent/sign bits.
3829    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3830    DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
3831  }
3832  __ bind(&done);
3833}
3834
3835
3836void LCodeGen::DoMathFround(LMathFround* instr) {
3837  DoubleRegister input = ToDoubleRegister(instr->value());
3838  DoubleRegister result = ToDoubleRegister(instr->result());
3839  __ cvt_s_d(result, input);
3840  __ cvt_d_s(result, result);
3841}
3842
3843
3844void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3845  DoubleRegister input = ToDoubleRegister(instr->value());
3846  DoubleRegister result = ToDoubleRegister(instr->result());
3847  __ sqrt_d(result, input);
3848}
3849
3850
3851void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3852  DoubleRegister input = ToDoubleRegister(instr->value());
3853  DoubleRegister result = ToDoubleRegister(instr->result());
3854  DoubleRegister temp = ToDoubleRegister(instr->temp());
3855
3856  DCHECK(!input.is(result));
3857
3858  // Note that according to ECMA-262 15.8.2.13:
3859  // Math.pow(-Infinity, 0.5) == Infinity
3860  // Math.sqrt(-Infinity) == NaN
3861  Label done;
3862  __ Move(temp, -V8_INFINITY);
3863  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3864  // Set up Infinity in the delay slot.
3865  // result is overwritten if the branch is not taken.
3866  __ neg_d(result, temp);
3867
3868  // Add +0 to convert -0 to +0.
3869  __ add_d(result, input, kDoubleRegZero);
3870  __ sqrt_d(result, result);
3871  __ bind(&done);
3872}
3873
3874
3875void LCodeGen::DoPower(LPower* instr) {
3876  Representation exponent_type = instr->hydrogen()->right()->representation();
3877  // Having marked this as a call, we can use any registers.
3878  // Just make sure that the input/output registers are the expected ones.
3879  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3880  DCHECK(!instr->right()->IsDoubleRegister() ||
3881         ToDoubleRegister(instr->right()).is(f4));
3882  DCHECK(!instr->right()->IsRegister() ||
3883         ToRegister(instr->right()).is(tagged_exponent));
3884  DCHECK(ToDoubleRegister(instr->left()).is(f2));
3885  DCHECK(ToDoubleRegister(instr->result()).is(f0));
3886
3887  if (exponent_type.IsSmi()) {
3888    MathPowStub stub(isolate(), MathPowStub::TAGGED);
3889    __ CallStub(&stub);
3890  } else if (exponent_type.IsTagged()) {
3891    Label no_deopt;
3892    __ JumpIfSmi(tagged_exponent, &no_deopt);
3893    DCHECK(!a7.is(tagged_exponent));
3894    __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3895    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3896    DeoptimizeIf(ne, instr, a7, Operand(at));
3897    __ bind(&no_deopt);
3898    MathPowStub stub(isolate(), MathPowStub::TAGGED);
3899    __ CallStub(&stub);
3900  } else if (exponent_type.IsInteger32()) {
3901    MathPowStub stub(isolate(), MathPowStub::INTEGER);
3902    __ CallStub(&stub);
3903  } else {
3904    DCHECK(exponent_type.IsDouble());
3905    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3906    __ CallStub(&stub);
3907  }
3908}
3909
3910
3911void LCodeGen::DoMathExp(LMathExp* instr) {
3912  DoubleRegister input = ToDoubleRegister(instr->value());
3913  DoubleRegister result = ToDoubleRegister(instr->result());
3914  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3915  DoubleRegister double_scratch2 = double_scratch0();
3916  Register temp1 = ToRegister(instr->temp1());
3917  Register temp2 = ToRegister(instr->temp2());
3918
3919  MathExpGenerator::EmitMathExp(
3920      masm(), input, result, double_scratch1, double_scratch2,
3921      temp1, temp2, scratch0());
3922}
3923
3924
3925void LCodeGen::DoMathLog(LMathLog* instr) {
3926  __ PrepareCallCFunction(0, 1, scratch0());
3927  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3928  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3929                   0, 1);
3930  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3931}
3932
3933
3934void LCodeGen::DoMathClz32(LMathClz32* instr) {
3935  Register input = ToRegister(instr->value());
3936  Register result = ToRegister(instr->result());
3937  __ Clz(result, input);
3938}
3939
3940
3941void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3942  DCHECK(ToRegister(instr->context()).is(cp));
3943  DCHECK(ToRegister(instr->function()).is(a1));
3944  DCHECK(instr->HasPointerMap());
3945
3946  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3947  if (known_function.is_null()) {
3948    LPointerMap* pointers = instr->pointer_map();
3949    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3950    ParameterCount count(instr->arity());
3951    __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
3952  } else {
3953    CallKnownFunction(known_function,
3954                      instr->hydrogen()->formal_parameter_count(),
3955                      instr->arity(),
3956                      instr,
3957                      A1_CONTAINS_TARGET);
3958  }
3959}
3960
3961
3962void LCodeGen::DoTailCallThroughMegamorphicCache(
3963    LTailCallThroughMegamorphicCache* instr) {
3964  Register receiver = ToRegister(instr->receiver());
3965  Register name = ToRegister(instr->name());
3966  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
3967  DCHECK(name.is(LoadDescriptor::NameRegister()));
3968  DCHECK(receiver.is(a1));
3969  DCHECK(name.is(a2));
3970
3971  Register scratch = a3;
3972  Register extra = a4;
3973  Register extra2 = a5;
3974  Register extra3 = a6;
3975
3976  // Important for the tail-call.
3977  bool must_teardown_frame = NeedsEagerFrame();
3978
3979  // The probe will tail call to a handler if found.
3980  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
3981                                         must_teardown_frame, receiver, name,
3982                                         scratch, extra, extra2, extra3);
3983
3984  // Tail call to miss if we ended up here.
3985  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
3986  LoadIC::GenerateMiss(masm());
3987}
3988
3989
3990void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3991  DCHECK(ToRegister(instr->result()).is(v0));
3992
3993  LPointerMap* pointers = instr->pointer_map();
3994  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3995
3996  if (instr->target()->IsConstantOperand()) {
3997    LConstantOperand* target = LConstantOperand::cast(instr->target());
3998    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3999    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
4000    __ Call(code, RelocInfo::CODE_TARGET);
4001  } else {
4002    DCHECK(instr->target()->IsRegister());
4003    Register target = ToRegister(instr->target());
4004    generator.BeforeCall(__ CallSize(target));
4005    __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
4006    __ Call(target);
4007  }
4008  generator.AfterCall();
4009}
4010
4011
4012void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
4013  DCHECK(ToRegister(instr->function()).is(a1));
4014  DCHECK(ToRegister(instr->result()).is(v0));
4015
4016  if (instr->hydrogen()->pass_argument_count()) {
4017    __ li(a0, Operand(instr->arity()));
4018  }
4019
4020  // Change context.
4021  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4022
4023  // Load the code entry address
4024  __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4025  __ Call(at);
4026
4027  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
4028}
4029
4030
4031void LCodeGen::DoCallFunction(LCallFunction* instr) {
4032  DCHECK(ToRegister(instr->context()).is(cp));
4033  DCHECK(ToRegister(instr->function()).is(a1));
4034  DCHECK(ToRegister(instr->result()).is(v0));
4035
4036  int arity = instr->arity();
4037  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
4038  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4039}
4040
4041
4042void LCodeGen::DoCallNew(LCallNew* instr) {
4043  DCHECK(ToRegister(instr->context()).is(cp));
4044  DCHECK(ToRegister(instr->constructor()).is(a1));
4045  DCHECK(ToRegister(instr->result()).is(v0));
4046
4047  __ li(a0, Operand(instr->arity()));
4048  // No cell in a2 for construct type feedback in optimized code
4049  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
4050  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
4051  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4052}
4053
4054
4055void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4056  DCHECK(ToRegister(instr->context()).is(cp));
4057  DCHECK(ToRegister(instr->constructor()).is(a1));
4058  DCHECK(ToRegister(instr->result()).is(v0));
4059
4060  __ li(a0, Operand(instr->arity()));
4061  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
4062  ElementsKind kind = instr->hydrogen()->elements_kind();
4063  AllocationSiteOverrideMode override_mode =
4064      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4065          ? DISABLE_ALLOCATION_SITES
4066          : DONT_OVERRIDE;
4067
4068  if (instr->arity() == 0) {
4069    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
4070    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4071  } else if (instr->arity() == 1) {
4072    Label done;
4073    if (IsFastPackedElementsKind(kind)) {
4074      Label packed_case;
4075      // We might need a change here,
4076      // look at the first argument.
4077      __ ld(a5, MemOperand(sp, 0));
4078      __ Branch(&packed_case, eq, a5, Operand(zero_reg));
4079
4080      ElementsKind holey_kind = GetHoleyElementsKind(kind);
4081      ArraySingleArgumentConstructorStub stub(isolate(),
4082                                              holey_kind,
4083                                              override_mode);
4084      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4085      __ jmp(&done);
4086      __ bind(&packed_case);
4087    }
4088
4089    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4090    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4091    __ bind(&done);
4092  } else {
4093    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4094    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4095  }
4096}
4097
4098
4099void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4100  CallRuntime(instr->function(), instr->arity(), instr);
4101}
4102
4103
4104void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4105  Register function = ToRegister(instr->function());
4106  Register code_object = ToRegister(instr->code_object());
4107  __ Daddu(code_object, code_object,
4108          Operand(Code::kHeaderSize - kHeapObjectTag));
4109  __ sd(code_object,
4110        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4111}
4112
4113
4114void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4115  Register result = ToRegister(instr->result());
4116  Register base = ToRegister(instr->base_object());
4117  if (instr->offset()->IsConstantOperand()) {
4118    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4119    __ Daddu(result, base, Operand(ToInteger32(offset)));
4120  } else {
4121    Register offset = ToRegister(instr->offset());
4122    __ Daddu(result, base, offset);
4123  }
4124}
4125
4126
4127void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4128  Representation representation = instr->representation();
4129
4130  Register object = ToRegister(instr->object());
4131  Register scratch2 = scratch1();
4132  Register scratch1 = scratch0();
4133  HObjectAccess access = instr->hydrogen()->access();
4134  int offset = access.offset();
4135  if (access.IsExternalMemory()) {
4136    Register value = ToRegister(instr->value());
4137    MemOperand operand = MemOperand(object, offset);
4138    __ Store(value, operand, representation);
4139    return;
4140  }
4141
4142  __ AssertNotSmi(object);
4143
4144  DCHECK(!representation.IsSmi() ||
4145         !instr->value()->IsConstantOperand() ||
4146         IsSmi(LConstantOperand::cast(instr->value())));
4147  if (representation.IsDouble()) {
4148    DCHECK(access.IsInobject());
4149    DCHECK(!instr->hydrogen()->has_transition());
4150    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4151    DoubleRegister value = ToDoubleRegister(instr->value());
4152    __ sdc1(value, FieldMemOperand(object, offset));
4153    return;
4154  }
4155
4156  if (instr->hydrogen()->has_transition()) {
4157    Handle<Map> transition = instr->hydrogen()->transition_map();
4158    AddDeprecationDependency(transition);
4159    __ li(scratch1, Operand(transition));
4160    __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4161    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4162      Register temp = ToRegister(instr->temp());
4163      // Update the write barrier for the map field.
4164      __ RecordWriteForMap(object,
4165                           scratch1,
4166                           temp,
4167                           GetRAState(),
4168                           kSaveFPRegs);
4169    }
4170  }
4171
4172  // Do the store.
4173  Register destination = object;
4174  if (!access.IsInobject()) {
4175       destination = scratch1;
4176    __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
4177  }
4178  Register value = ToRegister(instr->value());
4179  if (representation.IsSmi() && SmiValuesAre32Bits() &&
4180      instr->hydrogen()->value()->representation().IsInteger32()) {
4181    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4182    if (FLAG_debug_code) {
4183      __ Load(scratch2, FieldMemOperand(destination, offset), representation);
4184      __ AssertSmi(scratch2);
4185    }
4186
4187    // Store int value directly to upper half of the smi.
4188    offset += kPointerSize / 2;
4189    representation = Representation::Integer32();
4190  }
4191
4192  MemOperand operand = FieldMemOperand(destination, offset);
4193  __ Store(value, operand, representation);
4194  if (instr->hydrogen()->NeedsWriteBarrier()) {
4195    // Update the write barrier for the object for in-object properties.
4196    __ RecordWriteField(destination,
4197                        offset,
4198                        value,
4199                        scratch2,
4200                        GetRAState(),
4201                        kSaveFPRegs,
4202                        EMIT_REMEMBERED_SET,
4203                        instr->hydrogen()->SmiCheckForWriteBarrier(),
4204                        instr->hydrogen()->PointersToHereCheckForValue());
4205  }
4206}
4207
4208
4209void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4210  DCHECK(ToRegister(instr->context()).is(cp));
4211  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4212  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4213
4214  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
4215  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4216  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4217}
4218
4219
4220void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4221  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
4222  Operand operand((int64_t)0);
4223  Register reg;
4224  if (instr->index()->IsConstantOperand()) {
4225    operand = ToOperand(instr->index());
4226    reg = ToRegister(instr->length());
4227    cc = CommuteCondition(cc);
4228  } else {
4229    reg = ToRegister(instr->index());
4230    operand = ToOperand(instr->length());
4231  }
4232  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4233    Label done;
4234    __ Branch(&done, NegateCondition(cc), reg, operand);
4235    __ stop("eliminated bounds check failed");
4236    __ bind(&done);
4237  } else {
4238    DeoptimizeIf(cc, instr, reg, operand);
4239  }
4240}
4241
4242
4243void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4244  Register external_pointer = ToRegister(instr->elements());
4245  Register key = no_reg;
4246  ElementsKind elements_kind = instr->elements_kind();
4247  bool key_is_constant = instr->key()->IsConstantOperand();
4248  int constant_key = 0;
4249  if (key_is_constant) {
4250    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4251    if (constant_key & 0xF0000000) {
4252      Abort(kArrayIndexConstantValueTooBig);
4253    }
4254  } else {
4255    key = ToRegister(instr->key());
4256  }
4257  int element_size_shift = ElementsKindToShiftSize(elements_kind);
4258  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4259      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
4260      : element_size_shift;
4261  int base_offset = instr->base_offset();
4262
4263  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4264      elements_kind == FLOAT32_ELEMENTS ||
4265      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4266      elements_kind == FLOAT64_ELEMENTS) {
4267    Register address = scratch0();
4268    FPURegister value(ToDoubleRegister(instr->value()));
4269    if (key_is_constant) {
4270      if (constant_key != 0) {
4271        __ Daddu(address, external_pointer,
4272                Operand(constant_key << element_size_shift));
4273      } else {
4274        address = external_pointer;
4275      }
4276    } else {
4277      if (shift_size < 0) {
4278        if (shift_size == -32) {
4279          __ dsra32(address, key, 0);
4280        } else {
4281          __ dsra(address, key, -shift_size);
4282        }
4283      } else {
4284        __ dsll(address, key, shift_size);
4285      }
4286      __ Daddu(address, external_pointer, address);
4287    }
4288
4289    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4290        elements_kind == FLOAT32_ELEMENTS) {
4291      __ cvt_s_d(double_scratch0(), value);
4292      __ swc1(double_scratch0(), MemOperand(address, base_offset));
4293    } else {  // Storing doubles, not floats.
4294      __ sdc1(value, MemOperand(address, base_offset));
4295    }
4296  } else {
4297    Register value(ToRegister(instr->value()));
4298    MemOperand mem_operand = PrepareKeyedOperand(
4299        key, external_pointer, key_is_constant, constant_key,
4300        element_size_shift, shift_size,
4301        base_offset);
4302    switch (elements_kind) {
4303      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4304      case EXTERNAL_INT8_ELEMENTS:
4305      case EXTERNAL_UINT8_ELEMENTS:
4306      case UINT8_ELEMENTS:
4307      case UINT8_CLAMPED_ELEMENTS:
4308      case INT8_ELEMENTS:
4309        __ sb(value, mem_operand);
4310        break;
4311      case EXTERNAL_INT16_ELEMENTS:
4312      case EXTERNAL_UINT16_ELEMENTS:
4313      case INT16_ELEMENTS:
4314      case UINT16_ELEMENTS:
4315        __ sh(value, mem_operand);
4316        break;
4317      case EXTERNAL_INT32_ELEMENTS:
4318      case EXTERNAL_UINT32_ELEMENTS:
4319      case INT32_ELEMENTS:
4320      case UINT32_ELEMENTS:
4321        __ sw(value, mem_operand);
4322        break;
4323      case FLOAT32_ELEMENTS:
4324      case FLOAT64_ELEMENTS:
4325      case EXTERNAL_FLOAT32_ELEMENTS:
4326      case EXTERNAL_FLOAT64_ELEMENTS:
4327      case FAST_DOUBLE_ELEMENTS:
4328      case FAST_ELEMENTS:
4329      case FAST_SMI_ELEMENTS:
4330      case FAST_HOLEY_DOUBLE_ELEMENTS:
4331      case FAST_HOLEY_ELEMENTS:
4332      case FAST_HOLEY_SMI_ELEMENTS:
4333      case DICTIONARY_ELEMENTS:
4334      case SLOPPY_ARGUMENTS_ELEMENTS:
4335        UNREACHABLE();
4336        break;
4337    }
4338  }
4339}
4340
4341
4342void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4343  DoubleRegister value = ToDoubleRegister(instr->value());
4344  Register elements = ToRegister(instr->elements());
4345  Register scratch = scratch0();
4346  DoubleRegister double_scratch = double_scratch0();
4347  bool key_is_constant = instr->key()->IsConstantOperand();
4348  int base_offset = instr->base_offset();
4349  Label not_nan, done;
4350
4351  // Calculate the effective address of the slot in the array to store the
4352  // double value.
4353  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4354  if (key_is_constant) {
4355    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4356    if (constant_key & 0xF0000000) {
4357      Abort(kArrayIndexConstantValueTooBig);
4358    }
4359    __ Daddu(scratch, elements,
4360             Operand((constant_key << element_size_shift) + base_offset));
4361  } else {
4362    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4363        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
4364        : element_size_shift;
4365    __ Daddu(scratch, elements, Operand(base_offset));
4366    DCHECK((shift_size == 3) || (shift_size == -29));
4367    if (shift_size == 3) {
4368      __ dsll(at, ToRegister(instr->key()), 3);
4369    } else if (shift_size == -29) {
4370      __ dsra(at, ToRegister(instr->key()), 29);
4371    }
4372    __ Daddu(scratch, scratch, at);
4373  }
4374
4375  if (instr->NeedsCanonicalization()) {
4376    Label is_nan;
4377    // Check for NaN. All NaNs must be canonicalized.
4378    __ BranchF(NULL, &is_nan, eq, value, value);
4379    __ Branch(&not_nan);
4380
4381    // Only load canonical NaN if the comparison above set the overflow.
4382    __ bind(&is_nan);
4383    __ LoadRoot(at, Heap::kNanValueRootIndex);
4384    __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
4385    __ sdc1(double_scratch, MemOperand(scratch, 0));
4386    __ Branch(&done);
4387  }
4388
4389  __ bind(&not_nan);
4390  __ sdc1(value, MemOperand(scratch, 0));
4391  __ bind(&done);
4392}
4393
4394
4395void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4396  Register value = ToRegister(instr->value());
4397  Register elements = ToRegister(instr->elements());
4398  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4399      : no_reg;
4400  Register scratch = scratch0();
4401  Register store_base = scratch;
4402  int offset = instr->base_offset();
4403
4404  // Do the store.
4405  if (instr->key()->IsConstantOperand()) {
4406    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4407    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4408    offset += ToInteger32(const_operand) * kPointerSize;
4409    store_base = elements;
4410  } else {
4411    // Even though the HLoadKeyed instruction forces the input
4412    // representation for the key to be an integer, the input gets replaced
4413    // during bound check elimination with the index argument to the bounds
4414    // check, which can be tagged, so that case must be handled here, too.
4415    if (instr->hydrogen()->key()->representation().IsSmi()) {
4416      __ SmiScale(scratch, key, kPointerSizeLog2);
4417      __ daddu(store_base, elements, scratch);
4418    } else {
4419      __ dsll(scratch, key, kPointerSizeLog2);
4420      __ daddu(store_base, elements, scratch);
4421    }
4422  }
4423
4424  Representation representation = instr->hydrogen()->value()->representation();
4425  if (representation.IsInteger32() && SmiValuesAre32Bits()) {
4426    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4427    DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
4428    if (FLAG_debug_code) {
4429      Register temp = scratch1();
4430      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
4431      __ AssertSmi(temp);
4432    }
4433
4434    // Store int value directly to upper half of the smi.
4435    STATIC_ASSERT(kSmiTag == 0);
4436    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4437    offset += kPointerSize / 2;
4438    representation = Representation::Integer32();
4439  }
4440
4441  __ Store(value, MemOperand(store_base, offset), representation);
4442
4443  if (instr->hydrogen()->NeedsWriteBarrier()) {
4444    SmiCheck check_needed =
4445        instr->hydrogen()->value()->type().IsHeapObject()
4446            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4447    // Compute address of modified element and store it into key register.
4448    __ Daddu(key, store_base, Operand(offset));
4449    __ RecordWrite(elements,
4450                   key,
4451                   value,
4452                   GetRAState(),
4453                   kSaveFPRegs,
4454                   EMIT_REMEMBERED_SET,
4455                   check_needed,
4456                   instr->hydrogen()->PointersToHereCheckForValue());
4457  }
4458}
4459
4460
4461void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4462  // By cases: external, fast double
4463  if (instr->is_typed_elements()) {
4464    DoStoreKeyedExternalArray(instr);
4465  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4466    DoStoreKeyedFixedDoubleArray(instr);
4467  } else {
4468    DoStoreKeyedFixedArray(instr);
4469  }
4470}
4471
4472
4473void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4474  DCHECK(ToRegister(instr->context()).is(cp));
4475  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4476  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4477  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4478
4479  Handle<Code> ic =
4480      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4481  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4482}
4483
4484
4485void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4486  Register object_reg = ToRegister(instr->object());
4487  Register scratch = scratch0();
4488
4489  Handle<Map> from_map = instr->original_map();
4490  Handle<Map> to_map = instr->transitioned_map();
4491  ElementsKind from_kind = instr->from_kind();
4492  ElementsKind to_kind = instr->to_kind();
4493
4494  Label not_applicable;
4495  __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4496  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
4497
4498  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4499    Register new_map_reg = ToRegister(instr->new_map_temp());
4500    __ li(new_map_reg, Operand(to_map));
4501    __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4502    // Write barrier.
4503    __ RecordWriteForMap(object_reg,
4504                         new_map_reg,
4505                         scratch,
4506                         GetRAState(),
4507                         kDontSaveFPRegs);
4508  } else {
4509    DCHECK(object_reg.is(a0));
4510    DCHECK(ToRegister(instr->context()).is(cp));
4511    PushSafepointRegistersScope scope(this);
4512    __ li(a1, Operand(to_map));
4513    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4514    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4515    __ CallStub(&stub);
4516    RecordSafepointWithRegisters(
4517        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4518  }
4519  __ bind(&not_applicable);
4520}
4521
4522
4523void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4524  Register object = ToRegister(instr->object());
4525  Register temp = ToRegister(instr->temp());
4526  Label no_memento_found;
4527  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
4528                                     ne, &no_memento_found);
4529  DeoptimizeIf(al, instr);
4530  __ bind(&no_memento_found);
4531}
4532
4533
4534void LCodeGen::DoStringAdd(LStringAdd* instr) {
4535  DCHECK(ToRegister(instr->context()).is(cp));
4536  DCHECK(ToRegister(instr->left()).is(a1));
4537  DCHECK(ToRegister(instr->right()).is(a0));
4538  StringAddStub stub(isolate(),
4539                     instr->hydrogen()->flags(),
4540                     instr->hydrogen()->pretenure_flag());
4541  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4542}
4543
4544
4545void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4546  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
4547   public:
4548    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4549        : LDeferredCode(codegen), instr_(instr) { }
4550    virtual void Generate() OVERRIDE {
4551      codegen()->DoDeferredStringCharCodeAt(instr_);
4552    }
4553    virtual LInstruction* instr() OVERRIDE { return instr_; }
4554   private:
4555    LStringCharCodeAt* instr_;
4556  };
4557
4558  DeferredStringCharCodeAt* deferred =
4559      new(zone()) DeferredStringCharCodeAt(this, instr);
4560  StringCharLoadGenerator::Generate(masm(),
4561                                    ToRegister(instr->string()),
4562                                    ToRegister(instr->index()),
4563                                    ToRegister(instr->result()),
4564                                    deferred->entry());
4565  __ bind(deferred->exit());
4566}
4567
4568
4569void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4570  Register string = ToRegister(instr->string());
4571  Register result = ToRegister(instr->result());
4572  Register scratch = scratch0();
4573
4574  // TODO(3095996): Get rid of this. For now, we need to make the
4575  // result register contain a valid pointer because it is already
4576  // contained in the register pointer map.
4577  __ mov(result, zero_reg);
4578
4579  PushSafepointRegistersScope scope(this);
4580  __ push(string);
4581  // Push the index as a smi. This is safe because of the checks in
4582  // DoStringCharCodeAt above.
4583  if (instr->index()->IsConstantOperand()) {
4584    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4585    __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
4586    __ push(scratch);
4587  } else {
4588    Register index = ToRegister(instr->index());
4589    __ SmiTag(index);
4590    __ push(index);
4591  }
4592  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4593                          instr->context());
4594  __ AssertSmi(v0);
4595  __ SmiUntag(v0);
4596  __ StoreToSafepointRegisterSlot(v0, result);
4597}
4598
4599
4600void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4601  class DeferredStringCharFromCode FINAL : public LDeferredCode {
4602   public:
4603    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4604        : LDeferredCode(codegen), instr_(instr) { }
4605    virtual void Generate() OVERRIDE {
4606      codegen()->DoDeferredStringCharFromCode(instr_);
4607    }
4608    virtual LInstruction* instr() OVERRIDE { return instr_; }
4609   private:
4610    LStringCharFromCode* instr_;
4611  };
4612
4613  DeferredStringCharFromCode* deferred =
4614      new(zone()) DeferredStringCharFromCode(this, instr);
4615
4616  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4617  Register char_code = ToRegister(instr->char_code());
4618  Register result = ToRegister(instr->result());
4619  Register scratch = scratch0();
4620  DCHECK(!char_code.is(result));
4621
4622  __ Branch(deferred->entry(), hi,
4623            char_code, Operand(String::kMaxOneByteCharCode));
4624  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4625  __ dsll(scratch, char_code, kPointerSizeLog2);
4626  __ Daddu(result, result, scratch);
4627  __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4628  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4629  __ Branch(deferred->entry(), eq, result, Operand(scratch));
4630  __ bind(deferred->exit());
4631}
4632
4633
4634void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4635  Register char_code = ToRegister(instr->char_code());
4636  Register result = ToRegister(instr->result());
4637
4638  // TODO(3095996): Get rid of this. For now, we need to make the
4639  // result register contain a valid pointer because it is already
4640  // contained in the register pointer map.
4641  __ mov(result, zero_reg);
4642
4643  PushSafepointRegistersScope scope(this);
4644  __ SmiTag(char_code);
4645  __ push(char_code);
4646  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4647  __ StoreToSafepointRegisterSlot(v0, result);
4648}
4649
4650
4651void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4652  LOperand* input = instr->value();
4653  DCHECK(input->IsRegister() || input->IsStackSlot());
4654  LOperand* output = instr->result();
4655  DCHECK(output->IsDoubleRegister());
4656  FPURegister single_scratch = double_scratch0().low();
4657  if (input->IsStackSlot()) {
4658    Register scratch = scratch0();
4659    __ ld(scratch, ToMemOperand(input));
4660    __ mtc1(scratch, single_scratch);
4661  } else {
4662    __ mtc1(ToRegister(input), single_scratch);
4663  }
4664  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4665}
4666
4667
4668void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4669  LOperand* input = instr->value();
4670  LOperand* output = instr->result();
4671
4672  FPURegister dbl_scratch = double_scratch0();
4673  __ mtc1(ToRegister(input), dbl_scratch);
4674  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);  // TODO(plind): f22?
4675}
4676
4677
4678void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4679  class DeferredNumberTagU FINAL : public LDeferredCode {
4680   public:
4681    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4682        : LDeferredCode(codegen), instr_(instr) { }
4683    virtual void Generate() OVERRIDE {
4684      codegen()->DoDeferredNumberTagIU(instr_,
4685                                       instr_->value(),
4686                                       instr_->temp1(),
4687                                       instr_->temp2(),
4688                                       UNSIGNED_INT32);
4689    }
4690    virtual LInstruction* instr() OVERRIDE { return instr_; }
4691   private:
4692    LNumberTagU* instr_;
4693  };
4694
4695  Register input = ToRegister(instr->value());
4696  Register result = ToRegister(instr->result());
4697
4698  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4699  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
4700  __ SmiTag(result, input);
4701  __ bind(deferred->exit());
4702}
4703
4704
4705void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4706                                     LOperand* value,
4707                                     LOperand* temp1,
4708                                     LOperand* temp2,
4709                                     IntegerSignedness signedness) {
4710  Label done, slow;
4711  Register src = ToRegister(value);
4712  Register dst = ToRegister(instr->result());
4713  Register tmp1 = scratch0();
4714  Register tmp2 = ToRegister(temp1);
4715  Register tmp3 = ToRegister(temp2);
4716  DoubleRegister dbl_scratch = double_scratch0();
4717
4718  if (signedness == SIGNED_INT32) {
4719    // There was overflow, so bits 30 and 31 of the original integer
4720    // disagree. Try to allocate a heap number in new space and store
4721    // the value in there. If that fails, call the runtime system.
4722    if (dst.is(src)) {
4723      __ SmiUntag(src, dst);
4724      __ Xor(src, src, Operand(0x80000000));
4725    }
4726    __ mtc1(src, dbl_scratch);
4727    __ cvt_d_w(dbl_scratch, dbl_scratch);
4728  } else {
4729    __ mtc1(src, dbl_scratch);
4730    __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
4731  }
4732
4733  if (FLAG_inline_new) {
4734    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4735    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT);
4736    __ Branch(&done);
4737  }
4738
4739  // Slow case: Call the runtime system to do the number allocation.
4740  __ bind(&slow);
4741  {
4742    // TODO(3095996): Put a valid pointer value in the stack slot where the
4743    // result register is stored, as this register is in the pointer map, but
4744    // contains an integer value.
4745    __ mov(dst, zero_reg);
4746    // Preserve the value of all registers.
4747    PushSafepointRegistersScope scope(this);
4748
4749    // NumberTagI and NumberTagD use the context from the frame, rather than
4750    // the environment's HContext or HInlinedContext value.
4751    // They only call Runtime::kAllocateHeapNumber.
4752    // The corresponding HChange instructions are added in a phase that does
4753    // not have easy access to the local context.
4754    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4755    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4756    RecordSafepointWithRegisters(
4757        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4758    __ StoreToSafepointRegisterSlot(v0, dst);
4759  }
4760
4761  // Done. Put the value in dbl_scratch into the value of the allocated heap
4762  // number.
4763  __ bind(&done);
4764  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4765}
4766
4767
4768void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4769  class DeferredNumberTagD FINAL : public LDeferredCode {
4770   public:
4771    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4772        : LDeferredCode(codegen), instr_(instr) { }
4773    virtual void Generate() OVERRIDE {
4774      codegen()->DoDeferredNumberTagD(instr_);
4775    }
4776    virtual LInstruction* instr() OVERRIDE { return instr_; }
4777   private:
4778    LNumberTagD* instr_;
4779  };
4780
4781  DoubleRegister input_reg = ToDoubleRegister(instr->value());
4782  Register scratch = scratch0();
4783  Register reg = ToRegister(instr->result());
4784  Register temp1 = ToRegister(instr->temp());
4785  Register temp2 = ToRegister(instr->temp2());
4786
4787  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4788  if (FLAG_inline_new) {
4789    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4790    // We want the untagged address first for performance
4791    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4792                          DONT_TAG_RESULT);
4793  } else {
4794    __ Branch(deferred->entry());
4795  }
4796  __ bind(deferred->exit());
4797  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
4798  // Now that we have finished with the object's real address tag it
4799  __ Daddu(reg, reg, kHeapObjectTag);
4800}
4801
4802
4803void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4804  // TODO(3095996): Get rid of this. For now, we need to make the
4805  // result register contain a valid pointer because it is already
4806  // contained in the register pointer map.
4807  Register reg = ToRegister(instr->result());
4808  __ mov(reg, zero_reg);
4809
4810  PushSafepointRegistersScope scope(this);
4811  // NumberTagI and NumberTagD use the context from the frame, rather than
4812  // the environment's HContext or HInlinedContext value.
4813  // They only call Runtime::kAllocateHeapNumber.
4814  // The corresponding HChange instructions are added in a phase that does
4815  // not have easy access to the local context.
4816  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4817  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4818  RecordSafepointWithRegisters(
4819      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4820  __ Dsubu(v0, v0, kHeapObjectTag);
4821  __ StoreToSafepointRegisterSlot(v0, reg);
4822}
4823
4824
4825void LCodeGen::DoSmiTag(LSmiTag* instr) {
4826  HChange* hchange = instr->hydrogen();
4827  Register input = ToRegister(instr->value());
4828  Register output = ToRegister(instr->result());
4829  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4830      hchange->value()->CheckFlag(HValue::kUint32)) {
4831    __ And(at, input, Operand(0x80000000));
4832    DeoptimizeIf(ne, instr, at, Operand(zero_reg));
4833  }
4834  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4835      !hchange->value()->CheckFlag(HValue::kUint32)) {
4836    __ SmiTagCheckOverflow(output, input, at);
4837    DeoptimizeIf(lt, instr, at, Operand(zero_reg));
4838  } else {
4839    __ SmiTag(output, input);
4840  }
4841}
4842
4843
4844void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4845  Register scratch = scratch0();
4846  Register input = ToRegister(instr->value());
4847  Register result = ToRegister(instr->result());
4848  if (instr->needs_check()) {
4849    STATIC_ASSERT(kHeapObjectTag == 1);
4850    // If the input is a HeapObject, value of scratch won't be zero.
4851    __ And(scratch, input, Operand(kHeapObjectTag));
4852    __ SmiUntag(result, input);
4853    DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
4854  } else {
4855    __ SmiUntag(result, input);
4856  }
4857}
4858
4859
4860void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4861                                DoubleRegister result_reg,
4862                                NumberUntagDMode mode) {
4863  bool can_convert_undefined_to_nan =
4864      instr->hydrogen()->can_convert_undefined_to_nan();
4865  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4866
4867  Register scratch = scratch0();
4868  Label convert, load_smi, done;
4869  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4870    // Smi check.
4871    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4872    // Heap number map check.
4873    __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4874    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4875    if (can_convert_undefined_to_nan) {
4876      __ Branch(&convert, ne, scratch, Operand(at));
4877    } else {
4878      DeoptimizeIf(ne, instr, scratch, Operand(at));
4879    }
4880    // Load heap number.
4881    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4882    if (deoptimize_on_minus_zero) {
4883      __ mfc1(at, result_reg);
4884      __ Branch(&done, ne, at, Operand(zero_reg));
4885      __ mfhc1(scratch, result_reg);  // Get exponent/sign bits.
4886      DeoptimizeIf(eq, instr, scratch, Operand(HeapNumber::kSignMask));
4887    }
4888    __ Branch(&done);
4889    if (can_convert_undefined_to_nan) {
4890      __ bind(&convert);
4891      // Convert undefined (and hole) to NaN.
4892      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4893      DeoptimizeIf(ne, instr, input_reg, Operand(at));
4894      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4895      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4896      __ Branch(&done);
4897    }
4898  } else {
4899    __ SmiUntag(scratch, input_reg);
4900    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4901  }
4902  // Smi to double register conversion
4903  __ bind(&load_smi);
4904  // scratch: untagged value of input_reg
4905  __ mtc1(scratch, result_reg);
4906  __ cvt_d_w(result_reg, result_reg);
4907  __ bind(&done);
4908}
4909
4910
4911void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4912  Register input_reg = ToRegister(instr->value());
4913  Register scratch1 = scratch0();
4914  Register scratch2 = ToRegister(instr->temp());
4915  DoubleRegister double_scratch = double_scratch0();
4916  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4917
4918  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4919  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4920
4921  Label done;
4922
4923  // The input is a tagged HeapObject.
4924  // Heap number map check.
4925  __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4926  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4927  // This 'at' value and scratch1 map value are used for tests in both clauses
4928  // of the if.
4929
4930  if (instr->truncating()) {
4931    // Performs a truncating conversion of a floating point number as used by
4932    // the JS bitwise operations.
4933    Label no_heap_number, check_bools, check_false;
4934    // Check HeapNumber map.
4935    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
4936    __ mov(scratch2, input_reg);  // In delay slot.
4937    __ TruncateHeapNumberToI(input_reg, scratch2);
4938    __ Branch(&done);
4939
4940    // Check for Oddballs. Undefined/False is converted to zero and True to one
4941    // for truncating conversions.
4942    __ bind(&no_heap_number);
4943    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4944    __ Branch(&check_bools, ne, input_reg, Operand(at));
4945    DCHECK(ToRegister(instr->result()).is(input_reg));
4946    __ Branch(USE_DELAY_SLOT, &done);
4947    __ mov(input_reg, zero_reg);  // In delay slot.
4948
4949    __ bind(&check_bools);
4950    __ LoadRoot(at, Heap::kTrueValueRootIndex);
4951    __ Branch(&check_false, ne, scratch2, Operand(at));
4952    __ Branch(USE_DELAY_SLOT, &done);
4953    __ li(input_reg, Operand(1));  // In delay slot.
4954
4955    __ bind(&check_false);
4956    __ LoadRoot(at, Heap::kFalseValueRootIndex);
4957    DeoptimizeIf(ne, instr, scratch2, Operand(at), "cannot truncate");
4958    __ Branch(USE_DELAY_SLOT, &done);
4959    __ mov(input_reg, zero_reg);  // In delay slot.
4960  } else {
4961    DeoptimizeIf(ne, instr, scratch1, Operand(at), "not a heap number");
4962
4963    // Load the double value.
4964    __ ldc1(double_scratch,
4965            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4966
4967    Register except_flag = scratch2;
4968    __ EmitFPUTruncate(kRoundToZero,
4969                       input_reg,
4970                       double_scratch,
4971                       scratch1,
4972                       double_scratch2,
4973                       except_flag,
4974                       kCheckForInexactConversion);
4975
4976    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg),
4977                 "lost precision or NaN");
4978
4979    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4980      __ Branch(&done, ne, input_reg, Operand(zero_reg));
4981
4982      __ mfhc1(scratch1, double_scratch);  // Get exponent/sign bits.
4983      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4984      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg), "minus zero");
4985    }
4986  }
4987  __ bind(&done);
4988}
4989
4990
4991void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4992  class DeferredTaggedToI FINAL : public LDeferredCode {
4993   public:
4994    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4995        : LDeferredCode(codegen), instr_(instr) { }
4996    virtual void Generate() OVERRIDE {
4997      codegen()->DoDeferredTaggedToI(instr_);
4998    }
4999    virtual LInstruction* instr() OVERRIDE { return instr_; }
5000   private:
5001    LTaggedToI* instr_;
5002  };
5003
5004  LOperand* input = instr->value();
5005  DCHECK(input->IsRegister());
5006  DCHECK(input->Equals(instr->result()));
5007
5008  Register input_reg = ToRegister(input);
5009
5010  if (instr->hydrogen()->value()->representation().IsSmi()) {
5011    __ SmiUntag(input_reg);
5012  } else {
5013    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5014
5015    // Let the deferred code handle the HeapObject case.
5016    __ JumpIfNotSmi(input_reg, deferred->entry());
5017
5018    // Smi to int32 conversion.
5019    __ SmiUntag(input_reg);
5020    __ bind(deferred->exit());
5021  }
5022}
5023
5024
5025void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5026  LOperand* input = instr->value();
5027  DCHECK(input->IsRegister());
5028  LOperand* result = instr->result();
5029  DCHECK(result->IsDoubleRegister());
5030
5031  Register input_reg = ToRegister(input);
5032  DoubleRegister result_reg = ToDoubleRegister(result);
5033
5034  HValue* value = instr->hydrogen()->value();
5035  NumberUntagDMode mode = value->representation().IsSmi()
5036      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5037
5038  EmitNumberUntagD(instr, input_reg, result_reg, mode);
5039}
5040
5041
5042void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5043  Register result_reg = ToRegister(instr->result());
5044  Register scratch1 = scratch0();
5045  DoubleRegister double_input = ToDoubleRegister(instr->value());
5046
5047  if (instr->truncating()) {
5048    __ TruncateDoubleToI(result_reg, double_input);
5049  } else {
5050    Register except_flag = LCodeGen::scratch1();
5051
5052    __ EmitFPUTruncate(kRoundToMinusInf,
5053                       result_reg,
5054                       double_input,
5055                       scratch1,
5056                       double_scratch0(),
5057                       except_flag,
5058                       kCheckForInexactConversion);
5059
5060    // Deopt if the operation did not succeed (except_flag != 0).
5061    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
5062
5063    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5064      Label done;
5065      __ Branch(&done, ne, result_reg, Operand(zero_reg));
5066      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
5067      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
5068      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
5069      __ bind(&done);
5070    }
5071  }
5072}
5073
5074
5075void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5076  Register result_reg = ToRegister(instr->result());
5077  Register scratch1 = LCodeGen::scratch0();
5078  DoubleRegister double_input = ToDoubleRegister(instr->value());
5079
5080  if (instr->truncating()) {
5081    __ TruncateDoubleToI(result_reg, double_input);
5082  } else {
5083    Register except_flag = LCodeGen::scratch1();
5084
5085    __ EmitFPUTruncate(kRoundToMinusInf,
5086                       result_reg,
5087                       double_input,
5088                       scratch1,
5089                       double_scratch0(),
5090                       except_flag,
5091                       kCheckForInexactConversion);
5092
5093    // Deopt if the operation did not succeed (except_flag != 0).
5094    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
5095
5096    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5097      Label done;
5098      __ Branch(&done, ne, result_reg, Operand(zero_reg));
5099      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
5100      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
5101      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
5102      __ bind(&done);
5103    }
5104  }
5105  __ SmiTag(result_reg, result_reg);
5106}
5107
5108
5109void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5110  LOperand* input = instr->value();
5111  __ SmiTst(ToRegister(input), at);
5112  DeoptimizeIf(ne, instr, at, Operand(zero_reg));
5113}
5114
5115
5116void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5117  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5118    LOperand* input = instr->value();
5119    __ SmiTst(ToRegister(input), at);
5120    DeoptimizeIf(eq, instr, at, Operand(zero_reg));
5121  }
5122}
5123
5124
5125void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5126  Register input = ToRegister(instr->value());
5127  Register scratch = scratch0();
5128
5129  __ GetObjectType(input, scratch, scratch);
5130
5131  if (instr->hydrogen()->is_interval_check()) {
5132    InstanceType first;
5133    InstanceType last;
5134    instr->hydrogen()->GetCheckInterval(&first, &last);
5135
5136    // If there is only one type in the interval check for equality.
5137    if (first == last) {
5138      DeoptimizeIf(ne, instr, scratch, Operand(first));
5139    } else {
5140      DeoptimizeIf(lo, instr, scratch, Operand(first));
5141      // Omit check for the last type.
5142      if (last != LAST_TYPE) {
5143        DeoptimizeIf(hi, instr, scratch, Operand(last));
5144      }
5145    }
5146  } else {
5147    uint8_t mask;
5148    uint8_t tag;
5149    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5150
5151    if (base::bits::IsPowerOfTwo32(mask)) {
5152      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5153      __ And(at, scratch, mask);
5154      DeoptimizeIf(tag == 0 ? ne : eq, instr, at, Operand(zero_reg));
5155    } else {
5156      __ And(scratch, scratch, Operand(mask));
5157      DeoptimizeIf(ne, instr, scratch, Operand(tag));
5158    }
5159  }
5160}
5161
5162
5163void LCodeGen::DoCheckValue(LCheckValue* instr) {
5164  Register reg = ToRegister(instr->value());
5165  Handle<HeapObject> object = instr->hydrogen()->object().handle();
5166  AllowDeferredHandleDereference smi_check;
5167  if (isolate()->heap()->InNewSpace(*object)) {
5168    Register reg = ToRegister(instr->value());
5169    Handle<Cell> cell = isolate()->factory()->NewCell(object);
5170    __ li(at, Operand(Handle<Object>(cell)));
5171    __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
5172    DeoptimizeIf(ne, instr, reg, Operand(at));
5173  } else {
5174    DeoptimizeIf(ne, instr, reg, Operand(object));
5175  }
5176}
5177
5178
5179void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5180  {
5181    PushSafepointRegistersScope scope(this);
5182    __ push(object);
5183    __ mov(cp, zero_reg);
5184    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5185    RecordSafepointWithRegisters(
5186        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5187    __ StoreToSafepointRegisterSlot(v0, scratch0());
5188  }
5189  __ SmiTst(scratch0(), at);
5190  DeoptimizeIf(eq, instr, at, Operand(zero_reg));
5191}
5192
5193
5194void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5195  class DeferredCheckMaps FINAL : public LDeferredCode {
5196   public:
5197    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5198        : LDeferredCode(codegen), instr_(instr), object_(object) {
5199      SetExit(check_maps());
5200    }
5201    virtual void Generate() OVERRIDE {
5202      codegen()->DoDeferredInstanceMigration(instr_, object_);
5203    }
5204    Label* check_maps() { return &check_maps_; }
5205    virtual LInstruction* instr() OVERRIDE { return instr_; }
5206   private:
5207    LCheckMaps* instr_;
5208    Label check_maps_;
5209    Register object_;
5210  };
5211
5212  if (instr->hydrogen()->IsStabilityCheck()) {
5213    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5214    for (int i = 0; i < maps->size(); ++i) {
5215      AddStabilityDependency(maps->at(i).handle());
5216    }
5217    return;
5218  }
5219
5220  Register map_reg = scratch0();
5221  LOperand* input = instr->value();
5222  DCHECK(input->IsRegister());
5223  Register reg = ToRegister(input);
5224  __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5225
5226  DeferredCheckMaps* deferred = NULL;
5227  if (instr->hydrogen()->HasMigrationTarget()) {
5228    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5229    __ bind(deferred->check_maps());
5230  }
5231
5232  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5233  Label success;
5234  for (int i = 0; i < maps->size() - 1; i++) {
5235    Handle<Map> map = maps->at(i).handle();
5236    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
5237  }
5238  Handle<Map> map = maps->at(maps->size() - 1).handle();
5239  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
5240  if (instr->hydrogen()->HasMigrationTarget()) {
5241    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
5242  } else {
5243    DeoptimizeIf(ne, instr, map_reg, Operand(map));
5244  }
5245
5246  __ bind(&success);
5247}
5248
5249
5250void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5251  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5252  Register result_reg = ToRegister(instr->result());
5253  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5254  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
5255}
5256
5257
5258void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5259  Register unclamped_reg = ToRegister(instr->unclamped());
5260  Register result_reg = ToRegister(instr->result());
5261  __ ClampUint8(result_reg, unclamped_reg);
5262}
5263
5264
5265void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5266  Register scratch = scratch0();
5267  Register input_reg = ToRegister(instr->unclamped());
5268  Register result_reg = ToRegister(instr->result());
5269  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5270  Label is_smi, done, heap_number;
5271
5272  // Both smi and heap number cases are handled.
5273  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
5274
5275  // Check for heap number
5276  __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5277  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
5278
5279  // Check for undefined. Undefined is converted to zero for clamping
5280  // conversions.
5281  DeoptimizeIf(ne, instr, input_reg, Operand(factory()->undefined_value()));
5282  __ mov(result_reg, zero_reg);
5283  __ jmp(&done);
5284
5285  // Heap number
5286  __ bind(&heap_number);
5287  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
5288                                             HeapNumber::kValueOffset));
5289  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
5290  __ jmp(&done);
5291
5292  __ bind(&is_smi);
5293  __ ClampUint8(result_reg, scratch);
5294
5295  __ bind(&done);
5296}
5297
5298
5299void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5300  DoubleRegister value_reg = ToDoubleRegister(instr->value());
5301  Register result_reg = ToRegister(instr->result());
5302  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5303    __ FmoveHigh(result_reg, value_reg);
5304  } else {
5305    __ FmoveLow(result_reg, value_reg);
5306  }
5307}
5308
5309
5310void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5311  Register hi_reg = ToRegister(instr->hi());
5312  Register lo_reg = ToRegister(instr->lo());
5313  DoubleRegister result_reg = ToDoubleRegister(instr->result());
5314  __ Move(result_reg, lo_reg, hi_reg);
5315}
5316
5317
5318void LCodeGen::DoAllocate(LAllocate* instr) {
5319  class DeferredAllocate FINAL : public LDeferredCode {
5320   public:
5321    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5322        : LDeferredCode(codegen), instr_(instr) { }
5323    virtual void Generate() OVERRIDE {
5324      codegen()->DoDeferredAllocate(instr_);
5325    }
5326    virtual LInstruction* instr() OVERRIDE { return instr_; }
5327   private:
5328    LAllocate* instr_;
5329  };
5330
5331  DeferredAllocate* deferred =
5332      new(zone()) DeferredAllocate(this, instr);
5333
5334  Register result = ToRegister(instr->result());
5335  Register scratch = ToRegister(instr->temp1());
5336  Register scratch2 = ToRegister(instr->temp2());
5337
5338  // Allocate memory for the object.
5339  AllocationFlags flags = TAG_OBJECT;
5340  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5341    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5342  }
5343  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5344    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5345    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5346    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5347  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5348    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5349    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5350  }
5351  if (instr->size()->IsConstantOperand()) {
5352    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5353    if (size <= Page::kMaxRegularHeapObjectSize) {
5354      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5355    } else {
5356      __ jmp(deferred->entry());
5357    }
5358  } else {
5359    Register size = ToRegister(instr->size());
5360    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5361  }
5362
5363  __ bind(deferred->exit());
5364
5365  if (instr->hydrogen()->MustPrefillWithFiller()) {
5366    STATIC_ASSERT(kHeapObjectTag == 1);
5367    if (instr->size()->IsConstantOperand()) {
5368      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5369      __ li(scratch, Operand(size - kHeapObjectTag));
5370    } else {
5371      __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5372    }
5373    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5374    Label loop;
5375    __ bind(&loop);
5376    __ Dsubu(scratch, scratch, Operand(kPointerSize));
5377    __ Daddu(at, result, Operand(scratch));
5378    __ sd(scratch2, MemOperand(at));
5379    __ Branch(&loop, ge, scratch, Operand(zero_reg));
5380  }
5381}
5382
5383
5384void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5385  Register result = ToRegister(instr->result());
5386
5387  // TODO(3095996): Get rid of this. For now, we need to make the
5388  // result register contain a valid pointer because it is already
5389  // contained in the register pointer map.
5390  __ mov(result, zero_reg);
5391
5392  PushSafepointRegistersScope scope(this);
5393  if (instr->size()->IsRegister()) {
5394    Register size = ToRegister(instr->size());
5395    DCHECK(!size.is(result));
5396    __ SmiTag(size);
5397    __ push(size);
5398  } else {
5399    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5400    if (size >= 0 && size <= Smi::kMaxValue) {
5401      __ li(v0, Operand(Smi::FromInt(size)));
5402      __ Push(v0);
5403    } else {
5404      // We should never get here at runtime => abort
5405      __ stop("invalid allocation size");
5406      return;
5407    }
5408  }
5409
5410  int flags = AllocateDoubleAlignFlag::encode(
5411      instr->hydrogen()->MustAllocateDoubleAligned());
5412  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5413    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
5414    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5415    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5416  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5417    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5418    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5419  } else {
5420    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5421  }
5422  __ li(v0, Operand(Smi::FromInt(flags)));
5423  __ Push(v0);
5424
5425  CallRuntimeFromDeferred(
5426      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5427  __ StoreToSafepointRegisterSlot(v0, result);
5428}
5429
5430
5431void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5432  DCHECK(ToRegister(instr->value()).is(a0));
5433  DCHECK(ToRegister(instr->result()).is(v0));
5434  __ push(a0);
5435  CallRuntime(Runtime::kToFastProperties, 1, instr);
5436}
5437
5438
5439void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5440  DCHECK(ToRegister(instr->context()).is(cp));
5441  Label materialized;
5442  // Registers will be used as follows:
5443  // a7 = literals array.
5444  // a1 = regexp literal.
5445  // a0 = regexp literal clone.
5446  // a2 and a4-a6 are used as temporaries.
5447  int literal_offset =
5448      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5449  __ li(a7, instr->hydrogen()->literals());
5450  __ ld(a1, FieldMemOperand(a7, literal_offset));
5451  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5452  __ Branch(&materialized, ne, a1, Operand(at));
5453
5454  // Create regexp literal using runtime function
5455  // Result will be in v0.
5456  __ li(a6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5457  __ li(a5, Operand(instr->hydrogen()->pattern()));
5458  __ li(a4, Operand(instr->hydrogen()->flags()));
5459  __ Push(a7, a6, a5, a4);
5460  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5461  __ mov(a1, v0);
5462
5463  __ bind(&materialized);
5464  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5465  Label allocated, runtime_allocate;
5466
5467  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
5468  __ jmp(&allocated);
5469
5470  __ bind(&runtime_allocate);
5471  __ li(a0, Operand(Smi::FromInt(size)));
5472  __ Push(a1, a0);
5473  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5474  __ pop(a1);
5475
5476  __ bind(&allocated);
5477  // Copy the content into the newly allocated memory.
5478  // (Unroll copy loop once for better throughput).
5479  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5480    __ ld(a3, FieldMemOperand(a1, i));
5481    __ ld(a2, FieldMemOperand(a1, i + kPointerSize));
5482    __ sd(a3, FieldMemOperand(v0, i));
5483    __ sd(a2, FieldMemOperand(v0, i + kPointerSize));
5484  }
5485  if ((size % (2 * kPointerSize)) != 0) {
5486    __ ld(a3, FieldMemOperand(a1, size - kPointerSize));
5487    __ sd(a3, FieldMemOperand(v0, size - kPointerSize));
5488  }
5489}
5490
5491
5492void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5493  DCHECK(ToRegister(instr->context()).is(cp));
5494  // Use the fast case closure allocation code that allocates in new
5495  // space for nested functions that don't need literals cloning.
5496  bool pretenure = instr->hydrogen()->pretenure();
5497  if (!pretenure && instr->hydrogen()->has_no_literals()) {
5498    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
5499                            instr->hydrogen()->kind());
5500    __ li(a2, Operand(instr->hydrogen()->shared_info()));
5501    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5502  } else {
5503    __ li(a2, Operand(instr->hydrogen()->shared_info()));
5504    __ li(a1, Operand(pretenure ? factory()->true_value()
5505                                : factory()->false_value()));
5506    __ Push(cp, a2, a1);
5507    CallRuntime(Runtime::kNewClosure, 3, instr);
5508  }
5509}
5510
5511
5512void LCodeGen::DoTypeof(LTypeof* instr) {
5513  DCHECK(ToRegister(instr->result()).is(v0));
5514  Register input = ToRegister(instr->value());
5515  __ push(input);
5516  CallRuntime(Runtime::kTypeof, 1, instr);
5517}
5518
5519
5520void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5521  Register input = ToRegister(instr->value());
5522
5523  Register cmp1 = no_reg;
5524  Operand cmp2 = Operand(no_reg);
5525
5526  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5527                                                  instr->FalseLabel(chunk_),
5528                                                  input,
5529                                                  instr->type_literal(),
5530                                                  &cmp1,
5531                                                  &cmp2);
5532
5533  DCHECK(cmp1.is_valid());
5534  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
5535
5536  if (final_branch_condition != kNoCondition) {
5537    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
5538  }
5539}
5540
5541
5542Condition LCodeGen::EmitTypeofIs(Label* true_label,
5543                                 Label* false_label,
5544                                 Register input,
5545                                 Handle<String> type_name,
5546                                 Register* cmp1,
5547                                 Operand* cmp2) {
5548  // This function utilizes the delay slot heavily. This is used to load
5549  // values that are always usable without depending on the type of the input
5550  // register.
5551  Condition final_branch_condition = kNoCondition;
5552  Register scratch = scratch0();
5553  Factory* factory = isolate()->factory();
5554  if (String::Equals(type_name, factory->number_string())) {
5555    __ JumpIfSmi(input, true_label);
5556    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
5557    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5558    *cmp1 = input;
5559    *cmp2 = Operand(at);
5560    final_branch_condition = eq;
5561
5562  } else if (String::Equals(type_name, factory->string_string())) {
5563    __ JumpIfSmi(input, false_label);
5564    __ GetObjectType(input, input, scratch);
5565    __ Branch(USE_DELAY_SLOT, false_label,
5566              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
5567    // input is an object so we can load the BitFieldOffset even if we take the
5568    // other branch.
5569    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5570    __ And(at, at, 1 << Map::kIsUndetectable);
5571    *cmp1 = at;
5572    *cmp2 = Operand(zero_reg);
5573    final_branch_condition = eq;
5574
5575  } else if (String::Equals(type_name, factory->symbol_string())) {
5576    __ JumpIfSmi(input, false_label);
5577    __ GetObjectType(input, input, scratch);
5578    *cmp1 = scratch;
5579    *cmp2 = Operand(SYMBOL_TYPE);
5580    final_branch_condition = eq;
5581
5582  } else if (String::Equals(type_name, factory->boolean_string())) {
5583    __ LoadRoot(at, Heap::kTrueValueRootIndex);
5584    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5585    __ LoadRoot(at, Heap::kFalseValueRootIndex);
5586    *cmp1 = at;
5587    *cmp2 = Operand(input);
5588    final_branch_condition = eq;
5589
5590  } else if (String::Equals(type_name, factory->undefined_string())) {
5591    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5592    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5593    // The first instruction of JumpIfSmi is an And - it is safe in the delay
5594    // slot.
5595    __ JumpIfSmi(input, false_label);
5596    // Check for undetectable objects => true.
5597    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
5598    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5599    __ And(at, at, 1 << Map::kIsUndetectable);
5600    *cmp1 = at;
5601    *cmp2 = Operand(zero_reg);
5602    final_branch_condition = ne;
5603
5604  } else if (String::Equals(type_name, factory->function_string())) {
5605    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5606    __ JumpIfSmi(input, false_label);
5607    __ GetObjectType(input, scratch, input);
5608    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
5609    *cmp1 = input;
5610    *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
5611    final_branch_condition = eq;
5612
5613  } else if (String::Equals(type_name, factory->object_string())) {
5614    __ JumpIfSmi(input, false_label);
5615    __ LoadRoot(at, Heap::kNullValueRootIndex);
5616    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5617    Register map = input;
5618    __ GetObjectType(input, map, scratch);
5619    __ Branch(false_label,
5620              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
5621    __ Branch(USE_DELAY_SLOT, false_label,
5622              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
5623    // map is still valid, so the BitField can be loaded in delay slot.
5624    // Check for undetectable objects => false.
5625    __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
5626    __ And(at, at, 1 << Map::kIsUndetectable);
5627    *cmp1 = at;
5628    *cmp2 = Operand(zero_reg);
5629    final_branch_condition = eq;
5630
5631  } else {
5632    *cmp1 = at;
5633    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
5634    __ Branch(false_label);
5635  }
5636
5637  return final_branch_condition;
5638}
5639
5640
5641void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5642  Register temp1 = ToRegister(instr->temp());
5643
5644  EmitIsConstructCall(temp1, scratch0());
5645
5646  EmitBranch(instr, eq, temp1,
5647             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5648}
5649
5650
5651void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5652  DCHECK(!temp1.is(temp2));
5653  // Get the frame pointer for the calling frame.
5654  __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5655
5656  // Skip the arguments adaptor frame if it exists.
5657  Label check_frame_marker;
5658  __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5659  __ Branch(&check_frame_marker, ne, temp2,
5660            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5661  __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5662
5663  // Check the marker in the calling frame.
5664  __ bind(&check_frame_marker);
5665  __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5666}
5667
5668
5669void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5670  if (!info()->IsStub()) {
5671    // Ensure that we have enough space after the previous lazy-bailout
5672    // instruction for patching the code here.
5673    int current_pc = masm()->pc_offset();
5674    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5675      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5676      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5677      while (padding_size > 0) {
5678        __ nop();
5679        padding_size -= Assembler::kInstrSize;
5680      }
5681    }
5682  }
5683  last_lazy_deopt_pc_ = masm()->pc_offset();
5684}
5685
5686
5687void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5688  last_lazy_deopt_pc_ = masm()->pc_offset();
5689  DCHECK(instr->HasEnvironment());
5690  LEnvironment* env = instr->environment();
5691  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5692  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5693}
5694
5695
5696void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5697  Deoptimizer::BailoutType type = instr->hydrogen()->type();
5698  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5699  // needed return address), even though the implementation of LAZY and EAGER is
5700  // now identical. When LAZY is eventually completely folded into EAGER, remove
5701  // the special case below.
5702  if (info()->IsStub() && type == Deoptimizer::EAGER) {
5703    type = Deoptimizer::LAZY;
5704  }
5705
5706  DeoptimizeIf(al, instr, type, zero_reg, Operand(zero_reg),
5707               instr->hydrogen()->reason());
5708}
5709
5710
5711void LCodeGen::DoDummy(LDummy* instr) {
5712  // Nothing to see here, move on!
5713}
5714
5715
5716void LCodeGen::DoDummyUse(LDummyUse* instr) {
5717  // Nothing to see here, move on!
5718}
5719
5720
5721void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5722  PushSafepointRegistersScope scope(this);
5723  LoadContextFromDeferred(instr->context());
5724  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5725  RecordSafepointWithLazyDeopt(
5726      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5727  DCHECK(instr->HasEnvironment());
5728  LEnvironment* env = instr->environment();
5729  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5730}
5731
5732
5733void LCodeGen::DoStackCheck(LStackCheck* instr) {
5734  class DeferredStackCheck FINAL : public LDeferredCode {
5735   public:
5736    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5737        : LDeferredCode(codegen), instr_(instr) { }
5738    virtual void Generate() OVERRIDE {
5739      codegen()->DoDeferredStackCheck(instr_);
5740    }
5741    virtual LInstruction* instr() OVERRIDE { return instr_; }
5742   private:
5743    LStackCheck* instr_;
5744  };
5745
5746  DCHECK(instr->HasEnvironment());
5747  LEnvironment* env = instr->environment();
5748  // There is no LLazyBailout instruction for stack-checks. We have to
5749  // prepare for lazy deoptimization explicitly here.
5750  if (instr->hydrogen()->is_function_entry()) {
5751    // Perform stack overflow check.
5752    Label done;
5753    __ LoadRoot(at, Heap::kStackLimitRootIndex);
5754    __ Branch(&done, hs, sp, Operand(at));
5755    DCHECK(instr->context()->IsRegister());
5756    DCHECK(ToRegister(instr->context()).is(cp));
5757    CallCode(isolate()->builtins()->StackCheck(),
5758             RelocInfo::CODE_TARGET,
5759             instr);
5760    __ bind(&done);
5761  } else {
5762    DCHECK(instr->hydrogen()->is_backwards_branch());
5763    // Perform stack overflow check if this goto needs it before jumping.
5764    DeferredStackCheck* deferred_stack_check =
5765        new(zone()) DeferredStackCheck(this, instr);
5766    __ LoadRoot(at, Heap::kStackLimitRootIndex);
5767    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5768    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5769    __ bind(instr->done_label());
5770    deferred_stack_check->SetExit(instr->done_label());
5771    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5772    // Don't record a deoptimization index for the safepoint here.
5773    // This will be done explicitly when emitting call and the safepoint in
5774    // the deferred code.
5775  }
5776}
5777
5778
5779void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5780  // This is a pseudo-instruction that ensures that the environment here is
5781  // properly registered for deoptimization and records the assembler's PC
5782  // offset.
5783  LEnvironment* environment = instr->environment();
5784
5785  // If the environment were already registered, we would have no way of
5786  // backpatching it with the spill slot operands.
5787  DCHECK(!environment->HasBeenRegistered());
5788  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5789
5790  GenerateOsrPrologue();
5791}
5792
5793
5794void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5795  Register result = ToRegister(instr->result());
5796  Register object = ToRegister(instr->object());
5797  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5798  DeoptimizeIf(eq, instr, object, Operand(at));
5799
5800  Register null_value = a5;
5801  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5802  DeoptimizeIf(eq, instr, object, Operand(null_value));
5803
5804  __ And(at, object, kSmiTagMask);
5805  DeoptimizeIf(eq, instr, at, Operand(zero_reg));
5806
5807  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5808  __ GetObjectType(object, a1, a1);
5809  DeoptimizeIf(le, instr, a1, Operand(LAST_JS_PROXY_TYPE));
5810
5811  Label use_cache, call_runtime;
5812  DCHECK(object.is(a0));
5813  __ CheckEnumCache(null_value, &call_runtime);
5814
5815  __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
5816  __ Branch(&use_cache);
5817
5818  // Get the set of properties to enumerate.
5819  __ bind(&call_runtime);
5820  __ push(object);
5821  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5822
5823  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
5824  DCHECK(result.is(v0));
5825  __ LoadRoot(at, Heap::kMetaMapRootIndex);
5826  DeoptimizeIf(ne, instr, a1, Operand(at));
5827  __ bind(&use_cache);
5828}
5829
5830
5831void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5832  Register map = ToRegister(instr->map());
5833  Register result = ToRegister(instr->result());
5834  Label load_cache, done;
5835  __ EnumLength(result, map);
5836  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5837  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5838  __ jmp(&done);
5839
5840  __ bind(&load_cache);
5841  __ LoadInstanceDescriptors(map, result);
5842  __ ld(result,
5843        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5844  __ ld(result,
5845        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5846  DeoptimizeIf(eq, instr, result, Operand(zero_reg));
5847
5848  __ bind(&done);
5849}
5850
5851
5852void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5853  Register object = ToRegister(instr->value());
5854  Register map = ToRegister(instr->map());
5855  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5856  DeoptimizeIf(ne, instr, map, Operand(scratch0()));
5857}
5858
5859
5860void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5861                                           Register result,
5862                                           Register object,
5863                                           Register index) {
5864  PushSafepointRegistersScope scope(this);
5865  __ Push(object, index);
5866  __ mov(cp, zero_reg);
5867  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5868  RecordSafepointWithRegisters(
5869     instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5870  __ StoreToSafepointRegisterSlot(v0, result);
5871}
5872
5873
5874void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5875  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
5876   public:
5877    DeferredLoadMutableDouble(LCodeGen* codegen,
5878                              LLoadFieldByIndex* instr,
5879                              Register result,
5880                              Register object,
5881                              Register index)
5882        : LDeferredCode(codegen),
5883          instr_(instr),
5884          result_(result),
5885          object_(object),
5886          index_(index) {
5887    }
5888    virtual void Generate() OVERRIDE {
5889      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5890    }
5891    virtual LInstruction* instr() OVERRIDE { return instr_; }
5892   private:
5893    LLoadFieldByIndex* instr_;
5894    Register result_;
5895    Register object_;
5896    Register index_;
5897  };
5898
5899  Register object = ToRegister(instr->object());
5900  Register index = ToRegister(instr->index());
5901  Register result = ToRegister(instr->result());
5902  Register scratch = scratch0();
5903
5904  DeferredLoadMutableDouble* deferred;
5905  deferred = new(zone()) DeferredLoadMutableDouble(
5906      this, instr, result, object, index);
5907
5908  Label out_of_object, done;
5909
5910  __ And(scratch, index, Operand(Smi::FromInt(1)));
5911  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
5912  __ dsra(index, index, 1);
5913
5914  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5915  __ SmiScale(scratch, index, kPointerSizeLog2);  // In delay slot.
5916  __ Daddu(scratch, object, scratch);
5917  __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5918
5919  __ Branch(&done);
5920
5921  __ bind(&out_of_object);
5922  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5923  // Index is equal to negated out of object property index plus 1.
5924  __ Dsubu(scratch, result, scratch);
5925  __ ld(result, FieldMemOperand(scratch,
5926                                FixedArray::kHeaderSize - kPointerSize));
5927  __ bind(deferred->exit());
5928  __ bind(&done);
5929}
5930
5931
5932void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5933  Register context = ToRegister(instr->context());
5934  __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5935}
5936
5937
5938void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5939  Handle<ScopeInfo> scope_info = instr->scope_info();
5940  __ li(at, scope_info);
5941  __ Push(at, ToRegister(instr->function()));
5942  CallRuntime(Runtime::kPushBlockContext, 2, instr);
5943  RecordSafepoint(Safepoint::kNoLazyDeopt);
5944}
5945
5946
5947#undef __
5948
5949} }  // namespace v8::internal
5950