// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "src/v8.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/mips/lithium-codegen-mips.h"
#include "src/mips/lithium-gap-resolver-mips.h"


namespace v8 {
namespace internal {


class SafepointGenerator FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const OVERRIDE {}

  virtual void AfterCall() const OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      __ lw(a2, MemOperand(sp, receiver_offset));
      __ Branch(&ok, ne, a2, Operand(at));

      __ lw(a2, GlobalObjectOperand());
      __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));

      __ sw(a2, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Addu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Subu(a0, a0, Operand(kPointerSize));
      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in v0. It replaces the context passed to us.
    // It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Subu(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ pop(at);
        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = t9;

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->reason);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
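      // E.g. an entry at base + 96 is materialized as the immediate 96; the
      // shared code at call_deopt_entry adds the base address back exactly
      // once, keeping each table entry short.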
      __ li(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        if (needs_frame.is_bound()) {
          __ Branch(&needs_frame);
        } else {
          __ bind(&needs_frame);
          Comment(";;; call deopt with frame");
          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
          // This variant of deopt can only be used with stubs. Since we don't
          // have a function pointer to install in the stack frame that we're
          // building, install a special marker there instead.
          DCHECK(info()->IsStub());
          __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
          __ push(at);
          __ Addu(fp, sp,
                  Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
          __ bind(&call_deopt_entry);
          // Add the base address to the offset previously loaded in
          // entry_offset.
          __ Addu(entry_offset, entry_offset,
                  Operand(ExternalReference::ForDeoptEntry(base)));
          __ Call(entry_offset);
        }
      } else {
        // The last entry can fall through into `call_deopt_entry`, avoiding a
        // branch.
        bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();

        if (need_branch) __ Branch(&call_deopt_entry);
      }
    }

    if (!call_deopt_entry.is_bound()) {
      Comment(";;; call deopt");
      __ bind(&call_deopt_entry);

      if (info()->saves_caller_doubles()) {
        DCHECK(info()->IsStub());
        RestoreCallerDoubles();
      }

      // Add the base address to the offset previously loaded in entry_offset.
      __ Addu(entry_offset, entry_offset,
              Operand(ExternalReference::ForDeoptEntry(base)));
      __ Call(entry_offset);
    }
  }
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}

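// Frameless code addresses incoming arguments directly off the stack
// pointer. Spill slot indices for such parameters are negative: index -1
// maps to offset 0, index -2 to kPointerSize, and so on.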
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // There is no eager frame: retrieve the parameter relative to the stack
    // pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // There is no eager frame: retrieve the parameter relative to the stack
    // pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ lw(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1, const Operand& src2,
                            const char* detail) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
                             instr->Mnemonic(), detail);
  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through the jump table if we need to handle a condition, build a
  // frame, or restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(reason);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last().label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Register src1, const Operand& src2,
                            const char* detail) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, bailout_type, src1, src2, detail);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
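  // mask == |divisor| - 1, computed without overflow even for
  // divisor == kMinInt. E.g. both 8 and -8 yield mask == 7, and
  // (x & 7) == x mod 8 for non-negative x.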
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ subu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
    }
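    // The negation below executes in the branch delay slot, restoring the
    // sign of the remainder of a negative dividend.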
    __ Branch(USE_DELAY_SLOT, &done);
    __ subu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Mul(result, result, Operand(Abs(divisor)));
  __ Subu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, dividend, Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Mod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0; we have to deopt in this case because we can't return
  // a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, right_reg, Operand(zero_reg));
  }

  // Check for kMinInt % -1; div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, right_reg, Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, result_reg, Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, dividend, Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
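    // A non-zero remainder shows up in the low bits: e.g. for divisor == 4,
    // mask == 3, and any dividend with (dividend & 3) != 0 does not divide
    // evenly.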
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, at, Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Subu(result, zero_reg, dividend);
    return;
  }
  uint16_t shift = WhichPowerOf2Abs(divisor);
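  // Negative dividends are biased by |divisor| - 1 before the arithmetic
  // shift so the quotient rounds toward zero rather than toward negative
  // infinity.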
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ srl(result, dividend, 31);
    __ Addu(result, dividend, Operand(result));
  } else {
    __ sra(result, dividend, 31);
    __ srl(result, result, 32 - shift);
    __ Addu(result, dividend, Operand(result));
  }
  if (shift > 0) __ sra(result, result, shift);
  if (divisor < 0) __ Subu(result, zero_reg, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Mul(scratch0(), result, Operand(divisor));
    __ Subu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, scratch0(), Operand(zero_reg));
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = ToRegister(instr->temp());

  // On MIPS div is asynchronous: it runs in the background while we check
  // for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    DeoptimizeIf(ne, instr, remainder, Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ madd_d(addend, addend, multiplier, multiplicand);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.

  // The dividend can be the same register as the result, so save its value
  // here for the overflow check below.
  __ Move(scratch, dividend);

  __ Subu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, result, Operand(zero_reg));
  }

  // Dividing by -1 is basically negation, unless we overflow.
  __ Xor(scratch, scratch, result);
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(ge, instr, scratch, Operand(zero_reg));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor));
  __ Branch(&done);
  __ bind(&no_overflow);
  __ sra(result, result, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Subu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = scratch0();
  // On MIPS div is asynchronous: it runs in the background while we check
  // for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
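  // If the remainder is non-zero and its sign differs from the divisor's
  // (their xor is negative), the truncated quotient is one above the floor,
  // so decrement it.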
  Label done;
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
          DeoptimizeIf(lt, instr, scratch, Operand(zero_reg));
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, left, Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
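        // E.g. x * 9 becomes (x << 3) + x and x * 7 becomes (x << 3) - x.
        // The mask trick below computes |constant| branch-free: mask is 0
        // for non-negative constants and ~0 for negative ones, so
        // (constant + mask) ^ mask negates exactly the negative case
        // (e.g. constant == -6: (-6 - 1) ^ ~0 == 6).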
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ sll(scratch, left, shift);
          __ Addu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(scratch, result, result, right);
      } else {
        __ Mul(scratch, result, left, right);
      }
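      // Mul wrote the full 64-bit product to scratch (hi) and result (lo).
      // The product fits in 32 bits iff hi equals the sign-extension of lo,
      // so compare scratch against lo shifted right arithmetically by 31.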
      __ sra(at, result, 31);
      DeoptimizeIf(ne, instr, scratch, Operand(at));
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
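      // If left and right have the same sign, the product cannot be -0, so
      // the check is skipped (the xor of the operands is then non-negative).
      // Otherwise a zero product means -0, e.g. -5 * 0.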
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, result, Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
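        // A variable shift amount of zero leaves the sign bit intact; the
        // unsigned result would then not fit in an int32, so deoptimize
        // when the result comes out negative.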
        if (instr->can_deopt()) {
          DeoptimizeIf(lt, instr, result, Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr, at, Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
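            // Shift by shift_count - 1 and let SmiTagCheckOverflow perform
            // the final left shift, so that overflow of the tagged result
            // is detected in scratch.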
            if (shift_count != 1) {
              __ sll(result, left, shift_count - 1);
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr, scratch, Operand(zero_reg));
          } else {
            __ sll(result, left, shift_count);
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      DCHECK(right->IsRegister() || right->IsConstantOperand());
      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
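    // SubuAndCheckForOverflow leaves a negative value in 'overflow' exactly
    // when the subtraction overflowed, so the deopt below tests for
    // overflow < 0.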
    if (right->IsStackSlot() || right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      DCHECK(right->IsRegister());
      // Because the overflow-check macro does not support constant operands,
      // the IsConstantOperand case is handled by the previous clause.
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Move(result, v);
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ li(ToRegister(instr->result()), object);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  DCHECK(object.is(a0));
  DCHECK(result.is(v0));
  DCHECK(!scratch.is(scratch0()));
  DCHECK(!scratch.is(object));

  __ SmiTst(object, at);
  DeoptimizeIf(eq, instr, at, Operand(zero_reg));
  __ GetObjectType(object, scratch, scratch);
  DeoptimizeIf(ne, instr, scratch, Operand(JS_DATE_TYPE));

  if (index->value() == 0) {
    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ li(scratch, Operand(stamp));
      __ lw(scratch, MemOperand(scratch));
      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
                                            kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ li(a1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}

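// Computes the MemOperand of the character at 'index' in a sequential
// string, scaling the index by the character size for two-byte strings.
// Constant indices fold the scaled offset into the operand directly.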
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  DCHECK(!scratch.is(string));
  DCHECK(!scratch.is(ToRegister(index)));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Addu(scratch, string, ToRegister(index));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ sll(scratch, ToRegister(index), 1);
    __ Addu(scratch, string, scratch);
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ And(scratch, scratch,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                                ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ lbu(result, operand);
  } else {
    __ lhu(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ sb(value, operand);
  } else {
    __ sh(value, operand);
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      DCHECK(right->IsRegister() || right->IsConstantOperand());
      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
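    // As in DoSubI, AdduAndCheckForOverflow leaves a negative value in
    // 'overflow' exactly when the addition overflowed, so the deopt below
    // tests for overflow < 0.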
    if (right->IsStackSlot() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      DCHECK(right->IsRegister());
      // Because the overflow-check macro does not support constant operands,
      // the IsConstantOperand case is handled by the previous clause.
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Register left_reg = ToRegister(left);
    Register right_reg = EmitLoadRegister(right, scratch0());
    Register result_reg = ToRegister(instr->result());
    Label return_right, done;
    Register scratch = scratch1();
    __ Slt(scratch, left_reg, Operand(right_reg));
    if (condition == ge) {
      __ Movz(result_reg, left_reg, scratch);
      __ Movn(result_reg, right_reg, scratch);
    } else {
      DCHECK(condition == le);
      __ Movn(result_reg, left_reg, scratch);
      __ Movz(result_reg, right_reg, scratch);
    }
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    FPURegister left_reg = ToDoubleRegister(left);
    FPURegister right_reg = ToDoubleRegister(right);
    FPURegister result_reg = ToDoubleRegister(instr->result());
    Label check_nan_left, check_zero, return_left, return_right, done;
    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
    __ Branch(&return_right);

    __ bind(&check_zero);
    // left == right != 0.
    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
    // At this point, both left and right are either 0 or -0.
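    // IEEE 754 signed-zero arithmetic picks the correct zero: for max,
    // (+0) + (-0) == +0, so add_d yields -0 only when both inputs are -0;
    // for min, the negations make -0 win whenever either input is -0.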
    if (operation == HMathMinMax::kMathMin) {
      __ neg_d(left_reg, left_reg);
      __ sub_d(result_reg, left_reg, right_reg);
      __ neg_d(result_reg, result_reg);
    } else {
      __ add_d(result_reg, left_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&check_nan_left);
    // left == NaN.
    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ mov_d(result_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ mov_d(result_reg, left_reg);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
      __ MovToFloatParameters(left, right);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(result);

      // Restore the saved registers.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));
  DCHECK(ToRegister(instr->result()).is(v0));

  Handle<Code> code =
      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  // Other architectures use a nop here to signal that there is no inlined
  // patchable code. MIPS does not need the nop, since our marker
  // instruction (andi zero_reg) will never be used in normal code.
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
                          Condition condition,
                          Register src1,
                          const Operand& src2) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
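  // Emit at most one branch: fall through to whichever destination is the
  // next emitted block, inverting the condition when the true block is the
  // fall-through target.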
  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(right_block),
              NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
  } else {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitBranchF(InstrType instr,
                           Condition condition,
                           FPURegister src1,
                           FPURegister src2) {
  int right_block = instr->FalseDestination(chunk_);
  int left_block = instr->TrueDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
               NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
  } else {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr,
                               Condition condition,
                               Register src1,
                               const Operand& src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
}


template<class InstrType>
void LCodeGen::EmitFalseBranchF(InstrType instr,
                                Condition condition,
                                FPURegister src1,
                                FPURegister src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
             condition, src1, src2);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    EmitBranch(instr, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    DCHECK(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchF(instr, nue, reg, kDoubleRegZero);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, ne, reg, Operand(zero_reg));
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      DoubleRegister dbl_scratch = double_scratch0();
      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
      EmitBranch(instr, ne, at, Operand(zero_reg));
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

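      // Each type observed so far gets an inline test below; a value that
      // falls through every test was never seen on this path before and
      // triggers the deopt at the end.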
      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ SmiTst(reg, at);
        DeoptimizeIf(eq, instr, at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_),
                  ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                   ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr, zero_reg, Operand(zero_reg));
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to the false block label.
      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
                 left_reg, right_reg);

      EmitBranchF(instr, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand(0);

      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(left);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(left);
          cmp_right = Operand(value);
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(right);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(right);
          cmp_right = Operand(value);
        }
        // We commuted the operands, so commute the condition.
        cond = CommuteCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(instr, cond, cmp_left, cmp_right);
    }
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  EmitBranch(instr, eq, left, Operand(right));
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ li(at, Operand(factory()->the_hole_value()));
    EmitBranch(instr, eq, input_reg, Operand(at));
    return;
  }

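  // The hole is stored as a NaN with a distinctive upper word. First branch
  // to the false block if the value is not a NaN at all (a non-NaN compares
  // equal to itself), then test the upper word against kHoleNanUpper32.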
  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  EmitFalseBranchF(instr, eq, input_reg, input_reg);

  Register scratch = scratch0();
  __ FmoveHigh(scratch, input_reg);
  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
}


void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  DCHECK(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
    __ FmoveHigh(scratch, value);
    __ li(at, 0x80000000);
  } else {
    Register value = ToRegister(instr->value());
    __ CheckMap(value,
                scratch,
                Heap::kHeapNumberMapRootIndex,
                instr->FalseLabel(chunk()),
                DO_SMI_CHECK);
    __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
    EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
    __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
    __ mov(at, zero_reg);
  }
  EmitBranch(instr, eq, scratch, Operand(at));
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ Branch(is_object, eq, input, Operand(temp2));

  // Load map.
  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));

  // Load instance type and check that it is in object type range.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ Branch(is_not_object,
            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));

  return le;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = scratch0();

  Condition true_cond =
      EmitIsObject(reg, temp1, temp2,
          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond, temp2,
             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ GetObjectType(input, temp1, temp1);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond, temp1,
             Operand(FIRST_NONSTRING_TYPE));
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(instr, eq, at, Operand(zero_reg));
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne, at, Operand(zero_reg));
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(instr, condition, v0, Operand(zero_reg));
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(instr,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ lw(scratch,
         FieldMemOperand(input, String::kHashFieldOffset));
  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, eq, at, Operand(zero_reg));
}


// Branches to a label or falls through, leaving the instance class name in
// the temp register for the caller to compare (MIPS has no flags register;
// see the comment at the end of this function). Trashes the temp registers,
// but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);

    __ GetObjectType(input, temp, temp2);
    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ GetObjectType(input, temp, temp2);
    __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ GetObjectType(temp, temp2, temp2);
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ lw(temp, FieldMemOperand(temp,
                               SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a
  // literal. The name in the constructor is internalized because of the way
  // the context is booted. This routine isn't expected to work for random
  // API-created classes and it doesn't have to because you can't access it
  // with natives syntax. Since both sides are internalized it is sufficient
  // to use an identity comparison.

  // End with the address of this class_name instance in the temp register.
  // On MIPS, the caller must do the comparison with Handle<String> class_name.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq, temp, Operand(class_name));
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(instr, eq, temp, Operand(instr->map()));
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label true_label, done;
  DCHECK(ToRegister(instr->left()).is(a0));  // Object is in a0.
  DCHECK(ToRegister(instr->right()).is(a1));  // Function is in a1.
  Register result = ToRegister(instr->result());
  DCHECK(result.is(v0));

  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  __ Branch(&true_label, eq, result, Operand(zero_reg));
  __ li(result, Operand(factory()->false_value()));
  __ Branch(&done);
  __ bind(&true_label);
  __ li(result, Operand(factory()->true_value()));
  __ bind(&done);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  DCHECK(object.is(a0));
  DCHECK(result.is(v0));

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));

  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  __ bind(deferred->map_check());  // Label for calculating code patching.
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch with
  // the cached map.
  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
  __ li(at, Operand(Handle<Object>(cell)));
  __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
  __ BranchShort(&cache_miss, ne, map, Operand(at));
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch
  // with true or false. The distance from map check has to be constant.
  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
  __ Branch(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ LoadRoot(temp, Heap::kNullValueRootIndex);
  __ Branch(&false_result, eq, object, Operand(temp));

  // A string value is not an instance of anything.
  Condition cc = __ IsObjectStringType(object, temp, temp);
  __ Branch(&false_result, cc, temp, Operand(zero_reg));

  // Go to the deferred code.
  __ Branch(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());
  DCHECK(result.is(v0));

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(isolate(), flags);

  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());

  // Get the temp register reserved by the instruction. This needs to be t0 as
  // its slot of the pushing of safepoint registers is used to communicate the
  // offset to the location of the map check.
  Register temp = ToRegister(instr->temp());
  DCHECK(temp.is(t0));
  __ li(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 7;
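  // kAdditionalDelta presumably covers the instructions emitted between this
  // point and the stub call, so that 'delta' below measures from the patched
  // map check to the return address of the call. The li that materializes it
  // is forced to CONSTANT_SIZE, keeping that distance fixed.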
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  {
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
    __ StoreToSafepointRegisterSlot(temp, temp);
  }
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(result, result);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // On MIPS there is no need for a "no inlined smi code" marker (nop).

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
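  // The LoadRoot of the true value sits in the branch delay slot, so it
  // executes whether or not the branch is taken; on fall-through the second
  // LoadRoot overwrites the result with the false value.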
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
  Label done, check;
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
  __ bind(&check);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in v0. We're leaving the code
    // managed by the register allocator and tearing down the frame, so it's
    // safe to write to the context register.
    __ push(v0);
    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    __ mov(sp, fp);
    no_frame_start = masm_->pc_offset();
    __ Pop(ra, fp);
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
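    // The +1 pops the receiver slot in addition to the parameters.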
    if (sp_delta != 0) {
      __ Addu(sp, sp, Operand(sp_delta));
    }
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    __ sll(at, reg, kPointerSizeLog2);
    __ Addu(sp, sp, at);
  }

  __ Jump(ra);

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr, result, Operand(at));
  }
}


template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
  DCHECK(FLAG_vector_ics);
  Register vector = ToRegister(instr->temp_vector());
  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
  __ li(vector, instr->hydrogen()->feedback_vector());
  // No need to allocate this register.
  DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
  __ li(VectorLoadICDescriptor::SlotRegister(),
        Operand(Smi::FromInt(instr->hydrogen()->slot())));
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->global_object())
             .is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(v0));

  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  }
  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ li(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload.
    Register payload = ToRegister(instr->temp());
    __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr, payload, Operand(at));
  }

  // Store the value.
  __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}

void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  __ lw(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, result, Operand(at));
    } else {
      Label is_not_hole;
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, scratch, Operand(at));
    } else {
      __ Branch(&skip_assignment, ne, scratch, Operand(at));
    }
  }

  __ sw(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch0(),
                              GetRAState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    __ Load(result, operand, access.representation());
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ ldc1(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  MemOperand operand = FieldMemOperand(object, offset);
  __ Load(result, operand, access.representation());
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(v0));

  // Name is always in a2.
  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  }
  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Get the prototype or initial map from the function.
  __ lw(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr, result, Operand(at));

  // If the function does not have an initial map, we're done.
  Label done;
  __ GetObjectType(result, scratch, scratch);
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them; add one more.
3079  if (instr->length()->IsConstantOperand()) {
3080    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3081    if (instr->index()->IsConstantOperand()) {
3082      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3083      int index = (const_length - const_index) + 1;
3084      __ lw(result, MemOperand(arguments, index * kPointerSize));
3085    } else {
3086      Register index = ToRegister(instr->index());
3087      __ li(at, Operand(const_length + 1));
3088      __ Subu(result, at, index);
3089      __ sll(at, result, kPointerSizeLog2);
3090      __ Addu(at, arguments, at);
3091      __ lw(result, MemOperand(at));
3092    }
3093  } else if (instr->index()->IsConstantOperand()) {
3094    Register length = ToRegister(instr->length());
3095    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3096    int loc = const_index - 1;
3097    if (loc != 0) {
3098      __ Subu(result, length, Operand(loc));
3099      __ sll(at, result, kPointerSizeLog2);
3100      __ Addu(at, arguments, at);
3101      __ lw(result, MemOperand(at));
3102    } else {
3103      __ sll(at, length, kPointerSizeLog2);
3104      __ Addu(at, arguments, at);
3105      __ lw(result, MemOperand(at));
3106    }
3107  } else {
3108    Register length = ToRegister(instr->length());
3109    Register index = ToRegister(instr->index());
3110    __ Subu(result, length, index);
3111    __ Addu(result, result, 1);
3112    __ sll(at, result, kPointerSizeLog2);
3113    __ Addu(at, arguments, at);
3114    __ lw(result, MemOperand(at));
3115  }
3116}
3117
3118
3119void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3120  Register external_pointer = ToRegister(instr->elements());
3121  Register key = no_reg;
3122  ElementsKind elements_kind = instr->elements_kind();
3123  bool key_is_constant = instr->key()->IsConstantOperand();
3124  int constant_key = 0;
3125  if (key_is_constant) {
3126    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
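    // The constant key is shifted left by up to 3 bits below; rejecting keys
    // with any of the top four bits set presumably guards the 32-bit
    // displacement computation against overflow.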
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
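  // Smi keys are tagged (value << kSmiTagSize), so the scale shift is
  // reduced by kSmiTagSize to compensate for the tag.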
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
      elements_kind == FLOAT64_ELEMENTS) {
    FPURegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
    } else {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
    }
    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
        elements_kind == FLOAT32_ELEMENTS) {
      __ lwc1(result, MemOperand(scratch0(), base_offset));
      __ cvt_d_s(result, result);
    } else {  // i.e. float64 elements.
      __ ldc1(result, MemOperand(scratch0(), base_offset));
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size, base_offset);
    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case INT8_ELEMENTS:
        __ lb(result, mem_operand);
        break;
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ lbu(result, mem_operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ lh(result, mem_operand);
        break;
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ lhu(result, mem_operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ lw(result, mem_operand);
        break;
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ lw(result, mem_operand);
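        // The value was loaded as a signed word; unless all uses treat it
        // as uint32, deopt when the sign bit is set since the value would
        // not fit in an int32.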
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          DeoptimizeIf(Ugreater_equal, instr, result, Operand(0x80000000));
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);

  int base_offset = instr->base_offset();
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    base_offset += constant_key * kDoubleSize;
  }
  __ Addu(scratch, elements, Operand(base_offset));

  if (!key_is_constant) {
    key = ToRegister(instr->key());
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ sll(at, key, shift_size);
    __ Addu(scratch, scratch, at);
  }

  __ ldc1(result, MemOperand(scratch));

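  // The hole is encoded as a NaN with a distinguished upper word, so
  // comparing the upper 32 bits against kHoleNanUpper32 is sufficient.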
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
    DeoptimizeIf(eq, instr, scratch, Operand(kHoleNanUpper32));
  }
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
      __ addu(scratch, elements, scratch);
    } else {
      __ sll(scratch, key, kPointerSizeLog2);
      __ addu(scratch, elements, scratch);
    }
  }
  __ lw(result, MemOperand(store_base, offset));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ SmiTst(result, scratch);
      DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(eq, instr, result, Operand(scratch));
    }
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


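// Computes the MemOperand for a keyed access. A constant key folds the whole
// displacement into the immediate offset; otherwise the key is scaled into
// scratch0(). A shift_size of -1 means the key is a tagged Smi indexing
// byte-sized elements, so it is untagged with a right shift by one instead.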
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int base_offset) {
  if (key_is_constant) {
    return MemOperand(base, (constant_key << element_size) + base_offset);
  }

  if (base_offset == 0) {
    if (shift_size >= 0) {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    } else {
      DCHECK_EQ(-1, shift_size);
      __ srl(scratch0(), key, 1);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    }
  }

  if (shift_size >= 0) {
    __ sll(scratch0(), key, shift_size);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0(), base_offset);
  } else {
    DCHECK_EQ(-1, shift_size);
    __ sra(scratch0(), key, 1);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0(), base_offset);
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));

  if (FLAG_vector_ics) {
    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register temp = scratch1();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ Subu(result, sp, 2 * kPointerSize);
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // Arguments adaptor frame present. Get argument length from there.
  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, result_in_receiver;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ lw(scratch,
          FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ lw(scratch,
          FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for builtins.
    int32_t strict_mode_function_mask =
        1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
    int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
    __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
    __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
  }

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ SmiTst(receiver, scratch);
  DeoptimizeIf(eq, instr, scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr, scratch, Operand(FIRST_SPEC_OBJECT_TYPE));

  __ Branch(&result_in_receiver);
  __ bind(&global_object);
  __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ lw(result,
        ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ lw(result,
        FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));

  if (result.is(receiver)) {
    __ bind(&result_in_receiver);
  } else {
    Label result_ok;
    __ Branch(&result_ok);
    __ bind(&result_in_receiver);
    __ mov(result, receiver);
    __ bind(&result_ok);
  }
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DCHECK(receiver.is(a0));  // Used for parameter count.
  DCHECK(function.is(a1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(v0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
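  // Deoptimize rather than copy an unreasonably large argument list onto
  // the stack.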
  DeoptimizeIf(hi, instr, length, Operand(kArgumentsLimit));

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ Move(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ Addu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
  __ sll(scratch, length, 2);
  __ bind(&loop);
  __ Addu(scratch, elements, scratch);
  __ lw(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Subu(length, length, Operand(1));
  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
  __ sll(scratch, length, 2);

  __ bind(&invoke);
  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, at);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    DCHECK(result.is(cp));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  __ li(scratch0(), instr->hydrogen()->pairs());
  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  // The context is the first argument.
  __ Push(cp, scratch0(), scratch1());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 A1State a1_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    if (a1_state == A1_UNINITIALIZED) {
      __ li(a1, function);
    }

    // Change context.
    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

    // Set a0 to the arguments count if adaptation is not needed. Assumes
    // that a0 is available to write to at this point.
    if (dont_adapt_arguments) {
      __ li(a0, Operand(arity));
    }

    // Invoke function.
    __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
    __ Call(at);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
  }
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, scratch, Operand(at));

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(t0) ? a0 : t0;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp1.
    if (!tmp1.is(v0)) __ mov(tmp1, v0);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
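  // The mov below executes in the branch delay slot, so result is set to
  // input whether or not the branch is taken; the negative path then
  // overwrites it with the negated value.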
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr, result, Operand(zero_reg));
  __ bind(&done);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->value());
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->temp());

  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     input,
                     scratch1,
                     double_scratch0(),
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ Mfhc1(scratch1, input);
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
    __ bind(&done);
  }
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ Mfhc1(result, input);
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  Label skip1;
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr, scratch, Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

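  // Round by computing floor(input + 0.5): adding 0.5 first lets the
  // truncating kRoundToMinusInf conversion below produce round-to-nearest.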
  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());

  // Check sign of the result: if the sign changed, the input
  // value was in ]-0.5, 0[ and the result should be -0.
  __ Mfhc1(result, double_scratch0());
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // ARM uses 'mi' here, which is 'lt'.
    DeoptimizeIf(lt, instr, result, Operand(zero_reg));
  } else {
    Label skip2;
    // ARM uses 'mi' here, which is 'lt'.
    // Negating it results in 'ge'.
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  Register except_flag = scratch;
  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     double_scratch0(),
                     at,
                     double_scratch1,
                     except_flag);

  DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ Mfhc1(scratch, input);
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ cvt_s_d(result.low(), input);
  __ cvt_d_s(result, result.low());
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqrt_d(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->temp());

  DCHECK(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ Move(temp, -V8_INFINITY);
  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
  // Set up Infinity in the delay slot.
  // result is overwritten if the branch is not taken.
  __ neg_d(result, temp);

  // Add +0 to convert -0 to +0.
  __ add_d(result, input, kDoubleRegZero);
  __ sqrt_d(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(f4));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(f2));
  DCHECK(ToDoubleRegister(instr->result()).is(f0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!t3.is(tagged_exponent));
    __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr, t3, Operand(at));
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DoubleRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ Clz(result, input);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      A1_CONTAINS_TARGET);
  }
}


void LCodeGen::DoTailCallThroughMegamorphicCache(
    LTailCallThroughMegamorphicCache* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register name = ToRegister(instr->name());
  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(name.is(LoadDescriptor::NameRegister()));
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));

  Register scratch = a3;
  Register extra = t0;
  Register extra2 = t1;
  Register extra3 = t2;

  // Important for the tail-call.
  bool must_teardown_frame = NeedsEagerFrame();

  // The probe will tail call to a handler if found.
  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
                                         must_teardown_frame, receiver, name,
                                         scratch, extra, extra2, extra3);

  // Tail call to miss if we ended up here.
  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
  LoadIC::GenerateMiss(masm());
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(v0));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
    __ Call(code, RelocInfo::CODE_TARGET);
  } else {
    DCHECK(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
    __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
    __ Call(target);
  }
  generator.AfterCall();
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  if (instr->hydrogen()->pass_argument_count()) {
    __ li(a0, Operand(instr->arity()));
  }

  // Change context.
  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

  // Load the code entry address.
  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
  __ Call(at);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  // No cell in a2 for construct type feedback in optimized code.
  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a change here, look at the first argument.
      __ lw(t1, MemOperand(sp, 0));
      __ Branch(&packed_case, eq, t1, Operand(zero_reg));

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ Addu(code_object, code_object,
          Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sw(code_object,
        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ Addu(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ Addu(result, base, offset);
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, FieldMemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ li(scratch, Operand(transition));
    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           scratch,
                           temp,
                           GetRAState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  if (access.IsInobject()) {
    MemOperand operand = FieldMemOperand(object, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  } else {
    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    MemOperand operand = FieldMemOperand(scratch, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
  Operand operand(0);
  Register reg;
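  // With a constant index the operands are swapped (length in the register,
  // index as the immediate), so the condition must be commuted to match.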
4193  if (instr->index()->IsConstantOperand()) {
4194    operand = ToOperand(instr->index());
4195    reg = ToRegister(instr->length());
4196    cc = CommuteCondition(cc);
4197  } else {
4198    reg = ToRegister(instr->index());
4199    operand = ToOperand(instr->length());
4200  }
4201  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4202    Label done;
4203    __ Branch(&done, NegateCondition(cc), reg, operand);
4204    __ stop("eliminated bounds check failed");
4205    __ bind(&done);
4206  } else {
4207    DeoptimizeIf(cc, instr, reg, operand);
4208  }
4209}
4210
4211
4212void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4213  Register external_pointer = ToRegister(instr->elements());
4214  Register key = no_reg;
4215  ElementsKind elements_kind = instr->elements_kind();
4216  bool key_is_constant = instr->key()->IsConstantOperand();
4217  int constant_key = 0;
4218  if (key_is_constant) {
4219    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4220    if (constant_key & 0xF0000000) {
4221      Abort(kArrayIndexConstantValueTooBig);
4222    }
4223  } else {
4224    key = ToRegister(instr->key());
4225  }
4226  int element_size_shift = ElementsKindToShiftSize(elements_kind);
4227  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4228      ? (element_size_shift - kSmiTagSize) : element_size_shift;
4229  int base_offset = instr->base_offset();
4230
4231  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4232      elements_kind == FLOAT32_ELEMENTS ||
4233      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4234      elements_kind == FLOAT64_ELEMENTS) {
4235    Register address = scratch0();
4236    FPURegister value(ToDoubleRegister(instr->value()));
4237    if (key_is_constant) {
4238      if (constant_key != 0) {
4239        __ Addu(address, external_pointer,
4240                Operand(constant_key << element_size_shift));
4241      } else {
4242        address = external_pointer;
4243      }
4244    } else {
4245      __ sll(address, key, shift_size);
4246      __ Addu(address, external_pointer, address);
4247    }
4248
4249    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4250        elements_kind == FLOAT32_ELEMENTS) {
4251      __ cvt_s_d(double_scratch0(), value);
4252      __ swc1(double_scratch0(), MemOperand(address, base_offset));
4253    } else {  // Storing doubles, not floats.
4254      __ sdc1(value, MemOperand(address, base_offset));
4255    }
4256  } else {
4257    Register value(ToRegister(instr->value()));
4258    MemOperand mem_operand = PrepareKeyedOperand(
4259        key, external_pointer, key_is_constant, constant_key,
4260        element_size_shift, shift_size,
4261        base_offset);
4262    switch (elements_kind) {
4263      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4264      case EXTERNAL_INT8_ELEMENTS:
4265      case EXTERNAL_UINT8_ELEMENTS:
4266      case UINT8_ELEMENTS:
4267      case UINT8_CLAMPED_ELEMENTS:
4268      case INT8_ELEMENTS:
4269        __ sb(value, mem_operand);
4270        break;
4271      case EXTERNAL_INT16_ELEMENTS:
4272      case EXTERNAL_UINT16_ELEMENTS:
4273      case INT16_ELEMENTS:
4274      case UINT16_ELEMENTS:
4275        __ sh(value, mem_operand);
4276        break;
4277      case EXTERNAL_INT32_ELEMENTS:
4278      case EXTERNAL_UINT32_ELEMENTS:
4279      case INT32_ELEMENTS:
4280      case UINT32_ELEMENTS:
4281        __ sw(value, mem_operand);
4282        break;
4283      case FLOAT32_ELEMENTS:
4284      case FLOAT64_ELEMENTS:
4285      case EXTERNAL_FLOAT32_ELEMENTS:
4286      case EXTERNAL_FLOAT64_ELEMENTS:
4287      case FAST_DOUBLE_ELEMENTS:
4288      case FAST_ELEMENTS:
4289      case FAST_SMI_ELEMENTS:
4290      case FAST_HOLEY_DOUBLE_ELEMENTS:
4291      case FAST_HOLEY_ELEMENTS:
4292      case FAST_HOLEY_SMI_ELEMENTS:
4293      case DICTIONARY_ELEMENTS:
4294      case SLOPPY_ARGUMENTS_ELEMENTS:
4295        UNREACHABLE();
4296        break;
4297    }
4298  }
4299}
4300
4301
4302void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4303  DoubleRegister value = ToDoubleRegister(instr->value());
4304  Register elements = ToRegister(instr->elements());
4305  Register scratch = scratch0();
4306  DoubleRegister double_scratch = double_scratch0();
4307  bool key_is_constant = instr->key()->IsConstantOperand();
4308  int base_offset = instr->base_offset();
4309  Label not_nan, done;
4310
4311  // Calculate the effective address of the slot in the array to store the
4312  // double value.
4313  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4314  if (key_is_constant) {
4315    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4316    if (constant_key & 0xF0000000) {
4317      Abort(kArrayIndexConstantValueTooBig);
4318    }
4319    __ Addu(scratch, elements,
4320           Operand((constant_key << element_size_shift) + base_offset));
4321  } else {
4322    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4323        ? (element_size_shift - kSmiTagSize) : element_size_shift;
4324    __ Addu(scratch, elements, Operand(base_offset));
4325    __ sll(at, ToRegister(instr->key()), shift_size);
4326    __ Addu(scratch, scratch, at);
4327  }
4328
4329  if (instr->NeedsCanonicalization()) {
4330    Label is_nan;
4331    // Check for NaN. All NaNs must be canonicalized.
4332    __ BranchF(NULL, &is_nan, eq, value, value);
4333    __ Branch(&not_nan);
4334
4335    // Only load canonical NaN if the comparison above set the overflow.
4336    __ bind(&is_nan);
4337    __ LoadRoot(at, Heap::kNanValueRootIndex);
4338    __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
4339    __ sdc1(double_scratch, MemOperand(scratch, 0));
4340    __ Branch(&done);
4341  }
4342
4343  __ bind(&not_nan);
4344  __ sdc1(value, MemOperand(scratch, 0));
4345  __ bind(&done);
4346}
4347
4348
4349void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4350  Register value = ToRegister(instr->value());
4351  Register elements = ToRegister(instr->elements());
4352  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4353      : no_reg;
4354  Register scratch = scratch0();
4355  Register store_base = scratch;
4356  int offset = instr->base_offset();
4357
4358  // Do the store.
4359  if (instr->key()->IsConstantOperand()) {
4360    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4361    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4362    offset += ToInteger32(const_operand) * kPointerSize;
4363    store_base = elements;
4364  } else {
4365    // Even though the HLoadKeyed instruction forces the input
4366    // representation for the key to be an integer, the input gets replaced
4367    // during bound check elimination with the index argument to the bounds
4368    // check, which can be tagged, so that case must be handled here, too.
4369    if (instr->hydrogen()->key()->representation().IsSmi()) {
4370      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
4371      __ addu(scratch, elements, scratch);
4372    } else {
4373      __ sll(scratch, key, kPointerSizeLog2);
4374      __ addu(scratch, elements, scratch);
4375    }
4376  }
4377  __ sw(value, MemOperand(store_base, offset));
4378
4379  if (instr->hydrogen()->NeedsWriteBarrier()) {
4380    SmiCheck check_needed =
4381        instr->hydrogen()->value()->type().IsHeapObject()
4382            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4383    // Compute address of modified element and store it into key register.
4384    __ Addu(key, store_base, Operand(offset));
4385    __ RecordWrite(elements,
4386                   key,
4387                   value,
4388                   GetRAState(),
4389                   kSaveFPRegs,
4390                   EMIT_REMEMBERED_SET,
4391                   check_needed,
4392                   instr->hydrogen()->PointersToHereCheckForValue());
4393  }
4394}
4395
4396
4397void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4398  // By cases: external, fast double
4399  if (instr->is_typed_elements()) {
4400    DoStoreKeyedExternalArray(instr);
4401  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4402    DoStoreKeyedFixedDoubleArray(instr);
4403  } else {
4404    DoStoreKeyedFixedArray(instr);
4405  }
4406}
4407
4408
4409void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4410  DCHECK(ToRegister(instr->context()).is(cp));
4411  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4412  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4413  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4414
4415  Handle<Code> ic =
4416      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
4417  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4418}
4419
4420
4421void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4422  Register object_reg = ToRegister(instr->object());
4423  Register scratch = scratch0();
4424
4425  Handle<Map> from_map = instr->original_map();
4426  Handle<Map> to_map = instr->transitioned_map();
4427  ElementsKind from_kind = instr->from_kind();
4428  ElementsKind to_kind = instr->to_kind();
4429
4430  Label not_applicable;
4431  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4432  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
4433
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ li(new_map_reg, Operand(to_map));
    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object_reg,
                         new_map_reg,
                         scratch,
                         GetRAState(),
                         kDontSaveFPRegs);
  } else {
    DCHECK(object_reg.is(a0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ li(a1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
                                     ne, &no_memento_found);
  DeoptimizeIf(al, instr);
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
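  // On this 32-bit target a smi is the value shifted left by kSmiTagSize (1)
  // with a zero tag bit, so e.g. the constant index 5 is pushed as 0xA.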
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(v0);
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  DCHECK(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxOneByteCharCode));
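  // Look the character up in the single-character string cache, a FixedArray
  // indexed by character code; a miss reads the undefined sentinel and falls
  // through to the deferred runtime call.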
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ sll(scratch, char_code, kPointerSizeLog2);
  __ Addu(result, result, scratch);
  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ lw(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  FPURegister dbl_scratch = double_scratch0();
  __ mtc1(ToRegister(input), dbl_scratch);
  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       SIGNED_INT32);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());
  Register overflow = scratch0();

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
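  // SmiTagCheckOverflow computes overflow = (value << 1) ^ value, whose sign
  // bit is set exactly when bits 30 and 31 of the input disagree, i.e. when
  // the value does not fit in a smi and must be boxed by the deferred code.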
  __ SmiTagCheckOverflow(dst, src, overflow);
  __ BranchOnOverflow(deferred->entry(), overflow);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       UNSIGNED_INT32);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
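  // On this 32-bit target Smi::kMaxValue is 2^30 - 1, so any uint32 above
  // that bound cannot be represented as a smi and must be boxed in a heap
  // number by the deferred code.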
  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
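    // If dst aliases src, the overflowed tag must first be undone: untagging
    // halves the value and flipping bit 31 restores the sign bit that the
    // overflowing shift pushed out (e.g. tagging 0x40000000 gives 0x80000000;
    // untag -> 0xc0000000, xor 0x80000000 -> 0x40000000 again).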
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ Xor(src, src, Operand(0x80000000));
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
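    // DONT_TAG_RESULT leaves the raw object address in dst, so the sdc1 at
    // the end of this function can store through an untagged MemOperand; the
    // heap object tag is added afterwards.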
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ mov(dst, zero_reg);

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
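    // The runtime call returns a tagged heap number; untag it so that the
    // inline and runtime paths both hand an untagged address to the store
    // below.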
    __ Subu(v0, v0, kHeapObjectTag);
    __ StoreToSafepointRegisterSlot(v0, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
  __ Addu(dst, dst, kHeapObjectTag);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address, tag it.
  __ Addu(reg, reg, kHeapObjectTag);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ Subu(v0, v0, kHeapObjectTag);
  __ StoreToSafepointRegisterSlot(v0, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
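    // A uint32 fits in a smi only if it is below 2^30, i.e. only if the top
    // two bits are clear; otherwise deoptimize.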
    __ And(at, input, Operand(0xc0000000));
    DeoptimizeIf(ne, instr, at, Operand(zero_reg));
  }
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, at);
    DeoptimizeIf(lt, instr, at, Operand(zero_reg));
  } else {
    __ SmiTag(output, input);
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, the value of scratch won't be zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}


void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (can_convert_undefined_to_nan) {
      __ Branch(&convert, ne, scratch, Operand(at));
    } else {
      DeoptimizeIf(ne, instr, scratch, Operand(at));
    }
    // Load heap number.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
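      // -0.0 is the only double whose low word is zero and whose high word
      // is exactly the sign bit (0x80000000), so test the two halves in turn.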
      __ mfc1(at, result_reg.low());
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ Mfhc1(scratch, result_reg);
      DeoptimizeIf(eq, instr, scratch, Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and the hole) to NaN.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, instr, input_reg, Operand(at));
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ Branch(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    // Check HeapNumber map.
    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
    __ mov(scratch2, input_reg);  // In delay slot.
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ Branch(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&check_bools, ne, input_reg, Operand(at));
    DCHECK(ToRegister(instr->result()).is(input_reg));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.

    __ bind(&check_bools);
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(&check_false, ne, scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ li(input_reg, Operand(1));  // In delay slot.

    __ bind(&check_false);
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    DeoptimizeIf(ne, instr, scratch2, Operand(at), "cannot truncate");
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.
  } else {
    DeoptimizeIf(ne, instr, scratch1, Operand(at), "not a heap number");

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       input_reg,
                       double_scratch,
                       scratch1,
                       double_scratch2,
                       except_flag,
                       kCheckForInexactConversion);

    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg),
                 "lost precision or NaN");

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ Mfhc1(scratch1, double_scratch);
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg), "minus zero");
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Let the deferred code handle the HeapObject case.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    // Smi to int32 conversion.
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ Mfhc1(scratch1, double_input);
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = LCodeGen::scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ Mfhc1(scratch1, double_input);
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
      __ bind(&done);
    }
  }
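  // The rounded result is an int32, but it may still lie outside the 31-bit
  // smi range; SmiTagCheckOverflow leaves a negative value in scratch1 in
  // that case, and the deopt below catches it.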
  __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
  DeoptimizeIf(lt, instr, scratch1, Operand(zero_reg));
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input), at);
  DeoptimizeIf(ne, instr, at, Operand(zero_reg));
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input), at);
    DeoptimizeIf(eq, instr, at, Operand(zero_reg));
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, scratch, Operand(first));
    } else {
      DeoptimizeIf(lo, instr, scratch, Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr, scratch, Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

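    // When the mask has a single bit set, the tag must be either zero or the
    // mask itself, so one And plus a zero/non-zero test suffices; otherwise
    // mask out the relevant bits and compare against the full tag.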
    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr, at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr, scratch, Operand(tag));
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
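  // Objects in new space may move during GC and therefore cannot be embedded
  // in the code directly; compare through a Cell instead, which the GC keeps
  // pointing at the (possibly relocated) object.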
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ li(at, Operand(Handle<Object>(cell)));
    __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
    DeoptimizeIf(ne, instr, reg, Operand(at));
  } else {
    DeoptimizeIf(ne, instr, reg, Operand(object));
  }
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ mov(cp, zero_reg);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, scratch0());
  }
  __ SmiTst(scratch0(), at);
  DeoptimizeIf(eq, instr, at, Operand(zero_reg));
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
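    // For a stability check no code is emitted at all: the optimized code
    // registers a compile-time dependency on each map and gets deoptimized
    // if any of them later transitions.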
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);
  __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
  } else {
    DeoptimizeIf(ne, instr, map_reg, Operand(map));
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number.
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr, input_reg, Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);

  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ FmoveHigh(result_reg, value_reg);
  } else {
    __ FmoveLow(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());
  __ Move(result_reg, lo_reg, hi_reg);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size - kHeapObjectTag));
    } else {
      __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
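    // scratch was set to size - kHeapObjectTag, so result + scratch points
    // just past the object; the loop below walks from the last word down to
    // the object's base, storing the filler map into every slot.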
    Label loop;
    __ bind(&loop);
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Addu(at, result, Operand(scratch));
    __ sw(scratch2, MemOperand(at));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ Push(Smi::FromInt(size));
    } else {
      // We should never get here at runtime => abort.
      __ stop("invalid allocation size");
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(a0));
  DCHECK(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // t3 = literals array.
  // a1 = regexp literal.
  // a0 = regexp literal clone.
  // a2 and t0-t2 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ li(t3, instr->hydrogen()->literals());
  __ lw(a1, FieldMemOperand(t3, literal_offset));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create the regexp literal using a runtime function.
  // Result will be in v0.
  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(t1, Operand(instr->hydrogen()->pattern()));
  __ li(t0, Operand(instr->hydrogen()->flags()));
  __ Push(t3, t2, t1, t0);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ Push(a1, a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
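  // Each iteration copies two words; if the object is an odd number of words
  // long, the tail store below handles the remaining one.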
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ lw(a3, FieldMemOperand(a1, i));
    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
    __ sw(a3, FieldMemOperand(v0, i));
    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->kind());
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    __ li(a1, Operand(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->result()).is(v0));
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  &cmp1,
                                                  &cmp2);

  DCHECK(cmp1.is_valid());
  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register* cmp1,
                                 Operand* cmp2) {
  // This function utilizes the delay slot heavily. This is used to load
  // values that are always usable without depending on the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    *cmp1 = input;
    *cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
    // input is an object, so its bit field (at Map::kBitFieldOffset) can be
    // loaded even if we take the other branch.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    *cmp1 = at;
    *cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    *cmp1 = input;
    *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    Register map = input;
    __ GetObjectType(input, map, scratch);
    __ Branch(false_label,
              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(USE_DELAY_SLOT, false_label,
              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // map is still valid, so the bit field can be loaded in the delay slot.
    // Check for undetectable objects => false.
    __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(instr, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  DCHECK(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
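    // When a lazy bailout is taken, the deoptimizer patches a call over the
    // code following the safepoint; the nop padding (in units of
    // Assembler::kInstrSize, 4 bytes on MIPS) guarantees the patches of two
    // adjacent lazy bailouts can never overlap.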
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, type, zero_reg, Operand(zero_reg),
               instr->hydrogen()->reason());
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(eq, instr, object, Operand(at));

  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  DeoptimizeIf(eq, instr, object, Operand(null_value));

  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr, at, Operand(zero_reg));

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
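  // JS proxies occupy the lowest spec-object instance types, so a single
  // 'le' compare against LAST_JS_PROXY_TYPE rejects both proxies and
  // anything that is not a spec object.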
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr, a1, Operand(LAST_JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  DCHECK(object.is(a0));
  __ CheckEnumCache(null_value, &call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  DCHECK(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr, a1, Operand(at));
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr, result, Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr, map, Operand(scratch0()));
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    virtual void Generate() OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    virtual LInstruction* instr() OVERRIDE { return instr_; }
   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

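  // The (smi) index encodes two things: bit 0 of its payload flags a mutable
  // heap-number field, which must be loaded through the deferred runtime
  // call, and the remaining bits hold the signed field index; negative values
  // denote properties stored out-of-object in the properties array.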
  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  __ sra(index, index, 1);

  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is the negated out-of-object property index plus 1, so
  // subtracting it from the properties array base addresses the right slot.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ li(at, scope_info);
  __ Push(at, ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

} }  // namespace v8::internal