// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "src/crankshaft/mips/lithium-codegen-mips.h"

#include "src/base/bits.h"
#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

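// CallWrapper that records a safepoint immediately after a generated call:
// the macro assembler invokes BeforeCall/AfterCall around the call site, and
// AfterCall emits the safepoint for the pointer map captured at
// code-generation time, using the given deopt mode.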
class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
    LCodeGen* codegen)
    : codegen_(codegen) {
  DCHECK(codegen_->info()->is_calling());
  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;

  StoreRegistersStateStub stub(codegen_->isolate());
  codegen_->masm_->push(ra);
  codegen_->masm_->CallStub(&stub);
}

LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
  RestoreRegistersStateStub stub(codegen_->isolate());
  codegen_->masm_->push(ra);
  codegen_->masm_->CallStub(&stub);
  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}

#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Addu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Subu(a0, a0, Operand(kPointerSize));
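      // The pushed a0/a1 pair sits below the reserved slots, so the zap
      // stores at a0 + 2 * kPointerSize fall inside the reserved area.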
      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->NeedsContext()) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(a1);
      __ Push(info()->scope()->scope_info());
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else {
      if (slots <=
          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
        Callable callable = CodeFactory::FastNewFunctionContext(
            isolate(), info()->scope()->scope_type());
        __ li(FastNewFunctionContextDescriptor::SlotsRegister(),
              Operand(slots));
        __ Call(callable.code(), RelocInfo::CODE_TARGET);
        // Result of the FastNewFunctionContext builtin is always in new space.
        need_write_barrier = false;
      } else {
        __ push(a1);
        __ Push(Smi::FromInt(info()->scope()->scope_type()));
        __ CallRuntime(Runtime::kNewFunctionContext);
      }
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in v0. It replaces the context passed to us.
    // It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = info()->scope()->num_parameters();
    int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? info()->scope()->receiver()
                                : info()->scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Subu(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ li(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
        __ PushCommonFrame(scratch0());
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopCommonFrame(scratch0());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = t9;

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ li(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushCommonFrame();
        __ Call(&needs_frame);
      } else {
        __ Call(&call_deopt_entry);
      }
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      __ li(at, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
      __ push(at);
      DCHECK(info()->IsStub());
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ Addu(entry_offset, entry_offset,
            Operand(ExternalReference::ForDeoptEntry(base)));
    __ Jump(entry_offset);
  }
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::from_code(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::from_code(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
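  // On 32-bit targets a Smi is the integer shifted left by one (tag bit 0),
  // so reinterpreting the tagged value yields value << 1.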
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
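  // Indices of parameters are negative; e.g. index == -1 maps to offset 0
  // from the stack pointer, index == -2 to kPointerSize, and so on.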
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
  } else {
    // There is no eager frame, so retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
  } else {
    // There is no eager frame, so retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ lw(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            DeoptimizeReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1, const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last().label, condition, src1, src2);
  }
}

void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            DeoptimizeReason deopt_reason, Register src1,
                            const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
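  // mask == |divisor| - 1 for either sign of divisor (e.g. 7 for +/-8), so
  // dividend & mask is the modulus of a non-negative dividend; negative
  // dividends are negated, masked, and negated back below.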
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ subu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
                   Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ subu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Mul(result, result, Operand(Abs(divisor)));
  __ Subu(result, dividend, Operand(result));
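  // Above: result = dividend - (dividend / |divisor|) * |divisor|. The sign
  // of a truncated modulus follows the dividend, so using |divisor| is fine.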

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend,
                 Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Mod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0, we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg,
                 Operand(zero_reg));
  }

  // Check for kMinInt % -1, div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg,
                   Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg,
                 Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
                 Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend,
                 Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at,
                 Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Subu(result, zero_reg, dividend);
    return;
  }
  uint16_t shift = WhichPowerOf2Abs(divisor);
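  // For negative dividends, bias by 2^shift - 1 before the arithmetic shift
  // so the quotient rounds toward zero rather than toward -infinity.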
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ srl(result, dividend, 31);
    __ Addu(result, dividend, Operand(result));
  } else {
    __ sra(result, dividend, 31);
    __ srl(result, result, 32 - shift);
    __ Addu(result, dividend, Operand(result));
  }
  if (shift > 0) __ sra(result, result, shift);
  if (divisor < 0) __ Subu(result, zero_reg, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Mul(scratch0(), result, Operand(divisor));
    __ Subu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(),
                 Operand(zero_reg));
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = ToRegister(instr->temp());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder,
                 Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

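  // madd_d computes addend + multiplier * multiplicand in a single step.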
  __ madd_d(addend, addend, multiplier, multiplicand);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
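  // (An arithmetic shift floors: e.g. -5 >> 1 == -3 == floor(-5 / 2).)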
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.

  // dividend can be the same register as result so save the value of it
  // for checking overflow.
  __ Move(scratch, dividend);

  __ Subu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
                 Operand(zero_reg));
  }

  // Dividing by -1 is basically negation, unless we overflow.
  __ Xor(scratch, scratch, result);
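  // scratch is now negative iff the dividend and its negation differ in
  // sign, which fails only for 0 and kMinInt (the overflow case).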
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
                   Operand(zero_reg));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor));
  __ Branch(&done);
  __ bind(&no_overflow);
  __ sra(result, result, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Subu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = scratch0();
  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
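  // Floor differs from truncation only when the remainder is non-zero and
  // its sign differs from the divisor's; the Xor below tests the signs.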
  Label done;
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should
      // be -0.
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
                   Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          Label no_overflow;
          __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
          DeoptimizeIf(al, instr);
          __ bind(&no_overflow);
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
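        // Compute |constant| branchlessly: mask is 0 for non-negative
        // constants and -1 otherwise, and (x + mask) ^ mask negates x
        // exactly when mask is -1.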
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ Lsa(result, left, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(scratch, result, result, right);
      } else {
        __ Mul(scratch, result, left, right);
      }
      __ sra(at, result, 31);
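      // The 32-bit product is exact iff the high word (scratch) equals the
      // sign extension of the low word (result).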
      DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
                   Operand(at));
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result,
                       Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at,
                         Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
1551            if (shift_count != 1) {
1552              __ sll(result, left, shift_count - 1);
1553              __ SmiTagCheckOverflow(result, result, scratch);
1554            } else {
1555              __ SmiTagCheckOverflow(result, left, scratch);
1556            }
1557            DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch,
1558                         Operand(zero_reg));
1559          } else {
1560            __ sll(result, left, shift_count);
1561          }
1562        } else {
1563          __ Move(result, left);
1564        }
1565        break;
1566      default:
1567        UNREACHABLE();
1568        break;
1569    }
1570  }
1571}
1572
1573
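// DoSubI: without the overflow flag a plain Subu suffices. With it,
// SubBranchNoOvf computes the difference and branches to |no_overflow_label|
// on the non-overflowing path, so the unconditional DeoptimizeIf(al, ...)
// that follows is reached only on overflow.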
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      DCHECK(right->IsRegister() || right->IsConstantOperand());
      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register scratch = scratch0();
    Label no_overflow_label;
    if (right->IsStackSlot()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ SubBranchNoOvf(ToRegister(result), ToRegister(left),
                        Operand(right_reg), &no_overflow_label);
    } else {
      DCHECK(right->IsRegister() || right->IsConstantOperand());
      __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
                        &no_overflow_label, scratch);
    }
    DeoptimizeIf(al, instr);
    __ bind(&no_overflow_label);
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Move(result, v);
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ li(ToRegister(instr->result()), object);
}

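// BuildSeqStringOperand computes the address of the character at |index| in a
// sequential string: for a constant index the (two-byte scaled, if needed)
// offset is folded into the returned FieldMemOperand, while a variable index
// is scaled and added to the string pointer in scratch0() first.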
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  DCHECK(!scratch.is(string));
  DCHECK(!scratch.is(ToRegister(index)));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Addu(scratch, string, ToRegister(index));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ sll(scratch, ToRegister(index), 1);
    __ Addu(scratch, string, scratch);
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ And(scratch, scratch,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                                ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ lbu(result, operand);
  } else {
    __ lhu(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ sb(value, operand);
  } else {
    __ sh(value, operand);
  }
}

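// DoAddI mirrors DoSubI above: AddBranchNoOvf jumps over the unconditional
// deopt when the addition does not overflow signed 32 bits.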
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      DCHECK(right->IsRegister() || right->IsConstantOperand());
      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register scratch = scratch1();
    Label no_overflow_label;
    if (right->IsStackSlot()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ AddBranchNoOvf(ToRegister(result), ToRegister(left),
                        Operand(right_reg), &no_overflow_label);
    } else {
      DCHECK(right->IsRegister() || right->IsConstantOperand());
      __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
                        &no_overflow_label, scratch);
    }
    DeoptimizeIf(al, instr);
    __ bind(&no_overflow_label);
  }
}

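// DoMathMinMax: integer min/max is selected branch-free. Slt leaves
// (left < right) in |scratch| and the Movz/Movn pair then picks the
// appropriate operand based on whether |scratch| is zero. For doubles,
// Float64Max/Float64Min branch to |nan| when an input is NaN; add_d of the
// two inputs then yields the required NaN result.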
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Register scratch = scratch1();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
    Register left_reg = ToRegister(left);
    Register right_reg = EmitLoadRegister(right, scratch0());
    Register result_reg = ToRegister(instr->result());
    __ Slt(scratch, left_reg, Operand(right_reg));
    if (condition == ge) {
      __ Movz(result_reg, left_reg, scratch);
      __ Movn(result_reg, right_reg, scratch);
    } else {
      DCHECK(condition == le);
      __ Movn(result_reg, left_reg, scratch);
      __ Movz(result_reg, right_reg, scratch);
    }
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    FPURegister left_reg = ToDoubleRegister(left);
    FPURegister right_reg = ToDoubleRegister(right);
    FPURegister result_reg = ToDoubleRegister(instr->result());

    Label nan, done;
    if (operation == HMathMinMax::kMathMax) {
      __ Float64Max(result_reg, left_reg, right_reg, &nan);
    } else {
      DCHECK(operation == HMathMinMax::kMathMin);
      __ Float64Min(result_reg, left_reg, right_reg, &nan);
    }
    __ Branch(&done);

    __ bind(&nan);
    __ add_d(result_reg, left_reg, right_reg);

    __ bind(&done);
  }
}

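// DoArithmeticD: ADD/SUB/MUL/DIV map directly onto FPU instructions. MOD has
// no FPU instruction on MIPS, so it is lowered to a C call to
// mod_two_doubles_operation; a0-a3 are saved around the call since they may
// hold live values that the C calling convention would clobber.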
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
      __ MovToFloatParameters(left, right);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      // Move the result to the double result register.
      __ MovFromFloatResult(result);

      // Restore the saved registers.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));
  DCHECK(ToRegister(instr->result()).is(v0));

  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  // Other architectures use a nop here to signal that there is no inlined
  // patchable code. MIPS does not need the nop, since our marker
  // instruction (andi zero_reg) will never be used in normal code.
}

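// EmitBranch picks the cheapest encoding for a two-way branch: a plain goto
// when both destinations coincide (or the condition is al), a single
// conditional branch when one destination is the next emitted block (negating
// the condition when the true block falls through), and a conditional plus an
// unconditional branch otherwise. EmitBranchF is the FPU analogue.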
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
                          Condition condition,
                          Register src1,
                          const Operand& src2) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(right_block),
              NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
  } else {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitBranchF(InstrType instr,
                           Condition condition,
                           FPURegister src1,
                           FPURegister src2) {
  int right_block = instr->FalseDestination(chunk_);
  int left_block = instr->TrueDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
               NegateFpuCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
  } else {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
                              Register src1, const Operand& src2) {
  int true_block = instr->TrueDestination(chunk_);
  __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
}


template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
                               Register src1, const Operand& src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
}


template<class InstrType>
void LCodeGen::EmitFalseBranchF(InstrType instr,
                                Condition condition,
                                FPURegister src1,
                                FPURegister src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
             condition, src1, src2);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
}

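// DoBranch implements ToBoolean-style truthiness tests. Statically known
// representations (int32, Smi, double, boolean, string, ...) each get a
// single specialized test; for generic tagged values the recorded
// ToBooleanHints select which checks are emitted, and encountering a value
// outside the expected set deopts so the code can be regenerated with wider
// hints.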
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    EmitBranch(instr, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    DCHECK(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchF(instr, ogl, reg, kDoubleRegZero);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, ne, reg, Operand(zero_reg));
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      DoubleRegister dbl_scratch = double_scratch0();
      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
      EmitBranch(instr, ne, at, Operand(zero_reg));
    } else {
      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;

      if (expected & ToBooleanHint::kUndefined) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected & ToBooleanHint::kBoolean) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected & ToBooleanHint::kNull) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }

      if (expected & ToBooleanHint::kSmallInteger) {
        // Smis: 0 -> false, all other -> true.
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected & ToBooleanHint::kNeedsMap) {
        // If we need a map later and have a Smi -> deopt.
        __ SmiTst(reg, at);
        DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected & ToBooleanHint::kNeedsMap) {
        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected & ToBooleanHint::kCanBeUndetectable) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
        }
      }

      if (expected & ToBooleanHint::kReceiver) {
        // spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_),
                  ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
      }

      if (expected & ToBooleanHint::kString) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected & ToBooleanHint::kSymbol) {
        // Symbol value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
      }

      if (expected & ToBooleanHint::kHeapNumber) {
        // heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                   ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (expected != ToBooleanHint::kAny) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
                     Operand(zero_reg));
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}

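// DoCompareNumericAndBranch folds comparisons of two constants into a goto at
// code generation time. Double comparisons route unordered (NaN) inputs to
// the false block first. When only the left operand is a constant, the
// operands are swapped and the condition commuted so the constant can be
// encoded as an immediate.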
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
                         ? instr->TrueDestination(chunk_)
                         : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to false block label.
      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
                 left_reg, right_reg);

      EmitBranchF(instr, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand(0);

      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(left);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(left);
          cmp_right = Operand(value);
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(right);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(right);
          cmp_right = Operand(value);
        }
        // We commuted the operands, so commute the condition.
        cond = CommuteCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(instr, cond, cmp_left, cmp_right);
    }
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  EmitBranch(instr, eq, left, Operand(right));
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ li(at, Operand(factory()->the_hole_value()));
    EmitBranch(instr, eq, input_reg, Operand(at));
    return;
  }

  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  EmitFalseBranchF(instr, eq, input_reg, input_reg);

  Register scratch = scratch0();
  __ FmoveHigh(scratch, input_reg);
  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
}

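// EmitIsString loads the instance type of |input| into |temp1| and returns
// the condition (lt, against FIRST_NONSTRING_TYPE) that the caller must
// branch on; Smis jump straight to |is_not_string| unless the value is
// statically known to be a heap object.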
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ GetObjectType(input, temp1, temp1);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond, temp1,
             Operand(FIRST_NONSTRING_TYPE));
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(instr, eq, at, Operand(zero_reg));
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne, at, Operand(zero_reg));
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));

  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  __ LoadRoot(at, Heap::kTrueValueRootIndex);
  EmitBranch(instr, eq, v0, Operand(at));
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(instr,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}

// Branches to a label or falls through with the answer in flags.  Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);
  __ GetObjectType(input, temp, temp2);
  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    __ Branch(is_true, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, hs, temp2, Operand(FIRST_FUNCTION_TYPE));
  }

  // Check if the constructor in the map is a function.
  Register instance_type = scratch1();
  DCHECK(!instance_type.is(temp));
  __ GetMapConstructor(temp, temp, temp2, instance_type);

  // Objects with a non-function constructor have class 'Object'.
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ lw(temp, FieldMemOperand(temp,
                               SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a
  // literal. The name in the constructor is internalized because of the way
  // the context is booted.  This routine isn't expected to work for random
  // API-created classes and it doesn't have to because you can't access it
  // with natives syntax.  Since both sides are internalized it is sufficient
  // to use an identity comparison.

  // End with the address of this class_name instance in temp register.
  // On MIPS, the caller must do the comparison with Handle<String> class_name.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq, temp, Operand(class_name));
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(instr, eq, temp, Operand(instr->map()));
}

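// DoHasInPrototypeChainAndBranch walks {object}'s prototype chain, branching
// true when {prototype} is found and false upon reaching null. Objects that
// need access checks and proxies deoptimize, since their prototypes cannot be
// inspected directly here. The loop back edge loads the next map in the
// branch delay slot.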
void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = scratch0();
  Register const object_instance_type = scratch1();
  Register const object_prototype = object_map;
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object.  It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    __ SmiTst(object, at);
    EmitFalseBranch(instr, eq, at, Operand(zero_reg));
  }

  // Loop through the {object}'s prototype chain looking for the {prototype}.
  __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ bind(&loop);

  // Deoptimize if the object needs to be access checked.
  __ lbu(object_instance_type,
         FieldMemOperand(object_map, Map::kBitFieldOffset));
  __ And(object_instance_type, object_instance_type,
         Operand(1 << Map::kIsAccessCheckNeeded));
  DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
               Operand(zero_reg));
  // Deoptimize for proxies.
  __ lbu(object_instance_type,
         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
  DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
               Operand(JS_PROXY_TYPE));

  __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
  __ LoadRoot(at, Heap::kNullValueRootIndex);
  EmitFalseBranch(instr, eq, object_prototype, Operand(at));
  EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
  __ Branch(USE_DELAY_SLOT, &loop);
  __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // On MIPS there is no need for a "no inlined smi code" marker (nop).

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
  Label done, check;
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
  __ bind(&check);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ bind(&done);
}

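// DoReturn tears down the frame (mov sp, fp; Pop ra, fp) when one was built,
// then adjusts sp past the arguments: (parameter_count + 1) slots, parameters
// plus receiver, when the count is a compile-time constant, or an Lsa-scaled
// amount for stubs, where the count arrives as a Smi in a register.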
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in v0. Since we're leaving the
    // code managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ push(v0);
    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  if (NeedsEagerFrame()) {
    __ mov(sp, fp);
    __ Pop(ra, fp);
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
    if (sp_delta != 0) {
      __ Addu(sp, sp, Operand(sp_delta));
    }
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    __ Lsa(sp, sp, reg, kPointerSizeLog2);
  }

  __ Jump(ra);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  __ lw(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
    } else {
      Label is_not_hole;
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at));
    } else {
      __ Branch(&skip_assignment, ne, scratch, Operand(at));
    }
  }

  __ sw(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch0(),
                              GetRAState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}

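// DoLoadNamedField handles three cases: loads from external memory, double
// fields (loaded directly with ldc1), and tagged fields. Out-of-object
// properties take one extra indirection through the properties backing store
// at JSObject::kPropertiesOffset before the actual field load.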
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    __ Load(result, operand, access.representation());
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ ldc1(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  MemOperand operand = FieldMemOperand(object, offset);
  __ Load(result, operand, access.representation());
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Get the prototype or initial map from the function.
  __ lw(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));

  // If the function does not have an initial map, we're done.
  Label done;
  __ GetObjectType(result, scratch, scratch);
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}

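// DoAccessArgumentsAt: the arguments area lies below the frame pointer, with
// two words between fp and the last argument, so argument i (0-based, with
// |length| arguments on the stack) lives at
//   MemOperand(arguments, (length - i + 1) * kPointerSize).
// The branches below simply fold whichever parts of that expression are
// compile-time constants.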
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  // There are two words between the frame pointer and the last argument.
  // Subtracting the index from the length accounts for one of them; adding
  // one accounts for the other.
  if (instr->length()->IsConstantOperand()) {
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (instr->index()->IsConstantOperand()) {
      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
      int index = (const_length - const_index) + 1;
      __ lw(result, MemOperand(arguments, index * kPointerSize));
    } else {
      Register index = ToRegister(instr->index());
      __ li(at, Operand(const_length + 1));
      __ Subu(result, at, index);
      __ Lsa(at, arguments, result, kPointerSizeLog2);
      __ lw(result, MemOperand(at));
    }
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister(instr->length());
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = const_index - 1;
    if (loc != 0) {
      __ Subu(result, length, Operand(loc));
      __ Lsa(at, arguments, result, kPointerSizeLog2);
      __ lw(result, MemOperand(at));
    } else {
      __ Lsa(at, arguments, length, kPointerSizeLog2);
      __ lw(result, MemOperand(at));
    }
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    __ Subu(result, length, index);
    __ Addu(result, result, 1);
    __ Lsa(at, arguments, result, kPointerSizeLog2);
    __ lw(result, MemOperand(at));
  }
}

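// DoLoadKeyedExternalArray loads from typed-array backing stores. The key is
// scaled by the element size; for a Smi key the tag bit is absorbed into the
// scale (shift_size = element_size_shift - kSmiTagSize). Float32 loads are
// widened to double via cvt_d_s, and uint32 loads deopt on values >= 2^31
// unless the result is known to remain uint32.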
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    FPURegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
    } else {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
    }
    if (elements_kind == FLOAT32_ELEMENTS) {
      __ lwc1(result, MemOperand(scratch0(), base_offset));
      __ cvt_d_s(result, result);
    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS.
      __ ldc1(result, MemOperand(scratch0(), base_offset));
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size, base_offset);
    switch (elements_kind) {
      case INT8_ELEMENTS:
        __ lb(result, mem_operand);
        break;
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ lbu(result, mem_operand);
        break;
      case INT16_ELEMENTS:
        __ lh(result, mem_operand);
        break;
      case UINT16_ELEMENTS:
        __ lhu(result, mem_operand);
        break;
      case INT32_ELEMENTS:
        __ lw(result, mem_operand);
        break;
      case UINT32_ELEMENTS:
        __ lw(result, mem_operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
                       result, Operand(0x80000000));
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);

  int base_offset = instr->base_offset();
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    base_offset += constant_key * kDoubleSize;
  }
  __ Addu(scratch, elements, Operand(base_offset));

  if (!key_is_constant) {
    key = ToRegister(instr->key());
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ Lsa(scratch, scratch, key, shift_size);
  }

  __ ldc1(result, MemOperand(scratch));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch,
                 Operand(kHoleNanUpper32));
  }
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
    } else {
      __ Lsa(scratch, elements, key, kPointerSizeLog2);
    }
  }
  __ lw(result, MemOperand(store_base, offset));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ SmiTst(result, scratch);
      DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
                   Operand(zero_reg));
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result,
                   Operand(scratch));
    }
  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    __ Branch(&done, ne, result, Operand(scratch));
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
      // it needs to bail out.
      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
      __ lw(result, FieldMemOperand(result, PropertyCell::kValueOffset));
      DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
                   Operand(Smi::FromInt(Isolate::kProtectorValid)));
    }
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ bind(&done);
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_fixed_typed_array()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}

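// PrepareKeyedOperand builds the MemOperand for a keyed access. A shift_size
// of -1 denotes a Smi key addressing byte-sized elements: shifting the tagged
// key right by one both untags it and applies the (zero) element scale. The
// code uses srl in the zero-offset case and sra otherwise; for valid
// non-negative Smi keys the two agree.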
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int base_offset) {
  if (key_is_constant) {
    return MemOperand(base, (constant_key << element_size) + base_offset);
  }

  if (base_offset == 0) {
    if (shift_size >= 0) {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    } else {
      DCHECK_EQ(-1, shift_size);
      __ srl(scratch0(), key, 1);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    }
  }

  if (shift_size >= 0) {
    __ sll(scratch0(), key, shift_size);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0(), base_offset);
  } else {
    DCHECK_EQ(-1, shift_size);
    __ sra(scratch0(), key, 1);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0(), base_offset);
  }
}

void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register temp = scratch1();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ Subu(result, sp, 2 * kPointerSize);
  } else if (instr->hydrogen()->arguments_adaptor()) {
    // Check if the calling frame is an arguments adaptor frame.
    __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ lw(result,
          MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
    __ Xor(temp, result,
           Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
  } else {
    __ mov(result, fp);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // An arguments adaptor frame is present. Get the argument length from it.
  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}

void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, result_in_receiver;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ lw(scratch,
          FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ lw(scratch,
          FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for builtins.
    int32_t strict_mode_function_mask =
        1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
    int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
    __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
    __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
  }

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ SmiTst(receiver, scratch);
  DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch,
               Operand(FIRST_JS_RECEIVER_TYPE));

  __ Branch(&result_in_receiver);
  __ bind(&global_object);
  __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ lw(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
  __ lw(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));

  if (result.is(receiver)) {
    __ bind(&result_in_receiver);
  } else {
    Label result_ok;
    __ Branch(&result_ok);
    __ bind(&result_in_receiver);
    __ mov(result, receiver);
    __ bind(&result_ok);
  }
}

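// DoApplyArguments pushes at most kArgumentsLimit arguments in a loop that
// keeps the scaled index computation in the branch delay slot (USE_DELAY_SLOT
// on both the entry test and the back edge). The receiver register (a0) is
// then reused as the actual argument count that InvokeFunction expects.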
2962void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2963  Register receiver = ToRegister(instr->receiver());
2964  Register function = ToRegister(instr->function());
2965  Register length = ToRegister(instr->length());
2966  Register elements = ToRegister(instr->elements());
2967  Register scratch = scratch0();
2968  DCHECK(receiver.is(a0));  // Used for parameter count.
2969  DCHECK(function.is(a1));  // Required by InvokeFunction.
2970  DCHECK(ToRegister(instr->result()).is(v0));
2971
2972  // Copy the arguments to this function possibly from the
2973  // adaptor frame below it.
2974  const uint32_t kArgumentsLimit = 1 * KB;
2975  DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length,
2976               Operand(kArgumentsLimit));
2977
2978  // Push the receiver and use the register to keep the original
2979  // number of arguments.
2980  __ push(receiver);
2981  __ Move(receiver, length);
  // The arguments are located one pointer size past the elements pointer.
  __ Addu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
  __ sll(scratch, length, 2);
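  // Both the entry branch above and the loop-back branch below fill their
  // delay slot with the sll, so scratch holds length * kPointerSize (the
  // offset of the next argument to copy) whenever the loop body executes.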
  __ bind(&loop);
  __ Addu(scratch, elements, scratch);
  __ lw(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Subu(length, length, Operand(1));
  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
  __ sll(scratch, length, 2);

  __ bind(&invoke);

  InvokeFlag flag = CALL_FUNCTION;
  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
    DCHECK(!info()->saves_caller_doubles());
    // TODO(ishell): drop current frame before pushing arguments to the stack.
    flag = JUMP_FUNCTION;
    ParameterCount actual(a0);
    // It is safe to use t0, t1 and t2 as scratch registers here given that
    // we are not going to return to the caller function anyway.
    PrepareForTailCall(actual, t0, t1, t2);
  }

  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, at);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    DCHECK(result.is(cp));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  __ li(scratch0(), instr->hydrogen()->declarations());
  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  __ Push(scratch0(), scratch1());
  __ li(scratch0(), instr->hydrogen()->feedback_vector());
  __ Push(scratch0());
  CallRuntime(Runtime::kDeclareGlobals, instr);
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 bool is_tail_call, LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  Register function_reg = a1;
  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    // Change context.
    __ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize new target and number of actual arguments.
    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
    __ li(a0, Operand(arity));

    bool is_self_call = function.is_identical_to(info()->closure());

    // Invoke function.
    if (is_self_call) {
      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
      if (is_tail_call) {
        __ Jump(self, RelocInfo::CODE_TARGET);
      } else {
        __ Call(self, RelocInfo::CODE_TARGET);
      }
    } else {
      __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
      if (is_tail_call) {
        __ Jump(at);
      } else {
        __ Call(at);
      }
    }

    if (!is_tail_call) {
      // Set up deoptimization.
      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
    }
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(arity);
    ParameterCount expected(formal_parameter_count);
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(function_reg, expected, actual, flag, generator);
  }
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
               Operand(at));

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(t0) ? a0 : t0;
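    // Each tmpN falls back to a0 when input occupies that register, so the
    // four temporaries are distinct from input and from each other.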

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp1.
    if (!tmp1.is(v0)) __ mov(tmp1, v0);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
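    // Clearing the sign bit of the high word computes |value|; the mantissa
    // word is copied over unchanged.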
    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
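  // The mov above executes in the branch delay slot, so non-negative inputs
  // reach &done with result already set.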
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
               Operand(zero_reg));
  __ bind(&done);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->value());
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->temp());

  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     input,
                     scratch1,
                     double_scratch0(),
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
               Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ Mfhc1(scratch1, input);
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
                 Operand(zero_reg));
    __ bind(&done);
  }
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ Mfhc1(result, input);
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  Label skip1;
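  // A biased exponent of at most kExponentBias - 2 means |input| < 0.5,
  // since 0.5 itself has biased exponent kExponentBias - 1.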
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
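  // A biased exponent of kExponentBias + 32 or more means |input| >= 2^32.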
  DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());
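  // Math.round(x) is computed as floor(x + 0.5): the addition happens here
  // and the floor is done by the kRoundToMinusInf truncation below.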

  // Check the sign of the result: if the sign changed, the input value
  // was in [-0.5, 0[ and the result should be -0.
  __ Mfhc1(result, double_scratch0());
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // ARM uses 'mi' here, which is 'lt'
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result,
                 Operand(zero_reg));
  } else {
    Label skip2;
    // ARM uses 'mi' here, which is 'lt'
    // Negating it results in 'ge'
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  Register except_flag = scratch;
  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     double_scratch0(),
                     at,
                     double_scratch1,
                     except_flag);

  DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
               Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ Mfhc1(scratch, input);
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch,
                 Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
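  // Math.fround rounds to the nearest float32, implemented by narrowing to
  // single precision and widening back to double.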
  __ cvt_s_d(result.low(), input);
  __ cvt_d_s(result, result.low());
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqrt_d(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->temp());

  DCHECK(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ Move(temp, static_cast<double>(-V8_INFINITY));
  // Set up Infinity.
  __ Neg_d(result, temp);
  // result is overwritten if the branch is not taken.
  __ BranchF(&done, NULL, eq, temp, input);

  // Add +0 to convert -0 to +0.
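  // (IEEE-754 sqrt(-0) is -0, but Math.pow(-0, 0.5) must return +0.)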
  __ add_d(result, input, kDoubleRegZero);
  __ sqrt_d(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(f4));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(f2));
  DCHECK(ToDoubleRegister(instr->result()).is(f0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!t3.is(tagged_exponent));
    __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, t3, Operand(at));
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}

void LCodeGen::DoMathCos(LMathCos* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}

void LCodeGen::DoMathSin(LMathSin* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}

void LCodeGen::DoMathExp(LMathExp* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ Clz(result, input);
}

void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
                                  Register scratch1, Register scratch2,
                                  Register scratch3) {
#ifdef DEBUG
  if (actual.is_reg()) {
    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
  } else {
    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
  }
#endif
  if (FLAG_code_comments) {
    if (actual.is_reg()) {
      Comment(";;; PrepareForTailCall, actual: %s {",
              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
                  actual.reg().code()));
    } else {
      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
    }
  }

  // Check if next frame is an arguments adaptor frame.
  Register caller_args_count_reg = scratch1;
  Label no_arguments_adaptor, formal_parameter_count_loaded;
  __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
  __ Branch(&no_arguments_adaptor, ne, scratch3,
            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));

  // Drop current frame and load arguments count from arguments adaptor frame.
  __ mov(fp, scratch2);
  __ lw(caller_args_count_reg,
        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);
  __ Branch(&formal_parameter_count_loaded);

  __ bind(&no_arguments_adaptor);
  // Load caller's formal parameter count.
  __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(scratch1,
        FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
  __ li(caller_args_count_reg, Operand(info()->literal()->parameter_count()));

  __ bind(&formal_parameter_count_loaded);
  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);

  Comment(";;; }");
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  HInvokeFunction* hinstr = instr->hydrogen();
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(instr->HasPointerMap());

  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;

  if (is_tail_call) {
    DCHECK(!info()->saves_caller_doubles());
    ParameterCount actual(instr->arity());
    // It is safe to use t0, t1 and t2 as scratch registers here given that
    // we are not going to return to the caller function anyway.
    PrepareForTailCall(actual, t0, t1, t2);
  }

  Handle<JSFunction> known_function = hinstr->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(instr->arity());
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(a1, no_reg, actual, flag, generator);
  } else {
    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
                      instr->arity(), is_tail_call, instr);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(v0));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Jump(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ Call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Call(target);
    }
    generator.AfterCall();
  }
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  __ li(a2, instr->hydrogen()->site());

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // If the single argument (the new length) is non-zero, the resulting
      // array may end up with holes, so use the holey variant of the kind.
      __ lw(t1, MemOperand(sp, 0));
      __ Branch(&packed_case, eq, t1, Operand(zero_reg));

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ Addu(code_object, code_object,
          Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sw(code_object,
        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ Addu(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ Addu(result, base, offset);
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, FieldMemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ li(scratch, Operand(transition));
    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           scratch,
                           temp,
                           GetRAState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  if (access.IsInobject()) {
    MemOperand operand = FieldMemOperand(object, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  } else {
    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    MemOperand operand = FieldMemOperand(scratch, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
  Operand operand(0);
  Register reg;
  if (instr->index()->IsConstantOperand()) {
    operand = ToOperand(instr->index());
    reg = ToRegister(instr->length());
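    // The comparison operands are swapped (length against index), so the
    // condition must be commuted to preserve the meaning of the check.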
    cc = CommuteCondition(cc);
  } else {
    reg = ToRegister(instr->index());
    operand = ToOperand(instr->length());
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ Branch(&done, NegateCondition(cc), reg, operand);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand);
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
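  // Smi keys are already shifted left by kSmiTagSize, so they need one bit
  // less scaling than untagged integer keys.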
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    FPURegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Addu(address, external_pointer,
                Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      __ Lsa(address, external_pointer, key, shift_size);
    }

    if (elements_kind == FLOAT32_ELEMENTS) {
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(address, base_offset));
    } else {  // Storing doubles, not floats.
      __ sdc1(value, MemOperand(address, base_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        base_offset);
    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ sb(value, mem_operand);
        break;
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ sh(value, mem_operand);
        break;
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ sw(value, mem_operand);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  Register scratch_1 = scratch1();
  DoubleRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int base_offset = instr->base_offset();
  Label not_nan, done;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ Addu(scratch, elements,
            Operand((constant_key << element_size_shift) + base_offset));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ Addu(scratch, elements, Operand(base_offset));
    __ sll(at, ToRegister(instr->key()), shift_size);
    __ Addu(scratch, scratch, at);
  }

  if (instr->NeedsCanonicalization()) {
    Label is_nan;
    // Check for NaN. All NaNs must be canonicalized.
    __ BranchF(NULL, &is_nan, eq, value, value);
    __ Branch(&not_nan);
    // Only load the canonical NaN if the comparison above detected a NaN.
    __ bind(&is_nan);
    __ LoadRoot(scratch_1, Heap::kNanValueRootIndex);
    __ ldc1(double_scratch,
            FieldMemOperand(scratch_1, HeapNumber::kValueOffset));
    __ sdc1(double_scratch, MemOperand(scratch, 0));
    __ Branch(&done);
  }

  __ bind(&not_nan);
  __ sdc1(value, MemOperand(scratch, 0));
  __ bind(&done);
}


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HStoreKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
    } else {
      __ Lsa(scratch, elements, key, kPointerSizeLog2);
    }
  }
  __ sw(value, MemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Addu(key, store_base, Operand(offset));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetRAState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by case: external (typed) arrays, fast double arrays, and
  // everything else.
  if (instr->is_fixed_typed_array()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = v0;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ jmp(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ Branch(deferred->entry(), le, ToRegister(current_capacity),
              Operand(constant_key));
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ Branch(deferred->entry(), ge, ToRegister(key),
              Operand(constant_capacity));
  } else {
    __ Branch(deferred->entry(), ge, ToRegister(key),
              Operand(ToRegister(current_capacity)));
  }

  if (instr->elements()->IsRegister()) {
    __ mov(result, ToRegister(instr->elements()));
  } else {
    __ lw(result, ToMemOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = v0;
  __ mov(result, zero_reg);

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ mov(result, ToRegister(instr->object()));
    } else {
      __ lw(result, ToMemOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      LConstantOperand* constant_key = LConstantOperand::cast(key);
      int32_t int_key = ToInteger32(constant_key);
      if (Smi::IsValid(int_key)) {
        __ li(a3, Operand(Smi::FromInt(int_key)));
      } else {
        Abort(kArrayIndexConstantValueTooBig);
      }
    } else {
      Label is_smi;
      __ SmiTagCheckOverflow(a3, ToRegister(key), at);
      // Deopt if the key is outside Smi range. The stub expects Smi and would
      // bump the elements into dictionary mode (and trigger a deopt) anyway.
      __ BranchOnNoOverflow(&is_smi, at);
      RestoreRegistersStateStub stub(isolate());
      __ push(ra);
      __ CallStub(&stub);
      DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow);
      __ bind(&is_smi);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
    __ mov(a0, result);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ SmiTst(result, at);
  DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ li(new_map_reg, Operand(to_map));
    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object_reg,
                         new_map_reg,
                         scratch,
                         GetRAState(),
                         kDontSaveFPRegs);
  } else {
    DCHECK(object_reg.is(a0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ li(a1, Operand(to_map));
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(al, instr);
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(v0);
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  DCHECK(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxOneByteCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Lsa(result, result, char_code, kPointerSizeLog2);
  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ lw(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  __ Cvt_d_uw(ToDoubleRegister(output), ToRegister(input), f22);
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());
  Register overflow = scratch0();

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTagCheckOverflow(dst, src, overflow);
  __ BranchOnOverflow(deferred->entry(), overflow);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
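      // The arithmetic shift in SmiUntag copied bit 30 into bit 31; since
      // those bits disagreed in the original value, flipping bit 31 below
      // restores it.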
      __ Xor(src, src, Operand(0x80000000));
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    __ Cvt_d_uw(dbl_scratch, src, f22);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ mov(dst, zero_reg);

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);
    // Reset the context register.
    if (!dst.is(cp)) {
      __ mov(cp, zero_reg);
    }
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, dst);
  }
  // Done. Store the value in dbl_scratch into the allocated heap number.
  __ bind(&done);
  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address, tag it.
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this);
  // Reset the context register.
  if (!reg.is(cp)) {
    __ mov(cp, zero_reg);
  }
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
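    // A uint32 value fits in a smi only if bits 30 and 31 are both clear,
    // i.e. if it is less than 2^30.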
4407    __ And(at, input, Operand(0xc0000000));
4408    DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
4409  }
4410  if (hchange->CheckFlag(HValue::kCanOverflow) &&
4411      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, at);
    DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
  } else {
    __ SmiTag(output, input);
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, the value of scratch won't be zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
                 Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}


void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan = instr->truncating();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (can_convert_undefined_to_nan) {
      __ Branch(&convert, ne, scratch, Operand(at));
    } else {
      DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
                   Operand(at));
    }
    // Load heap number.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
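      // Detect -0.0: its low word is zero and its high word contains only the
      // sign bit (HeapNumber::kSignMask).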
      __ mfc1(at, result_reg.low());
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ Mfhc1(scratch, result_reg);
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch,
                   Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
                   input_reg, Operand(at));
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ Branch(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
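    // Truncation accepts heap numbers and oddballs; the oddball case relies
    // on the oddball's cached to-number value being loadable at the same
    // offset as a heap number's value.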
    Label truncate;
    __ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at));
    __ mov(scratch2, input_reg);  // In delay slot.
    __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1,
                 Operand(ODDBALL_TYPE));
    __ bind(&truncate);
    __ TruncateHeapNumberToI(input_reg, scratch2);
  } else {
    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
                 Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       input_reg,
                       double_scratch,
                       scratch1,
                       double_scratch2,
                       except_flag,
                       kCheckForInexactConversion);

    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ Mfhc1(scratch1, double_scratch);
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
                   Operand(zero_reg));
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Let the deferred code handle the HeapObject case.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    // Smi to int32 conversion.
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ Mfhc1(scratch1, double_input);
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = LCodeGen::scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ Mfhc1(scratch1, double_input);
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
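  // Finally smi-tag the int32 result; SmiTagCheckOverflow leaves a negative
  // value in scratch1 if the value does not fit in a smi.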
  __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
  DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch1,
               Operand(zero_reg));
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input), at);
  DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg));
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input), at);
    DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
  }
}


void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
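  // Load the view's backing buffer, then its bit field, and deopt if the
  // WasNeutered bit is set: accessing a neutered buffer is out of bounds.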
  Register view = ToRegister(instr->view());
  Register scratch = scratch0();

  __ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
  DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
               Operand(zero_reg));
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
                   Operand(first));
    } else {
      DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch,
                   Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch,
                     Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

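    // A power-of-two mask isolates a single bit, so the tag can only be zero
    // or that same bit; a single And plus a zero/non-zero test suffices.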
    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr,
                   DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
                   Operand(tag));
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
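    // Objects in new space may move, so compare through a cell that the GC
    // keeps up to date instead of embedding the object address in the code.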
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ li(at, Operand(cell));
    __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
    DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
  } else {
    DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
                 Operand(object));
  }
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  Label deopt, done;
  // If the map is not deprecated the migration attempt does not make sense.
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ lw(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
  __ And(at, scratch0(), Operand(Map::Deprecated::kMask));
  __ Branch(&deopt, eq, at, Operand(zero_reg));

  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ mov(cp, zero_reg);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, scratch0());
  }
  __ SmiTst(scratch0(), at);
  __ Branch(&done, ne, at, Operand(zero_reg));

  __ bind(&deopt);
  // For the "al" condition the operands are unused, so just pass zero_reg
  // there.
  DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, zero_reg,
               Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);
  __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
  } else {
    DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map));
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number.
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);

  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }
  DCHECK(!instr->hydrogen()->IsAllocationFolded());

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= kMaxRegularHeapObjectSize);
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size - kHeapObjectTag));
    } else {
      __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
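    // Walk backwards from the end of the object, storing the filler map into
    // each pointer-sized word.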
    Label loop;
    __ bind(&loop);
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Addu(at, result, Operand(scratch));
    __ sw(scratch2, MemOperand(at));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ Push(Smi::FromInt(size));
    } else {
      // We should never get here at runtime => abort.
      __ stop("invalid allocation size");
      return;
    }
  }

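  // Encode the double-alignment requirement and the target space into a small
  // integer and pass it, smi-tagged, as the second runtime argument.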
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
    if (instr->hydrogen()->IsOldSpaceAllocation()) {
      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
      allocation_flags =
          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
    }
    // If the allocation-folding dominator's allocation triggered a GC, the
    // allocation happened in the runtime. We have to reset the top pointer to
    // virtually undo the allocation.
    ExternalReference allocation_top =
        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
    Register top_address = scratch0();
    __ Subu(v0, v0, Operand(kHeapObjectTag));
    __ li(top_address, Operand(allocation_top));
    __ sw(v0, MemOperand(top_address));
    __ Addu(v0, v0, Operand(kHeapObjectTag));
  }
}

void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(a3));
  DCHECK(ToRegister(instr->result()).is(v0));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
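  // A smi is always a number, so handle it inline and skip the Typeof stub.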
  __ JumpIfNotSmi(value_register, &do_call);
  __ li(v0, Operand(isolate()->factory()->number_string()));
  __ jmp(&end);
  __ bind(&do_call);
  Callable callable = CodeFactory::Typeof(isolate());
  CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  &cmp1,
                                                  &cmp2);

  DCHECK(cmp1.is_valid());
  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register* cmp1,
                                 Operand* cmp2) {
  // This function makes heavy use of branch delay slots, using them to load
  // values that are usable regardless of the type of the input register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    *cmp1 = input;
    *cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(FIRST_NONSTRING_TYPE);
    final_branch_condition = lt;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    *cmp1 = at;
    *cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label);
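    // typeof is "function" only for maps that are callable and not
    // undetectable, so test both bits with a single mask.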
    __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = scratch;
    *cmp2 = Operand(1 << Map::kIsCallable);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ GetObjectType(input, scratch, scratch1());
    __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
    // Check for callable or undetectable objects => false.
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(at, scratch,
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
               Operand(zero_reg));
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());

  Label use_cache, call_runtime;
  DCHECK(object.is(a0));
  __ CheckEnumCache(&call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
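  // An EnumLength of zero means that there is nothing to enumerate, so the
  // empty fixed array is used instead of the enum cache.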
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::kZero));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
               Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
               Operand(scratch0()));
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

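  // The low bit of the untagged index flags a mutable double field, which
  // must be boxed via the deferred runtime call. On the inline path the flag
  // is zero, so the shifts below turn the index into a byte offset directly.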
  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  __ sra(index, index, 1);

  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8