1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "mips/lithium-codegen-mips.h"
31#include "mips/lithium-gap-resolver-mips.h"
32#include "code-stubs.h"
33#include "stub-cache.h"
34
35namespace v8 {
36namespace internal {
37
38
// CallWrapper that records a safepoint (with the given pointer map and
// deopt mode) immediately after a call emitted through the macro
// assembler, keeping the safepoint table in sync with call sites.
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  // Nothing needs to happen before the call.
  virtual void BeforeCall(int call_size) const { }

  // Record the safepoint at the call's return address.
  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;      // Code generator used to record the safepoint.
  LPointerMap* pointers_;  // Map of live tagged values at the call.
  Safepoint::DeoptMode deopt_mode_;  // Lazy-deopt mode for the safepoint.
};
60
61
62#define __ masm()->
63
// Drives code generation for the whole chunk: prologue, instruction
// bodies, deferred code, the deoptimization jump table and the safepoint
// table.  Returns false if any stage aborted.
bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  // Short-circuit evaluation: a failing stage stops generation immediately.
  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}
80
81
// Finalizes the generated Code object: stack-slot count, safepoint-table
// offset, weak embedded-map dependencies and deoptimization data.
void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (FLAG_weak_embedded_maps_in_optimized_code) {
    RegisterDependentCodeForEmbeddedMaps(code);
  }
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}
92
93
94void LChunkBuilder::Abort(BailoutReason reason) {
95  info()->set_bailout_reason(reason);
96  status_ = ABORTED;
97}
98
99
// Emits a printf-style comment into the generated code stream.  No-op
// unless --code-comments is enabled.
void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  // Format into a fixed-size stack buffer.
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}
116
117
// Emits the function prologue: receiver fix-up for strict/native code,
// frame construction, stack-slot reservation (optionally zapped in debug
// builds), callee double-register spilling and local context allocation.
// Returns false if code generation aborted.
bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    // Allow stopping at function entry via --stop_at=<name>.
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.

    // Strict mode functions and builtins need to replace the receiver
    // with undefined when called as functions (without an explicit
    // receiver object). t1 is zero for method calls and non-zero for
    // function calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      Label ok;
      __ Branch(&ok, eq, t1, Operand(zero_reg));

      // The receiver slot sits just above the pushed parameters.
      int receiver_offset = scope()->num_parameters() * kPointerSize;
      __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
      __ sw(a2, MemOperand(sp, receiver_offset));
      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      // Stub frames carry a STUB marker instead of a function.
      __ Push(ra, fp, cp);
      __ Push(Smi::FromInt(StackFrame::STUB));
      // Adjust FP to point to saved FP.
      __ Addu(fp, sp, Operand(2 * kPointerSize));
    } else {
      // The following three instructions must remain together and unmodified
      // for code aging to work properly.
      __ Push(ra, fp, cp, a1);
      // Add unused nop to ensure prologue sequence is identical for
      // full-codegen and lithium-codegen.
      __ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
      // Adj. FP to point to saved FP.
      __ Addu(fp, sp, Operand(2 * kPointerSize));
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      // Fill the reserved slots with kSlotsZapValue so stale reads are
      // easy to spot.  a0/a1 are saved around the loop; the store offset
      // of 2 * kPointerSize compensates for those two pushes.
      __ Subu(sp,  sp, Operand(slots * kPointerSize));
      __ push(a0);
      __ push(a1);
      __ Addu(a0, sp, Operand(slots *  kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Subu(a0, a0, Operand(kPointerSize));
      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ pop(a1);
      __ pop(a0);
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    Comment(";;; Save clobbered callee double registers");
    // NOTE(review): stores go into the low end of the area reserved above;
    // presumably GetStackSlotCount() includes the double save area --
    // confirm against the register allocator.
    int count = 0;
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    while (!save_iterator.Done()) {
      __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(sp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in a1.
    __ push(a1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    // The call above cannot lazily deoptimize; record a plain safepoint.
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both v0 and cp.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in cp.
    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        __ RecordWriteContextSlot(
            cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}
248
249
// Emits native code for every (non-replaced) Lithium instruction in the
// chunk, in order.  Returns false if generation aborted.
bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);

    // Don't emit code for basic blocks with a replacement.
    if (instr->IsLabel()) {
      emit_instructions = !LLabel::cast(instr)->HasReplacement();
    }
    if (!emit_instructions) continue;

    // Annotate the output with the instruction index, hydrogen id and
    // mnemonic when code comments are enabled.
    if (FLAG_code_comments && instr->HasInterestingComment(this)) {
      Comment(";;; <@%d,#%d> %s",
              current_instruction_,
              instr->hydrogen_value()->id(),
              instr->Mnemonic());
    }

    RecordAndUpdatePosition(instr->position());

    instr->CompileToNative(this);
  }
  // Make sure there is room to patch a lazy deopt at the end of the body.
  EnsureSpaceForLazyDeopt();
  last_lazy_deopt_pc_ = masm()->pc_offset();
  return !is_aborted();
}
279
280
// Emits all deferred code stubs collected during body generation.  Stub
// code that runs without an eager frame builds (and later tears down) a
// STUB frame around each deferred sequence.  Returns false on abort.
bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      // Attribute the deferred code to the source position of the
      // instruction that created it.
      int pos = instructions_->at(code->instruction_index())->position();
      RecordAndUpdatePosition(pos);

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build a STUB frame: saved cp/fp/ra plus a STUB marker.
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        // Drop the STUB marker and restore the saved registers.
        __ pop(at);
        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
323
324
// Emits the deoptimization jump table.  Each entry loads its deopt entry
// address and calls it; entries that need a frame share a single
// frame-building sequence via the needs_frame label.
bool LCodeGen::GenerateDeoptJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 16bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table.
  // NOTE(review): the factor 12 is presumably the maximum number of
  // instructions emitted per table entry -- confirm if the entry
  // sequence below changes.
  if (!is_int16((masm()->pc_offset() / Assembler::kInstrSize) +
      deopt_jump_table_.length() * 12)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  // Keep the table contiguous: no trampoline pool in the middle of it.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    Address entry = deopt_jump_table_[i].address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
    if (deopt_jump_table_[i].needs_frame) {
      // Emit the frame-building sequence once and branch to it from
      // later entries.
      if (needs_frame.is_bound()) {
        __ Branch(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(2 * kPointerSize));
        __ Call(t9);
      }
    } else {
      __ Call(t9);
    }
  }
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
380
381
// Emits the accumulated safepoint table at the end of the code object.
bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}
387
388
389Register LCodeGen::ToRegister(int index) const {
390  return Register::FromAllocationIndex(index);
391}
392
393
394DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
395  return DoubleRegister::FromAllocationIndex(index);
396}
397
398
399Register LCodeGen::ToRegister(LOperand* op) const {
400  ASSERT(op->IsRegister());
401  return ToRegister(op->index());
402}
403
404
// Materializes |op| into a general-purpose register.  Register operands
// are returned directly; constants and stack slots are loaded into
// |scratch|, which is then returned.  Double constants are unsupported
// and abort.
Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle();
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      // Load the untagged integer value.
      ASSERT(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      // Load the value as a Smi.
      ASSERT(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      // Tagged constants are loaded as heap objects.
      ASSERT(r.IsTagged());
      __ LoadObject(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    // Load spilled values from their stack slot.
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}
433
434
435DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
436  ASSERT(op->IsDoubleRegister());
437  return ToDoubleRegister(op->index());
438}
439
440
// Materializes |op| into a double register.  Double-register operands are
// returned directly; integer constants are converted via |flt_scratch|
// into |dbl_scratch|; stack slots are loaded into |dbl_scratch|.  Double
// and tagged immediates are unsupported and abort.
DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle();
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      // Move the int32 to the FPU and convert word -> double.
      ASSERT(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}
470
471
472Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
473  HConstant* constant = chunk_->LookupConstant(op);
474  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
475  return constant->handle();
476}
477
478
479bool LCodeGen::IsInteger32(LConstantOperand* op) const {
480  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
481}
482
483
484bool LCodeGen::IsSmi(LConstantOperand* op) const {
485  return chunk_->LookupLiteralRepresentation(op).IsSmi();
486}
487
488
489int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
490  return ToRepresentation(op, Representation::Integer32());
491}
492
493
494int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
495                                   const Representation& r) const {
496  HConstant* constant = chunk_->LookupConstant(op);
497  int32_t value = constant->Integer32Value();
498  if (r.IsInteger32()) return value;
499  ASSERT(r.IsSmiOrTagged());
500  return reinterpret_cast<int32_t>(Smi::FromInt(value));
501}
502
503
504Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
505  HConstant* constant = chunk_->LookupConstant(op);
506  return Smi::FromInt(constant->Integer32Value());
507}
508
509
510double LCodeGen::ToDouble(LConstantOperand* op) const {
511  HConstant* constant = chunk_->LookupConstant(op);
512  ASSERT(constant->HasDoubleValue());
513  return constant->DoubleValue();
514}
515
516
// Converts an LOperand into an assembler Operand.  Handles constants
// (Smi, int32 or tagged; doubles abort) and registers.  Stack slots are
// not supported here -- use ToMemOperand instead.
Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle());
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}
543
544
545MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
546  ASSERT(!op->IsRegister());
547  ASSERT(!op->IsDoubleRegister());
548  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
549  return MemOperand(fp, StackSlotOffset(op->index()));
550}
551
552
553MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
554  ASSERT(op->IsDoubleStackSlot());
555  return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
556}
557
558
// Recursively serializes an environment chain into a deoptimization
// translation: outermost frames first, then this frame's header (by
// frame type) followed by one entry per environment value.
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  // Emit outer frames before this one.
  WriteTranslation(environment->outer(), translation);
  // Use the self-literal id when the environment's closure is the one
  // being compiled; otherwise register it as a deopt literal.
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      // Getter stub frames carry exactly one value: the receiver.
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      // Setter stub frames carry two values: receiver and value.
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  // Serialize each environment value, tracking captured-object state
  // across entries.
  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}
613
614
// Appends a single environment value to the translation.  A
// materialization marker denotes a dematerialized (captured/arguments)
// object whose field values follow the regular environment values and
// are emitted recursively; all other operands map directly onto
// stack-slot, register or literal translation commands.
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    // Duplicated objects reference the first occurrence instead of
    // re-emitting their fields.
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    // Field values live past the regular translation-size values.
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    // Pushed arguments live above the spill slots.
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle());
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}
685
686
687void LCodeGen::CallCode(Handle<Code> code,
688                        RelocInfo::Mode mode,
689                        LInstruction* instr) {
690  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
691}
692
693
// Emits a call to |code|, ensuring room for lazy-deopt patching before
// the call and recording a safepoint (in the requested mode) after it.
void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  EnsureSpaceForLazyDeopt();
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}
705
706
// Emits a runtime call for |instr| and records a simple safepoint with
// lazy-deopt support after the call.
void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  ASSERT(pointers != NULL);
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
718
719
// Emits a runtime call from deferred code, saving double registers
// around it and recording a register-aware safepoint (no lazy deopt).
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
727
728
// Registers |environment| for deoptimization (once): builds its
// translation, assigns it a deoptimization index and, for lazy deopt,
// records the current pc offset.
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    // Count total frames and JS frames along the environment chain.
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}
763
764
// Emits a (possibly conditional) deoptimization for |environment| with
// the given bailout type.  Unconditional deopts with a frame call the
// deopt entry directly; everything else branches through the shared
// deopt jump table, reusing the last entry when possible.
void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1,
                            const Operand& src2) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS.
  // With --deopt-every-n-times=1, force an unconditional deopt the first
  // time this optimized function is entered.
  if (FLAG_deopt_every_n_times == 1 &&
      !info()->IsStub() &&
      info()->opt_count() == id) {
    ASSERT(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    return;
  }

  if (info()->ShouldTrapOnDeopt()) {
    // Stop in the debugger whenever this deopt would be taken.
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (condition == al && frame_is_built_) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
  }
}
817
818
819void LCodeGen::DeoptimizeIf(Condition condition,
820                            LEnvironment* environment,
821                            Register src1,
822                            const Operand& src2) {
823  Deoptimizer::BailoutType bailout_type = info()->IsStub()
824      ? Deoptimizer::LAZY
825      : Deoptimizer::EAGER;
826  DeoptimizeIf(condition, environment, bailout_type, src1, src2);
827}
828
829
// Collects all transitionable maps embedded in |code| via relocation info
// and registers |code| as dependent on each, so the code is deoptimized
// when such a map dies or transitions.
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
  ZoneList<Handle<Map> > maps(1, zone());
  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (mode == RelocInfo::EMBEDDED_OBJECT &&
        it.rinfo()->target_object()->IsMap()) {
      Handle<Map> map(Map::cast(it.rinfo()->target_object()));
      if (map->CanTransition()) {
        maps.Add(map, zone());
      }
    }
  }
#ifdef VERIFY_HEAP
  // This disables verification of weak embedded maps after full GC.
  // AddDependentCode can cause a GC, which would observe the state where
  // this code is not yet in the depended code lists of the embedded maps.
  NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
#endif
  for (int i = 0; i < maps.length(); i++) {
    maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
}
853
854
// Builds the DeoptimizationInputData for |code| from the registered
// environments: translation byte array, literal array, OSR info and one
// entry (ast id, translation index, arguments height, pc) per deopt.
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  // Copying raw deferred handles is safe here; the data is immediately
  // stored into the heap-allocated literal array.
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}
889
890
891int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
892  int result = deoptimization_literals_.length();
893  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
894    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
895  }
896  deoptimization_literals_.Add(literal, zone());
897  return result;
898}
899
900
901void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
902  ASSERT(deoptimization_literals_.length() == 0);
903
904  const ZoneList<Handle<JSFunction> >* inlined_closures =
905      chunk()->inlined_closures();
906
907  for (int i = 0, length = inlined_closures->length();
908       i < length;
909       i++) {
910    DefineDeoptimizationLiteral(inlined_closures->at(i));
911  }
912
913  inlined_function_count_ = deoptimization_literals_.length();
914}
915
916
917void LCodeGen::RecordSafepointWithLazyDeopt(
918    LInstruction* instr, SafepointMode safepoint_mode) {
919  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
920    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
921  } else {
922    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
923    RecordSafepointWithRegisters(
924        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
925  }
926}
927
928
929void LCodeGen::RecordSafepoint(
930    LPointerMap* pointers,
931    Safepoint::Kind kind,
932    int arguments,
933    Safepoint::DeoptMode deopt_mode) {
934  ASSERT(expected_safepoint_kind_ == kind);
935
936  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
937  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
938      kind, arguments, deopt_mode);
939  for (int i = 0; i < operands->length(); i++) {
940    LOperand* pointer = operands->at(i);
941    if (pointer->IsStackSlot()) {
942      safepoint.DefinePointerSlot(pointer->index(), zone());
943    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
944      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
945    }
946  }
947  if (kind & Safepoint::kWithRegisters) {
948    // Register cp always contains a pointer to the context.
949    safepoint.DefinePointerRegister(cp, zone());
950  }
951}
952
953
// Records a plain safepoint: no saved registers, no stack arguments.
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}
958
959
960void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
961  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
962  RecordSafepoint(&empty_pointers, deopt_mode);
963}
964
965
// Records a safepoint at a call that has saved the general-purpose registers.
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}
972
973
// Records a safepoint at a call that has saved both the general-purpose and
// the double registers.
void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}
981
982
983void LCodeGen::RecordPosition(int position) {
984  if (position == RelocInfo::kNoPosition) return;
985  masm()->positions_recorder()->RecordPosition(position);
986}
987
988
989void LCodeGen::RecordAndUpdatePosition(int position) {
990  if (position >= 0 && position != old_position_) {
991    masm()->positions_recorder()->RecordPosition(position);
992    old_position_ = position;
993  }
994}
995
996
997static const char* LabelType(LLabel* label) {
998  if (label->is_loop_header()) return " (loop header)";
999  if (label->is_osr_entry()) return " (OSR entry)";
1000  return "";
1001}
1002
1003
// Binds the label that starts a basic block, emits a disassembly comment
// naming the block, and processes the block's leading gap moves.
void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}
1014
1015
// Delegates emission of a parallel move to the gap resolver.
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}
1019
1020
1021void LCodeGen::DoGap(LGap* gap) {
1022  for (int i = LGap::FIRST_INNER_POSITION;
1023       i <= LGap::LAST_INNER_POSITION;
1024       i++) {
1025    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1026    LParallelMove* move = gap->GetParallelMove(inner_pos);
1027    if (move != NULL) DoParallelMove(move);
1028  }
1029}
1030
1031
// An instruction gap is emitted exactly like a plain gap.
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}
1035
1036
// Parameters are already in their expected locations on function entry,
// so no code is emitted.
void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
1040
1041
// Dispatches an LCallStub to the matching platform code stub and calls it.
// All of these stubs return their result in v0.
void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      // This stub takes its (tagged) argument in a0; it is on top of stack.
      __ lw(a0, MemOperand(sp, 0));
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}
1081
1082
1083void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1084  // Record the address of the first unknown OSR value as the place to enter.
1085  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
1086}
1087
1088
// Emits left % right. Three code paths:
//   1. Statically-known power-of-2 divisor: bit masking, no div unit.
//   2. Profiled fixed power-of-2 right operand: same masking, guarded by a
//      deopt that re-checks the assumption at runtime.
//   3. General case: the MIPS div unit.
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->HasPowerOf2Divisor()) {
    const Register left_reg = ToRegister(instr->left());
    const Register result_reg = ToRegister(instr->result());

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      // For negative left: result = -(-left & (divisor - 1)). The first
      // subu may only go in the branch delay slot when it cannot clobber
      // the value the branch compares (left_reg aliasing result_reg).
      __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
                &left_is_not_negative, ge, left_reg, Operand(zero_reg));
      __ subu(result_reg, zero_reg, left_reg);
      __ And(result_reg, result_reg, divisor - 1);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        // A zero remainder of a negative left would be -0; bail out.
        DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
      }
      __ Branch(USE_DELAY_SLOT, &done);
      __ subu(result_reg, zero_reg, result_reg);  // Runs in the delay slot.
    }

    __ bind(&left_is_not_negative);
    __ And(result_reg, left_reg, divisor - 1);
    __ bind(&done);

  } else if (hmod->fixed_right_arg().has_value) {
    const Register left_reg = ToRegister(instr->left());
    const Register result_reg = ToRegister(instr->result());
    const Register right_reg = ToRegister(instr->right());

    int32_t divisor = hmod->fixed_right_arg().value;
    ASSERT(IsPowerOf2(divisor));

    // Check if our assumption of a fixed right operand still holds.
    DeoptimizeIf(ne, instr->environment(), right_reg, Operand(divisor));

    // Same masking sequence as the statically-known power-of-2 path above.
    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
                &left_is_not_negative, ge, left_reg, Operand(zero_reg));
      __ subu(result_reg, zero_reg, left_reg);
      __ And(result_reg, result_reg, divisor - 1);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
      }
      __ Branch(USE_DELAY_SLOT, &done);
      __ subu(result_reg, zero_reg, result_reg);
    }

    __ bind(&left_is_not_negative);
    __ And(result_reg, left_reg, divisor - 1);
    __ bind(&done);

  } else {
    const Register scratch = scratch0();
    const Register left_reg = ToRegister(instr->left());
    const Register result_reg = ToRegister(instr->result());

    // div runs in the background while we check for special cases.
    Register right_reg = EmitLoadRegister(instr->right(), scratch);
    __ div(left_reg, right_reg);

    Label done;
    // Check for x % 0, we have to deopt in this case because we can't return a
    // NaN.
    if (right->CanBeZero()) {
      DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
    }

    // Check for kMinInt % -1, we have to deopt if we care about -0, because we
    // can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label left_not_min_int;
      __ Branch(&left_not_min_int, ne, left_reg, Operand(kMinInt));
      // TODO(svenpanne) Don't deopt when we don't care about -0.
      DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
      __ bind(&left_not_min_int);
    }

    // TODO(svenpanne) Only emit the test/deopt if we have to.
    // mfhi executes in the branch delay slot, so result_reg holds the
    // remainder on both paths.
    __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
    __ mfhi(result_reg);

    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Negative left with zero remainder would produce -0.
      DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
    }
    __ bind(&done);
  }
}
1181
1182
// Emits result = dividend / divisor and remainder = dividend % divisor for a
// compile-time-constant divisor, deoptimizing via |environment| on division
// by zero or on negation overflow. Powers of two use shift sequences; other
// divisors use magic-number multiplication (Hacker's Delight) when a magic
// number is available, otherwise the div unit.
void LCodeGen::EmitSignedIntegerDivisionByConstant(
    Register result,
    Register dividend,
    int32_t divisor,
    Register remainder,
    Register scratch,
    LEnvironment* environment) {
  ASSERT(!AreAliased(dividend, scratch, at, no_reg));

  uint32_t divisor_abs = abs(divisor);

  int32_t power_of_2_factor =
    CompilerIntrinsics::CountTrailingZeros(divisor_abs);

  switch (divisor_abs) {
    case 0:
      // Division by zero: always deoptimize.
      DeoptimizeIf(al, environment);
      return;

    case 1:
      if (divisor > 0) {
        __ Move(result, dividend);
      } else {
        // result = -dividend; deopt if negation overflows (kMinInt).
        __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
        DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
      }
      // Compute the remainder.
      __ Move(remainder, zero_reg);
      return;

    default:
      if (IsPowerOf2(divisor_abs)) {
        // Branch and condition free code for integer division by a power
        // of two.
        int32_t power = WhichPowerOf2(divisor_abs);
        if (power > 1) {
          __ sra(scratch, dividend, power - 1);
        }
        // NOTE(review): when power == 1 the sra above is skipped, so the
        // following srl reads a scratch value this function never wrote —
        // confirm callers cannot reach this path with |divisor| == 2.
        __ srl(scratch, scratch, 32 - power);
        __ Addu(scratch, dividend, Operand(scratch));
        __ sra(result, scratch,  power);
        // Negate if necessary.
        // We don't need to check for overflow because the case '-1' is
        // handled separately.
        if (divisor < 0) {
          ASSERT(divisor != -1);
          __ Subu(result, zero_reg, Operand(result));
        }
        // Compute the remainder.
        if (divisor > 0) {
          __ sll(scratch, result, power);
          __ Subu(remainder, dividend, Operand(scratch));
        } else {
          __ sll(scratch, result, power);
          __ Addu(remainder, dividend, Operand(scratch));
        }
        return;
      } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
        // Use magic numbers for a few specific divisors.
        // Details and proofs can be found in:
        // - Hacker's Delight, Henry S. Warren, Jr.
        // - The PowerPC Compiler Writer's Guide
        // and probably many others.
        //
        // We handle
        //   <divisor with magic numbers> * <power of 2>
        // but not
        //   <divisor with magic numbers> * <other divisor with magic numbers>
        DivMagicNumbers magic_numbers =
          DivMagicNumberFor(divisor_abs >> power_of_2_factor);
        // Branch and condition free code for integer division by a power
        // of two.
        const int32_t M = magic_numbers.M;
        const int32_t s = magic_numbers.s + power_of_2_factor;

        __ li(scratch, Operand(M));
        __ mult(dividend, scratch);
        __ mfhi(scratch);
        if (M < 0) {
          __ Addu(scratch, scratch, Operand(dividend));
        }
        if (s > 0) {
          __ sra(scratch, scratch, s);
          // NOTE(review): the following move is a no-op — presumably a
          // leftover from porting a conditional move; confirm before
          // removing.
          __ mov(scratch, scratch);
        }
        // Add one when the dividend is negative (sign bit) to round
        // toward zero.
        __ srl(at, dividend, 31);
        __ Addu(result, scratch, Operand(at));
        if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
        // Compute the remainder.
        __ li(scratch, Operand(divisor));
        __ Mul(scratch, result, Operand(scratch));
        __ Subu(remainder, dividend, Operand(scratch));
      } else {
        // Fall back to the hardware divider.
        __ li(scratch, Operand(divisor));
        __ div(dividend, scratch);
        __ mfhi(remainder);
        __ mflo(result);
      }
  }
}
1283
1284
// Emits an int32 division left / right, deoptimizing on the cases that
// cannot be represented: division by zero, -0 results, kMinInt / -1
// overflow, and (unless all uses truncate) a non-zero remainder.
void LCodeGen::DoDivI(LDivI* instr) {
  const Register left = ToRegister(instr->left());
  const Register right = ToRegister(instr->right());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ div(left, right);

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // The remainder (in hi) must be zero or the quotient is not an integer.
    __ mfhi(result);
    DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
  }
  __ mflo(result);
}
1321
1322
1323void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1324  DoubleRegister addend = ToDoubleRegister(instr->addend());
1325  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1326  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1327
1328  // This is computed in-place.
1329  ASSERT(addend.is(ToDoubleRegister(instr->result())));
1330
1331  __ madd_d(addend, addend, multiplier, multiplicand);
1332}
1333
1334
// Emits floor(left / right): a truncating division whose quotient is
// decremented by one when the (non-zero) remainder and the divisor have
// opposite signs.
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  const Register result = ToRegister(instr->result());
  const Register left = ToRegister(instr->left());
  const Register remainder = ToRegister(instr->temp());
  const Register scratch = scratch0();

  if (instr->right()->IsConstantOperand()) {
    Label done;
    int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
    if (divisor < 0) {
      // NOTE(review): deopts when left is zero — presumably guarding the
      // -0 result of 0 / negative; confirm against the non-constant path,
      // which only checks this under kBailoutOnMinusZero.
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
    }
    EmitSignedIntegerDivisionByConstant(result,
                                        left,
                                        divisor,
                                        remainder,
                                        scratch,
                                        instr->environment());
    // We performed a truncating division. Correct the result if necessary.
    // The Xor executes in the branch delay slot; its sign bit tells whether
    // remainder and divisor have opposite signs.
    __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
    __ Xor(scratch , remainder, Operand(divisor));
    __ Branch(&done, ge, scratch, Operand(zero_reg));
    __ Subu(result, result, Operand(1));
    __ bind(&done);
  } else {
    Label done;
    const Register right = ToRegister(instr->right());

    // On MIPS div is asynchronous - it will run in the background while we
    // check for special cases.
    __ div(left, right);

    // Check for x / 0.
    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));

    // Check for (0 / -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label left_not_zero;
      __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
      DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
      __ bind(&left_not_zero);
    }

    // Check for (kMinInt / -1).
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      Label left_not_min_int;
      __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
      DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
      __ bind(&left_not_min_int);
    }

    __ mfhi(remainder);
    __ mflo(result);

    // We performed a truncating division. Correct the result if necessary.
    __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
    __ Xor(scratch , remainder, Operand(right));
    __ Branch(&done, ge, scratch, Operand(zero_reg));
    __ Subu(result, result, Operand(1));
    __ bind(&done);
  }
}
1397
1398
// Emits an int32/Smi multiplication. Constant right operands (without
// overflow checks) use strength-reduced shift sequences; otherwise the
// hardware multiplier is used, with overflow and -0 checks as required.
void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (right_op->IsConstantOperand() && !can_overflow) {
    // Use optimized code for specific constants.
    int32_t constant = ToRepresentation(
        LConstantOperand::cast(right_op),
        instr->hydrogen()->right()->representation());

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result is -0.
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        __ Subu(result, zero_reg, left);
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;  // Branch-free abs.

        if (IsPowerOf2(constant_abs) ||
            IsPowerOf2(constant_abs - 1) ||
            IsPowerOf2(constant_abs + 1)) {
          if (IsPowerOf2(constant_abs)) {
            int32_t shift = WhichPowerOf2(constant_abs);
            __ sll(result, left, shift);
          } else if (IsPowerOf2(constant_abs - 1)) {
            int32_t shift = WhichPowerOf2(constant_abs - 1);
            __ sll(scratch, left, shift);
            __ Addu(result, scratch, left);
          } else if (IsPowerOf2(constant_abs + 1)) {
            int32_t shift = WhichPowerOf2(constant_abs + 1);
            __ sll(scratch, left, shift);
            __ Subu(result, scratch, left);
          }

          // Correct the sign of the result if the constant is negative.
          if (constant < 0)  {
            __ Subu(result, zero_reg, result);
          }

        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    Register right = EmitLoadRegister(right_op, scratch);
    if (bailout_on_minus_zero) {
      // Save left | right so the sign of a zero result can be checked below.
      __ Or(ToRegister(instr->temp()), left, right);
    }

    if (can_overflow) {
      // hi:lo = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mult(result, right);
        __ mfhi(scratch);
        __ mflo(result);
      } else {
        __ mult(left, right);
        __ mfhi(scratch);
        __ mflo(result);
      }
      // Overflow occurred unless hi equals the sign extension of lo.
      __ sra(at, result, 31);
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      // Bail out if the result is supposed to be negative zero.
      Label done;
      __ Branch(&done, ne, result, Operand(zero_reg));
      DeoptimizeIf(lt,
                   instr->environment(),
                   ToRegister(instr->temp()),
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}
1514
1515
// Emits a bitwise AND/OR/XOR of two int32 operands. The right operand may
// be a register, a constant, or a stack slot (loaded into at).
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      // XOR with all-ones is bitwise NOT; emit it as a single nor.
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}
1550
1551
// Emits a shift (ROR/SAR/SHR/SHL). Variable shift amounts are masked by the
// hardware; constant amounts are masked to 5 bits here. SHR can deopt when
// the unsigned result does not fit in a signed int32.
void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          // Deopt if the sign bit is set: the unsigned result would not be
          // representable as a non-negative signed int32.
          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          // A zero shift leaves the value unchanged, so deopt if the input
          // has the sign bit set and the result must be non-negative.
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            // For Smi results, fold the final shift into the Smi tag and
            // let SmiTagCheckOverflow detect overflow (negative scratch).
            if (shift_count != 1) {
              __ sll(result, left, shift_count - 1);
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
          } else {
            __ sll(result, left, shift_count);
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
1637
1638
1639void LCodeGen::DoSubI(LSubI* instr) {
1640  LOperand* left = instr->left();
1641  LOperand* right = instr->right();
1642  LOperand* result = instr->result();
1643  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1644
1645  if (!can_overflow) {
1646    if (right->IsStackSlot() || right->IsArgument()) {
1647      Register right_reg = EmitLoadRegister(right, at);
1648      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
1649    } else {
1650      ASSERT(right->IsRegister() || right->IsConstantOperand());
1651      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1652    }
1653  } else {  // can_overflow.
1654    Register overflow = scratch0();
1655    Register scratch = scratch1();
1656    if (right->IsStackSlot() ||
1657        right->IsArgument() ||
1658        right->IsConstantOperand()) {
1659      Register right_reg = EmitLoadRegister(right, scratch);
1660      __ SubuAndCheckForOverflow(ToRegister(result),
1661                                 ToRegister(left),
1662                                 right_reg,
1663                                 overflow);  // Reg at also used as scratch.
1664    } else {
1665      ASSERT(right->IsRegister());
1666      // Due to overflow check macros not supporting constant operands,
1667      // handling the IsConstantOperand case was moved to prev if clause.
1668      __ SubuAndCheckForOverflow(ToRegister(result),
1669                                 ToRegister(left),
1670                                 ToRegister(right),
1671                                 overflow);  // Reg at also used as scratch.
1672    }
1673    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
1674  }
1675}
1676
1677
// Materializes an untagged int32 constant into the result register.
void LCodeGen::DoConstantI(LConstantI* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}
1681
1682
// Materializes a Smi constant into the result register.
void LCodeGen::DoConstantS(LConstantS* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}
1686
1687
1688void LCodeGen::DoConstantD(LConstantD* instr) {
1689  ASSERT(instr->result()->IsDoubleRegister());
1690  DoubleRegister result = ToDoubleRegister(instr->result());
1691  double v = instr->value();
1692  __ Move(result, v);
1693}
1694
1695
// Materializes an external reference constant into the result register.
void LCodeGen::DoConstantE(LConstantE* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}
1699
1700
// Materializes a tagged object constant into the result register.
void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value();
  // Allow LoadObject to look through the handle (e.g. for Smi values).
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(ToRegister(instr->result()), value);
}
1706
1707
1708void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1709  Register result = ToRegister(instr->result());
1710  Register map = ToRegister(instr->value());
1711  __ EnumLength(result, map);
1712}
1713
1714
// Extracts the elements kind of the input object's map into the result
// register.
void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->value());

  // Load map into |result|.
  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
}
1727
1728
// Implements ValueOf: for a JSValue wrapper, loads the wrapped primitive;
// smis and non-JSValue objects are returned unchanged.
void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temp());
  Label done;

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // If the object is a smi return the object.
    __ Move(result, input);
    __ JumpIfSmi(input, &done);
  }

  // If the object is not a value type, return the object.
  __ GetObjectType(input, map, map);
  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}
1748
1749
// Loads a field of a JSDate object. Field 0 (the date value) is always read
// directly. Other cached fields are read inline only while the object's
// cache stamp matches the isolate's date cache stamp; otherwise (and for
// uncached fields) a C helper recomputes the field.
void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  // Fixed registers match the C calling convention used for the slow path
  // below: object already in a0, result returned in v0.
  ASSERT(object.is(a0));
  ASSERT(result.is(v0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  // Deoptimize if the input is a smi or not a JSDate.
  __ And(at, object, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
  __ GetObjectType(object, scratch, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));

  if (index->value() == 0) {
    // The date value itself is always valid; no stamp check needed.
    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      // Fast path: the cached field is valid while the stamps agree.
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ li(scratch, Operand(stamp));
      __ lw(scratch, MemOperand(scratch));
      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
                                            kPointerSize * index->value()));
      __ jmp(&done);
    }
    // Slow path: call the C function with (object, index).
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ li(a1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}
1786
1787
// Stores character |value| at position |index| of a sequential string,
// using a byte store for one-byte strings and a halfword store for
// two-byte strings.
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  Register string = ToRegister(instr->string());
  Register index = ToRegister(instr->index());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  String::Encoding encoding = instr->encoding();

  if (FLAG_debug_code) {
    // Verify the string is sequential and has the expected encoding.
    __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));

    __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
                                ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
  }

  // scratch <- untagged address of the first character.
  __ Addu(scratch,
          string,
          Operand(SeqString::kHeaderSize - kHeapObjectTag));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Addu(at, scratch, index);
    __ sb(value, MemOperand(at));
  } else {
    // Two-byte characters: scale the index by 2.
    __ sll(at, index, 1);
    __ Addu(at, scratch, at);
    __ sh(value, MemOperand(at));
  }
}
1819
1820
// Throws the given value via the runtime. Runtime::kThrow does not return,
// so anything emitted after the call is unreachable.
void LCodeGen::DoThrow(LThrow* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ push(input_reg);
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}
1830
1831
// Integer addition. Without the overflow flag a plain Addu suffices.
// With it, AdduAndCheckForOverflow leaves a negative value in |overflow|
// exactly when the addition overflowed, and we deoptimize on that.
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // Due to overflow check macros not supporting constant operands,
      // handling the IsConstantOperand case was moved to prev if clause.
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    // Overflow is signalled by a negative value in |overflow|.
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}
1869
1870
// Implements Math.min/Math.max for smi/int32 and double inputs. The double
// path handles the IEEE corner cases explicitly: -0 vs +0 and NaN
// propagation.
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  // "Keep left" condition: left <= right for min, left >= right for max.
  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Register left_reg = ToRegister(left);
    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
        ? ToOperand(right)
        : Operand(EmitLoadRegister(right, at));
    Register result_reg = ToRegister(instr->result());
    Label return_right, done;
    if (!result_reg.is(left_reg)) {
      __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
      __ mov(result_reg, left_reg);
      __ Branch(&done);
    }
    // result_reg aliases left_reg: only move when right must be selected.
    __ Branch(&done, condition, left_reg, right_op);
    __ bind(&return_right);
    __ Addu(result_reg, zero_reg, right_op);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    FPURegister left_reg = ToDoubleRegister(left);
    FPURegister right_reg = ToDoubleRegister(right);
    FPURegister result_reg = ToDoubleRegister(instr->result());
    Label check_nan_left, check_zero, return_left, return_right, done;
    // Equal compare: eq -> possibly distinguish +0/-0; unordered -> NaN.
    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
    __ Branch(&return_right);

    __ bind(&check_zero);
    // left == right != 0.
    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      // min: compute -((-L) - R), which is -0 whenever either input is -0.
      __ neg_d(left_reg, left_reg);
      __ sub_d(result_reg, left_reg, right_reg);
      __ neg_d(result_reg, result_reg);
    } else {
      // max: L + R is +0 unless both inputs are -0.
      __ add_d(result_reg, left_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&check_nan_left);
    // left == NaN.
    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ mov_d(result_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ mov_d(result_reg, left_reg);
    }
    __ bind(&done);
  }
}
1931
1932
// Double-precision binary arithmetic. ADD/SUB/MUL/DIV map directly to FPU
// instructions; MOD is delegated to a C library function.
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack; the C call may clobber them.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result in the double result register.
      __ GetCFunctionDoubleResult(result);

      // Restore saved registers.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
1972
1973
// Generic (tagged) binary arithmetic via the BinaryOpStub. Fixed register
// convention: left in a1, right in a0, result in v0.
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->left()).is(a1));
  ASSERT(ToRegister(instr->right()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  // Other arch use a nop here, to signal that there is no inlined
  // patchable code. Mips does not need the nop, since our marker
  // instruction (andi zero_reg) will never be used in normal code.
}
1985
1986
1987int LCodeGen::GetNextEmittedBlock() const {
1988  for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
1989    if (!chunk_->GetLabel(i)->HasReplacement()) return i;
1990  }
1991  return -1;
1992}
1993
// Emits control flow for a two-way branch instruction: jump to the true
// block when |condition| holds for src1/src2, to the false block otherwise.
// When one of the destinations is the next block to be emitted, only a
// single branch is generated and the other path falls through.
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
                          Condition condition,
                          Register src1,
                          const Operand& src2) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block || condition == al) {
    // Both destinations agree, or the branch is unconditional: plain goto.
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    // Fall through to the true block; branch on the negated condition.
    __ Branch(chunk_->GetAssemblyLabel(right_block),
              NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    // Fall through to the false block.
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
  } else {
    // Neither destination falls through: conditional branch plus jump.
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}
2015
2016
// Floating-point counterpart of EmitBranch: branches on an FPU comparison
// of src1/src2, falling through when a destination is the next emitted
// block. The NULL second label means "no explicit NaN target" here.
template<class InstrType>
void LCodeGen::EmitBranchF(InstrType instr,
                           Condition condition,
                           FPURegister src1,
                           FPURegister src2) {
  int right_block = instr->FalseDestination(chunk_);
  int left_block = instr->TrueDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block) {
    // Both destinations agree: plain goto.
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    // Fall through to the true block; branch on the negated condition.
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
               NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    // Fall through to the false block.
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
  } else {
    // Neither destination falls through: conditional branch plus jump.
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}
2040
2041
2042template<class InstrType>
2043void LCodeGen::EmitFalseBranchF(InstrType instr,
2044                                Condition condition,
2045                                FPURegister src1,
2046                                FPURegister src2) {
2047  int false_block = instr->FalseDestination(chunk_);
2048  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
2049             condition, src1, src2);
2050}
2051
2052
// Emits a stop instruction so execution traps when this Lithium
// instruction is reached.
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
}
2056
2057
// Branches on whether the value is a number: untagged numeric
// representations are trivially true; tagged values are numbers iff they
// are smis or heap numbers.
void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsSmiOrInteger32() || r.IsDouble()) {
    // Unconditional branch to the true block.
    EmitBranch(instr, al, zero_reg, Operand(zero_reg));
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsTaggedNumber()) {
      // NOTE(review): there is no early return after this unconditional
      // branch, so the instructions below are still emitted but are
      // unreachable in this case — confirm this is intended.
      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
    }
    __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
    // A heap object is a number iff its map is the heap-number map.
    __ lw(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    EmitBranch(instr, eq, scratch0(), Operand(at));
  }
}
2075
2076
// Implements ToBoolean branching: jumps to the true/false destinations
// according to the truthiness of the input. Uses the statically-known
// representation/type when available; otherwise dispatches over the set of
// types observed so far and deoptimizes on a type it has never seen.
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    // Any non-zero integer/smi is truthy.
    EmitBranch(instr, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchF(instr, nue, reg, kDoubleRegZero);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      // A boolean is true iff it is the 'true' oddball.
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, ne, reg, Operand(zero_reg));
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      // A JSArray is always truthy: unconditional branch to true.
      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      DoubleRegister dbl_scratch = double_scratch0();
      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      // A string is truthy iff its length is non-zero.
      __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
      EmitBranch(instr, ne, at, Operand(zero_reg));
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ And(at, reg, Operand(kSmiTagMask));
        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_),
                  ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge , at, Operand(FIRST_NONSTRING_TYPE));
        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                   ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
      }
    }
  }
}
2203
2204
2205void LCodeGen::EmitGoto(int block) {
2206  if (!IsNextEmittedBlock(block)) {
2207    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2208  }
2209}
2210
2211
2212void LCodeGen::DoGoto(LGoto* instr) {
2213  EmitGoto(instr->block_id());
2214}
2215
2216
2217Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2218  Condition cond = kNoCondition;
2219  switch (op) {
2220    case Token::EQ:
2221    case Token::EQ_STRICT:
2222      cond = eq;
2223      break;
2224    case Token::LT:
2225      cond = is_unsigned ? lo : lt;
2226      break;
2227    case Token::GT:
2228      cond = is_unsigned ? hi : gt;
2229      break;
2230    case Token::LTE:
2231      cond = is_unsigned ? ls : le;
2232      break;
2233    case Token::GTE:
2234      cond = is_unsigned ? hs : ge;
2235      break;
2236    case Token::IN:
2237    case Token::INSTANCEOF:
2238    default:
2239      UNREACHABLE();
2240  }
2241  return cond;
2242}
2243
2244
// Compares two numbers and branches. Constant-vs-constant comparisons are
// folded statically into a goto. Double comparisons send unordered (NaN)
// results to the false block. Integer/smi comparisons normalize a constant
// operand onto the right-hand side, reversing the condition when the
// operands are transposed.
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to false block label.
      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
                 left_reg, right_reg);

      EmitBranchF(instr, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand(0);

      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          // Smi representation: compare against the tagged constant.
          cmp_left = ToRegister(left);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(left);
          cmp_right = Operand(value);
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
           cmp_left = ToRegister(right);
           cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(right);
          cmp_right = Operand(value);
        }
        // We transposed the operands. Reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(instr, cond, cmp_left, cmp_right);
    }
  }
}
2303
2304
2305void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2306  Register left = ToRegister(instr->left());
2307  Register right = ToRegister(instr->right());
2308
2309  EmitBranch(instr, eq, left, Operand(right));
2310}
2311
2312
// Branches on whether the input is the hole: a pointer compare against the
// hole value in the tagged case, or a check for the hole NaN bit pattern
// in the double case.
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ li(at, Operand(factory()->the_hole_value()));
    EmitBranch(instr, eq, input_reg, Operand(at));
    return;
  }

  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  // input == input holds exactly for non-NaN values, which can never be
  // the hole: branch straight to the false block for those.
  EmitFalseBranchF(instr, eq, input_reg, input_reg);

  // Distinguish the hole NaN from other NaNs via its upper word.
  Register scratch = scratch0();
  __ FmoveHigh(scratch, input_reg);
  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
}
2328
2329
// Tests whether |input| is a non-callable JS object. Jumps to the given
// labels for the definite cases (smi, null, undetectable, below the object
// type range); otherwise falls through with the instance type left in
// |temp2| and returns the condition (le, to be tested against
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE) that the caller must still apply.
Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  // null counts as an object.
  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ Branch(is_object, eq, input, Operand(temp2));

  // Load map.
  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));

  // Load instance type and check that it is in object type range.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ Branch(is_not_object,
            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));

  return le;
}
2354
2355
2356void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2357  Register reg = ToRegister(instr->value());
2358  Register temp1 = ToRegister(instr->temp());
2359  Register temp2 = scratch0();
2360
2361  Condition true_cond =
2362      EmitIsObject(reg, temp1, temp2,
2363          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2364
2365  EmitBranch(instr, true_cond, temp2,
2366             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2367}
2368
2369
// Tests whether |input| is a String. Jumps to |is_not_string| for smis
// (when the inline check is requested); otherwise falls through with the
// instance type in |temp1| and returns the condition (lt, to be tested
// against FIRST_NONSTRING_TYPE) that the caller must still apply.
// NOTE(review): the default argument appears on the definition; usually it
// lives on the declaration — confirm the header does not also provide it.
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ GetObjectType(input, temp1, temp1);

  return lt;
}
2381
2382
2383void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2384  Register reg = ToRegister(instr->value());
2385  Register temp1 = ToRegister(instr->temp());
2386
2387  SmiCheck check_needed =
2388      instr->hydrogen()->value()->IsHeapObject()
2389          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2390  Condition true_cond =
2391      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2392
2393  EmitBranch(instr, true_cond, temp1,
2394             Operand(FIRST_NONSTRING_TYPE));
2395}
2396
2397
// Branches on whether the value is a smi (its tag bit is clear).
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(instr, eq, at, Operand(zero_reg));
}
2403
2404
// Branches on whether the value is an undetectable heap object (the
// undetectable bit is set in its map's bit field).
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // Smis are never undetectable.
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne, at, Operand(zero_reg));
}
2417
2418
2419static Condition ComputeCompareCondition(Token::Value op) {
2420  switch (op) {
2421    case Token::EQ_STRICT:
2422    case Token::EQ:
2423      return eq;
2424    case Token::LT:
2425      return lt;
2426    case Token::GT:
2427      return gt;
2428    case Token::LTE:
2429      return le;
2430    case Token::GTE:
2431      return ge;
2432    default:
2433      UNREACHABLE();
2434      return kNoCondition;
2435  }
2436}
2437
2438
2439void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2440  Token::Value op = instr->op();
2441
2442  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2443  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2444
2445  Condition condition = ComputeCompareCondition(op);
2446
2447  EmitBranch(instr, condition, v0, Operand(zero_reg));
2448}
2449
2450
2451static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2452  InstanceType from = instr->from();
2453  InstanceType to = instr->to();
2454  if (from == FIRST_TYPE) return to;
2455  ASSERT(from == to || to == LAST_TYPE);
2456  return from;
2457}
2458
2459
2460static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2461  InstanceType from = instr->from();
2462  InstanceType to = instr->to();
2463  if (from == to) return eq;
2464  if (to == LAST_TYPE) return hs;
2465  if (from == FIRST_TYPE) return ls;
2466  UNREACHABLE();
2467  return eq;
2468}
2469
2470
// Branches on whether the value's instance type falls in the instruction's
// [from(), to()] range; TestType/BranchCondition reduce the range check to
// a single compare.
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // Smis have no instance type.
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(instr,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}
2485
2486
// Extracts the cached array index from a string's hash field into the
// result register.
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}
2496
2497
// Branches on whether a string's hash field contains a cached array index
// (the "contains index" mask bits are clear exactly when it does).
void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ lw(scratch,
         FieldMemOperand(input, String::kHashFieldOffset));
  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, eq, at, Operand(zero_reg));
}
2508
2509
2510// Branches to a label or falls through with the answer in flags.  Trashes
2511// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String>class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  // Smis have no class.
  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);

    __ GetObjectType(input, temp, temp2);
    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ GetObjectType(input, temp, temp2);
    __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ GetObjectType(temp, temp2, temp2);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ lw(temp, FieldMemOperand(temp,
                               SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.

  // End with the address of this class_name instance in temp register.
  // On MIPS, the caller must do the comparison with Handle<String>class_name.
}
2574
2575
// Branches on whether the value's [[Class]] equals the literal class name.
// EmitClassOfTest jumps directly to the true/false labels for the cases it
// can decide and otherwise leaves the object's instance class name in |temp|;
// the final comparison against the literal is emitted here.
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();  // Receives the instance class name.
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  // Both names are internalized (see EmitClassOfTest), so an identity
  // comparison against the literal handle is sufficient.
  EmitBranch(instr, eq, temp, Operand(class_name));
}
2587
2588
2589void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2590  Register reg = ToRegister(instr->value());
2591  Register temp = ToRegister(instr->temp());
2592
2593  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2594  EmitBranch(instr, eq, temp, Operand(instr->map()));
2595}
2596
2597
// Implements the generic JavaScript instanceof operator. Calls
// InstanceofStub with the object in a0 and the function in a1. The stub
// leaves zero in v0 on a positive result, which is converted here into the
// true/false heap objects expected by the caller.
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  Label true_label, done;
  ASSERT(ToRegister(instr->left()).is(a0));  // Object is in a0.
  ASSERT(ToRegister(instr->right()).is(a1));  // Function is in a1.
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

  // Zero from the stub means "is an instance"; materialize the booleans.
  __ Branch(&true_label, eq, result, Operand(zero_reg));
  __ li(result, Operand(factory()->false_value()));
  __ Branch(&done);
  __ bind(&true_label);
  __ li(result, Operand(factory()->true_value()));
  __ bind(&done);
}
2615
2616
// Implements instanceof against a known global function using an inlined
// call-site cache. Two hole values are emitted below as patchable constants:
// the instanceof stub later patches them with the last map/result pair. The
// slow path runs through DoDeferredInstanceOfKnownGlobal.
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  // Deferred (out-of-line) code that calls the instanceof stub; it also owns
  // the label marking the patchable map-check site.
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  ASSERT(object.is(a0));
  ASSERT(result.is(v0));

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));

  // NOTE(review): blocking the trampoline pool here presumably keeps the
  // instruction offsets of the patchable sequence stable — confirm against
  // the stub's patching code.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  __ bind(deferred->map_check());  // Label for calculating code patching.
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch with
  // the cached map.
  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
  __ li(at, Operand(Handle<Object>(cell)));
  __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
  __ Branch(&cache_miss, ne, map, Operand(at));
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch
  // with true or false.
  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
  __ Branch(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not instance of anything.
  __ LoadRoot(temp, Heap::kNullValueRootIndex);
  __ Branch(&false_result, eq, object, Operand(temp));

  // String values is not instance of anything.
  Condition cc = __ IsObjectStringType(object, temp, temp);
  __ Branch(&false_result, cc, temp, Operand(zero_reg));

  // Go to the deferred code.
  __ Branch(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}
2692
2693
// Slow path for DoInstanceOfKnownGlobal: calls InstanceofStub configured to
// perform the inline call-site check and return the true/false objects
// directly. |map_check| marks the patchable map-check site emitted by the
// fast path; its distance (in words) is passed to the stub via t0's
// safepoint slot.
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  // Configure the stub: args in registers, inline call-site check, and a
  // true/false object (rather than 0/1) as the result.
  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // Get the temp register reserved by the instruction. This needs to be t0 as
  // its slot of the pushing of safepoint registers is used to communicate the
  // offset to the location of the map check.
  Register temp = ToRegister(instr->temp());
  ASSERT(temp.is(t0));
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
  // NOTE(review): kAdditionalDelta accounts for the instructions emitted
  // between &before_push_delta and the point the stub measures from —
  // confirm against InstanceofStub if this sequence changes.
  static const int kAdditionalDelta = 7;
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  {
    // CONSTANT_SIZE forces the li to occupy a fixed number of instructions
    // so that |delta| stays accurate.
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
    __ StoreToSafepointRegisterSlot(temp, temp);
  }
  CallCodeGeneric(stub.GetCode(isolate()),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(result, result);
}
2735
2736
2737void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
2738  Register object = ToRegister(instr->object());
2739  Register result = ToRegister(instr->result());
2740  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
2741  __ lbu(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
2742}
2743
2744
// Tagged comparison via the CompareIC. The IC's result is in v0; the final
// boolean object is selected with a delay-slot trick that relies on LoadRoot
// emitting exactly one instruction (asserted below) — do not reorder.
void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // On MIPS there is no need for a "no inlined smi code" marker (nop).

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
  Label done, check;
  // If the condition holds, the branch is taken with the true-value LoadRoot
  // executing in the delay slot; otherwise it falls through to load false.
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
  __ bind(&check);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ bind(&done);
}
2764
2765
// Emits the function epilogue: optional trace-exit call, restore of saved
// caller double registers, frame teardown, stack-argument pop, and jump to
// the return address.
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in v0.
    __ push(v0);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    ASSERT(NeedsEagerFrame());
    // Reload each allocated double register from its save slot on the stack.
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    int count = 0;
    while (!save_iterator.Done()) {
      __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(sp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }
  // Records the pc range during which no frame exists (for stack walking).
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    __ mov(sp, fp);
    no_frame_start = masm_->pc_offset();
    __ Pop(ra, fp);
  }
  if (instr->has_constant_parameter_count()) {
    // Pop the parameters plus the receiver.
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
    if (sp_delta != 0) {
      __ Addu(sp, sp, Operand(sp_delta));
    }
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi
    __ SmiUntag(reg);
    __ sll(at, reg, kPointerSizeLog2);
    __ Addu(sp, sp, at);
  }

  __ Jump(ra);

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}
2811
2812
// Loads the value of a global property cell. When a hole check is required,
// deoptimizes if the cell holds the hole (the property was deleted).
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
  __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
  }
}
2822
2823
2824void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2825  ASSERT(ToRegister(instr->global_object()).is(a0));
2826  ASSERT(ToRegister(instr->result()).is(v0));
2827
2828  __ li(a2, Operand(instr->name()));
2829  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
2830                                             : RelocInfo::CODE_TARGET_CONTEXT;
2831  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2832  CallCode(ic, mode, instr);
2833}
2834
2835
// Stores a value into a global property cell, optionally deoptimizing first
// if the cell holds the hole (the property was deleted from the dictionary).
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ li(cell, Operand(instr->hydrogen()->cell()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload.
    Register payload = ToRegister(instr->temp());
    __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
  }

  // Store the value.
  __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}
2859
2860
2861void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2862  ASSERT(ToRegister(instr->global_object()).is(a1));
2863  ASSERT(ToRegister(instr->value()).is(a0));
2864
2865  __ li(a2, Operand(instr->name()));
2866  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2867      ? isolate()->builtins()->StoreIC_Initialize_Strict()
2868      : isolate()->builtins()->StoreIC_Initialize();
2869  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2870}
2871
2872
// Loads a slot from a context. If a hole check is required, either
// deoptimizes on the hole or replaces it with undefined, depending on the
// hydrogen instruction's mode.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  __ lw(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(at));
    } else {
      // Non-deopting mode: the hole reads as undefined.
      Label is_not_hole;
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}
2891
2892
// Stores a value into a context slot, with optional hole check (deopt or
// skip-the-store) and a write barrier when the value may be a heap object.
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
    } else {
      // Non-deopting mode: leave a hole slot untouched.
      __ Branch(&skip_assignment, ne, scratch, Operand(at));
    }
  }

  __ sw(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // The smi check can be omitted when the value is statically known to be
    // a heap object.
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch0(),
                              GetRAState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}
2929
2930
// Loads a named field. Handles three storage kinds: external memory (raw
// address, not a tagged heap object), unboxed double fields, and tagged
// fields that live either in-object or in the properties backing store.
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    // Raw memory access, no kHeapObjectTag adjustment.
    Register result = ToRegister(instr->result());
    __ lw(result, MemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ ldc1(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (access.IsInobject()) {
    __ lw(result, FieldMemOperand(object, offset));
  } else {
    // Out-of-object: indirect through the properties array.
    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ lw(result, FieldMemOperand(result, offset));
  }
}
2956
2957
2958void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2959  ASSERT(ToRegister(instr->object()).is(a0));
2960  ASSERT(ToRegister(instr->result()).is(v0));
2961
2962  // Name is always in a2.
2963  __ li(a2, Operand(instr->name()));
2964  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2965  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2966}
2967
2968
// Loads a function's prototype: checks the value is a JSFunction, handles
// the non-instance-prototype bit, and unwraps an initial map to its
// prototype when present. Deoptimizes on non-functions and on the hole.
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ GetObjectType(function, result, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));

  // Get the prototype or initial map from the function.
  __ lw(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), result, Operand(at));

  // If the function does not have an initial map, we're done.
  Label done;
  __ GetObjectType(result, scratch, scratch);
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ Branch(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  __ bind(&non_instance);
  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}
3010
3011
3012void LCodeGen::DoLoadExternalArrayPointer(
3013    LLoadExternalArrayPointer* instr) {
3014  Register to_reg = ToRegister(instr->result());
3015  Register from_reg  = ToRegister(instr->object());
3016  __ lw(to_reg, FieldMemOperand(from_reg,
3017                                ExternalArray::kExternalPointerOffset));
3018}
3019
3020
// Loads argument |index| out of the arguments area addressed by |arguments|.
// Arguments are laid out in reverse below the frame pointer, hence the
// (length - index) addressing in both the constant and register paths.
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    // Fully constant case: fold the address computation at compile time.
    // The +1 accounts for one of the two words between the frame pointer and
    // the last argument (see the comment in the register path below).
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    int index = (const_length - const_index) + 1;
    __ lw(result, MemOperand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    // There are two words between the frame pointer and the last argument.
    // Subtracting from length accounts for one of them, add one more.
    __ subu(length, length, index);
    __ Addu(length, length, Operand(1));
    __ sll(length, length, kPointerSizeLog2);
    __ Addu(at, arguments, Operand(length));
    __ lw(result, MemOperand(at, 0));
  }
}
3042
3043
// Emits a keyed load from an external (typed) array. Float/double element
// kinds go through the FPU path; all integer kinds go through
// PrepareKeyedOperand plus a width-appropriate load. Unsigned int loads may
// deoptimize when the value does not fit in a signed 32-bit result.
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    // Reject constant keys whose scaled offset could overflow the encoding.
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  // Smi keys carry a tag bit, so they need one less shift to scale.
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    FPURegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
    } else {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
    }
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      // Widen the loaded single to the double result register.
      __ lwc1(result, MemOperand(scratch0(), additional_offset));
      __ cvt_d_s(result, result);
    } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ ldc1(result, MemOperand(scratch0(), additional_offset));
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ lb(result, mem_operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ lbu(result, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ lh(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ lhu(result, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ lw(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ lw(result, mem_operand);
        // A uint32 with the top bit set cannot be represented as a tagged
        // int32; deopt unless the result is known to be used as uint32.
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          DeoptimizeIf(Ugreater_equal, instr->environment(),
              result, Operand(0x80000000));
        }
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
3123
3124
// Emits a keyed load from a FixedDoubleArray, with an optional hole check.
// Note: |elements| is clobbered by the address computation below.
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  // Smi keys carry a tag bit, so they need one less shift to scale.
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }

  int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
      ((constant_key + instr->additional_index()) << element_size_shift);
  if (!key_is_constant) {
    __ sll(scratch, key, shift_size);
    __ Addu(elements, elements, scratch);
  }
  __ Addu(elements, elements, Operand(base_offset));
  __ ldc1(result, MemOperand(elements));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // Compare the word at offset sizeof(kHoleNanLower32) — i.e. the half
    // holding kHoleNanUpper32 — against the hole-NaN pattern and deopt on a
    // match.
    __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
    DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
  }
}
3158
3159
// Emits a keyed load from a tagged FixedArray, with an optional hole check
// (smi check for fast-smi kinds, the-hole comparison otherwise).
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    // Constant key: fold everything into the load offset.
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    Register key = EmitLoadRegister(instr->key(), scratch0());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
      __ addu(scratch, elements, scratch);
    } else {
      __ sll(scratch, key, kPointerSizeLog2);
      __ addu(scratch, elements, scratch);
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ lw(result, FieldMemOperand(store_base, offset));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      // For smi arrays a non-smi element means a hole.
      __ And(scratch, result, Operand(kSmiTagMask));
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
    }
  }
}
3200
3201
3202void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3203  if (instr->is_external()) {
3204    DoLoadKeyedExternalArray(instr);
3205  } else if (instr->hydrogen()->representation().IsDouble()) {
3206    DoLoadKeyedFixedDoubleArray(instr);
3207  } else {
3208    DoLoadKeyedFixedArray(instr);
3209  }
3210}
3211
3212
// Computes a MemOperand for a keyed access on |base|. For constant keys the
// address is folded into a displacement; otherwise the (possibly smi-tagged)
// key is scaled into scratch0() and added to |base|. When additional_index
// is non-zero, the adjusted key is first materialized in scratch0(), which
// the later shift paths then consume instead of |key|. A shift_size of -1
// means the smi tag must be stripped by shifting right.
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int additional_index,
                                         int additional_offset) {
  if (additional_index != 0 && !key_is_constant) {
    // Pre-add the extra index in key units; scratch0() now holds the key.
    additional_index *= 1 << (element_size - shift_size);
    __ Addu(scratch0(), key, Operand(additional_index));
  }

  if (key_is_constant) {
    return MemOperand(base,
                      (constant_key << element_size) + additional_offset);
  }

  if (additional_index == 0) {
    if (shift_size >= 0) {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    } else {
      // Smi key with byte-sized elements: shift right to drop the tag.
      ASSERT_EQ(-1, shift_size);
      __ srl(scratch0(), key, 1);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    }
  }

  // additional_index != 0: scratch0() holds the adjusted key from above.
  if (shift_size >= 0) {
    __ sll(scratch0(), scratch0(), shift_size);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0());
  } else {
    ASSERT_EQ(-1, shift_size);
    __ srl(scratch0(), scratch0(), 1);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0());
  }
}
3255
3256
3257void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3258  ASSERT(ToRegister(instr->object()).is(a1));
3259  ASSERT(ToRegister(instr->key()).is(a0));
3260
3261  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3262  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3263}
3264
3265
// Computes the base pointer of the arguments area: either a fixed offset
// below sp (when inlined) or the frame pointer of the frame holding the
// arguments (the adaptor frame if one is present, otherwise our own).
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register temp = scratch1();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ Subu(result, sp, 2 * kPointerSize);
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    // temp is zero iff the caller frame is an arguments adaptor frame.
    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
  }
}
3286
3287
// Computes the number of arguments: the static parameter count when there
// is no adaptor frame, otherwise the length stored in the adaptor frame.
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  // (elem == fp means DoArgumentsElements resolved to our own frame.)
  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // Arguments adaptor frame present. Get argument length from there.
  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);  // The stored length is a smi.

  // Argument length is in result register.
  __ bind(&done);
}
3307
3308
// Wraps the receiver for a function call: null/undefined become the global
// receiver for normal functions, while strict-mode functions and builtins
// receive the value unchanged. Deoptimizes on non-object receivers.
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ lw(scratch,
         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ lw(scratch,
         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));

  // Do not transform the receiver to object for builtins.
  // The hint bits sit above the smi tag in the compiler-hints word, hence
  // the kSmiTagSize adjustment.
  int32_t strict_mode_function_mask =
                  1 <<  (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ And(scratch, receiver, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr->environment(),
               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ Branch(&receiver_ok);

  __ bind(&global_object);
  __ lw(receiver, GlobalObjectOperand());
  __ lw(receiver,
         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}
3354
3355
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  // Implements the apply-arguments pattern: pushes the argument values
  // found in |elements| onto the stack, then invokes |function| with
  // |receiver| as the parameter count register.
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(a0));  // Used for parameter count.
  ASSERT(function.is(a1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(v0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  // Deoptimize on pathologically large argument counts rather than
  // pushing an unbounded number of words onto the stack.
  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ Move(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ Addu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
  __ sll(scratch, length, 2);  // Delay slot: byte offset of the last element.
  __ bind(&loop);
  __ Addu(scratch, elements, scratch);
  __ lw(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Subu(length, length, Operand(1));
  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
  __ sll(scratch, length, 2);  // Delay slot: offset for the next iteration.

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
  // Restore the context register after the call.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
3405
3406
3407void LCodeGen::DoPushArgument(LPushArgument* instr) {
3408  LOperand* argument = instr->value();
3409  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3410    Abort(kDoPushArgumentNotImplementedForDoubleType);
3411  } else {
3412    Register argument_reg = EmitLoadRegister(argument, at);
3413    __ push(argument_reg);
3414  }
3415}
3416
3417
void LCodeGen::DoDrop(LDrop* instr) {
  // Pop instr->count() words off the top of the stack without using them.
  __ Drop(instr->count());
}
3421
3422
3423void LCodeGen::DoThisFunction(LThisFunction* instr) {
3424  Register result = ToRegister(instr->result());
3425  __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3426}
3427
3428
3429void LCodeGen::DoContext(LContext* instr) {
3430  // If there is a non-return use, the context must be moved to a register.
3431  Register result = ToRegister(instr->result());
3432  for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
3433    if (!it.value()->IsReturn()) {
3434      __ mov(result, cp);
3435      return;
3436    }
3437  }
3438}
3439
3440
3441void LCodeGen::DoOuterContext(LOuterContext* instr) {
3442  Register context = ToRegister(instr->context());
3443  Register result = ToRegister(instr->result());
3444  __ lw(result,
3445        MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3446}
3447
3448
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  // Declare the globals described by the pairs array via the runtime.
  // Flags are passed as a smi.
  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  // The context is the first argument.
  __ Push(cp, scratch0(), scratch1());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
3456
3457
3458void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
3459  Register result = ToRegister(instr->result());
3460  __ lw(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
3461}
3462
3463
3464void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
3465  Register global = ToRegister(instr->global_object());
3466  Register result = ToRegister(instr->result());
3467  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
3468}
3469
3470
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 A1State a1_state) {
  // Calls |function| with |arity| arguments. When the call site's arity
  // matches the formal parameter count (or the function opted out of
  // argument adaption), the call jumps straight to the code entry;
  // otherwise the generic InvokeFunction path handles adaption.
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (can_invoke_directly) {
    // The callee is expected in a1; load it unless the caller already has.
    if (a1_state == A1_UNINITIALIZED) {
      __ LoadHeapObject(a1, function);
    }

    // Change context.
    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

    // Set a0 to the arguments count if adaption is not needed. Assumes
    // that a0 is available to write to at this point.
    if (dont_adapt_arguments) {
      __ li(a0, Operand(arity));
    }

    // Invoke function.
    __ SetCallKind(t1, call_kind);
    __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
    __ Call(at);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(
        function, expected, count, CALL_FUNCTION, generator, call_kind);
  }

  // Restore context.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
3517
3518
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  // Move the value currently in v0 into a0 before the call
  // (presumably the receiver expected by the callee — confirm against
  // the LCallConstantFunction definition).
  __ mov(a0, v0);
  CallKnownFunction(instr->hydrogen()->function(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    A1_UNINITIALIZED);
}
3529
3530
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  // Deferred (slow) path of DoMathAbs for a tagged, non-smi input: the
  // input must be a heap number. Non-negative numbers are returned as-is;
  // negative ones get a freshly allocated heap number with the sign bit
  // cleared.
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));

  Label done;
  // Reuse scratch0() under a new name; invalidate |scratch| to make the
  // aliasing explicit.
  Register exponent = scratch0();
  scratch = no_reg;
  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(t0) ? a0 : t0;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(v0))
      __ mov(tmp1, v0);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    // Clear the sign bit in the exponent word and copy both halves of the
    // double into the new heap number.
    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    // Write the new heap number into the safepoint slot of |result| so it
    // survives the register restore at scope exit.
    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}
3594
3595
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  // abs() of an integer input. kMinInt has no representable absolute
  // value, so that case deoptimizes.
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  // Don't let a trampoline pool split the branch from its delay slot.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);  // Delay slot: non-negative input is the result.
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
  __ bind(&done);
}
3608
3609
void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LMathAbs* instr_;
  };

  // Dispatch on the input representation: doubles use the FPU abs
  // instruction, smis/int32s take the integer path, and tagged values
  // handle smis inline with heap numbers in deferred code.
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->value());
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}
3643
3644
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  // Math.floor via FPU truncation toward -infinity. Deoptimizes when the
  // truncation raises an exception flag, or on -0 when the instruction
  // must distinguish it from +0.
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->temp());

  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     input,
                     scratch1,
                     double_scratch0(),
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    // A zero result with the input's sign bit set means the input was -0.
    __ mfc1(scratch1, input.high());
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    __ bind(&done);
  }
}
3671
3672
void LCodeGen::DoMathRound(LMathRound* instr) {
  // Math.round: add 0.5 and truncate toward -infinity. Deoptimizes when
  // the result does not fit an int32 or is -0 (when that must be caught).
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ mfc1(result, input.high());
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  Label skip1;
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr->environment(), scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());

  // Check sign of the result: if the sign changed, the input
  // value was in ]-0.5, 0[ and the result should be -0.
  __ mfc1(result, double_scratch0().high());
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // ARM uses 'mi' here, which is 'lt'
    DeoptimizeIf(lt, instr->environment(), result,
                 Operand(zero_reg));
  } else {
    Label skip2;
    // ARM uses 'mi' here, which is 'lt'
    // Negating it results in 'ge'
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  // Truncate (input + 0.5) toward -infinity; any FPU exception flag means
  // the result is not a valid int32.
  Register except_flag = scratch;
  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     double_scratch0(),
                     at,
                     double_scratch1,
                     except_flag);

  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ mfc1(scratch, input.high());
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  }
  __ bind(&done);
}
3747
3748
3749void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3750  DoubleRegister input = ToDoubleRegister(instr->value());
3751  DoubleRegister result = ToDoubleRegister(instr->result());
3752  __ sqrt_d(result, input);
3753}
3754
3755
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  // Math.pow(x, 0.5): equivalent to sqrt(x) except for x == -Infinity,
  // which must yield +Infinity (sqrt would give NaN).
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->temp());

  ASSERT(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ Move(temp, -V8_INFINITY);
  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
  // Set up Infinity in the delay slot.
  // result is overwritten if the branch is not taken.
  __ neg_d(result, temp);

  // Add +0 to convert -0 to +0.
  __ add_d(result, input, kDoubleRegZero);
  __ sqrt_d(result, result);
  __ bind(&done);
}
3778
3779
3780void LCodeGen::DoPower(LPower* instr) {
3781  Representation exponent_type = instr->hydrogen()->right()->representation();
3782  // Having marked this as a call, we can use any registers.
3783  // Just make sure that the input/output registers are the expected ones.
3784  ASSERT(!instr->right()->IsDoubleRegister() ||
3785         ToDoubleRegister(instr->right()).is(f4));
3786  ASSERT(!instr->right()->IsRegister() ||
3787         ToRegister(instr->right()).is(a2));
3788  ASSERT(ToDoubleRegister(instr->left()).is(f2));
3789  ASSERT(ToDoubleRegister(instr->result()).is(f0));
3790
3791  if (exponent_type.IsSmi()) {
3792    MathPowStub stub(MathPowStub::TAGGED);
3793    __ CallStub(&stub);
3794  } else if (exponent_type.IsTagged()) {
3795    Label no_deopt;
3796    __ JumpIfSmi(a2, &no_deopt);
3797    __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
3798    DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
3799    __ bind(&no_deopt);
3800    MathPowStub stub(MathPowStub::TAGGED);
3801    __ CallStub(&stub);
3802  } else if (exponent_type.IsInteger32()) {
3803    MathPowStub stub(MathPowStub::INTEGER);
3804    __ CallStub(&stub);
3805  } else {
3806    ASSERT(exponent_type.IsDouble());
3807    MathPowStub stub(MathPowStub::DOUBLE);
3808    __ CallStub(&stub);
3809  }
3810}
3811
3812
3813void LCodeGen::DoRandom(LRandom* instr) {
3814  class DeferredDoRandom: public LDeferredCode {
3815   public:
3816    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3817        : LDeferredCode(codegen), instr_(instr) { }
3818    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3819    virtual LInstruction* instr() { return instr_; }
3820   private:
3821    LRandom* instr_;
3822  };
3823
3824  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3825  // Having marked this instruction as a call we can use any
3826  // registers.
3827  ASSERT(ToDoubleRegister(instr->result()).is(f0));
3828  ASSERT(ToRegister(instr->global_object()).is(a0));
3829
3830  static const int kSeedSize = sizeof(uint32_t);
3831  STATIC_ASSERT(kPointerSize == kSeedSize);
3832
3833  __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
3834  static const int kRandomSeedOffset =
3835      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3836  __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
3837  // a2: FixedArray of the native context's random seeds
3838
3839  // Load state[0].
3840  __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
3841  __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
3842  // Load state[1].
3843  __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
3844  // a1: state[0].
3845  // a0: state[1].
3846
3847  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3848  __ And(a3, a1, Operand(0xFFFF));
3849  __ li(t0, Operand(18273));
3850  __ Mul(a3, a3, t0);
3851  __ srl(a1, a1, 16);
3852  __ Addu(a1, a3, a1);
3853  // Save state[0].
3854  __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
3855
3856  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3857  __ And(a3, a0, Operand(0xFFFF));
3858  __ li(t0, Operand(36969));
3859  __ Mul(a3, a3, t0);
3860  __ srl(a0, a0, 16),
3861  __ Addu(a0, a3, a0);
3862  // Save state[1].
3863  __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
3864
3865  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3866  __ And(a0, a0, Operand(0x3FFFF));
3867  __ sll(a1, a1, 14);
3868  __ Addu(v0, a0, a1);
3869
3870  __ bind(deferred->exit());
3871
3872  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3873  __ li(a2, Operand(0x41300000));
3874  // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
3875  __ Move(f12, v0, a2);
3876  // Move 0x4130000000000000 to FPU.
3877  __ Move(f14, zero_reg, a2);
3878  // Subtract to get the result.
3879  __ sub_d(f0, f12, f14);
3880}
3881
3882
void LCodeGen::DoDeferredRandom(LRandom* instr) {
  // Slow path of DoRandom (taken when the seed state is uninitialized):
  // fall back to the C random number generator.
  __ PrepareCallCFunction(1, scratch0());
  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
  // Return value is in v0.
}
3888
3889
3890void LCodeGen::DoMathExp(LMathExp* instr) {
3891  DoubleRegister input = ToDoubleRegister(instr->value());
3892  DoubleRegister result = ToDoubleRegister(instr->result());
3893  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3894  DoubleRegister double_scratch2 = double_scratch0();
3895  Register temp1 = ToRegister(instr->temp1());
3896  Register temp2 = ToRegister(instr->temp2());
3897
3898  MathExpGenerator::EmitMathExp(
3899      masm(), input, result, double_scratch1, double_scratch2,
3900      temp1, temp2, scratch0());
3901}
3902
3903
3904void LCodeGen::DoMathLog(LMathLog* instr) {
3905  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3906  TranscendentalCacheStub stub(TranscendentalCache::LOG,
3907                               TranscendentalCacheStub::UNTAGGED);
3908  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3909}
3910
3911
3912void LCodeGen::DoMathTan(LMathTan* instr) {
3913  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3914  TranscendentalCacheStub stub(TranscendentalCache::TAN,
3915                               TranscendentalCacheStub::UNTAGGED);
3916  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3917}
3918
3919
3920void LCodeGen::DoMathCos(LMathCos* instr) {
3921  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3922  TranscendentalCacheStub stub(TranscendentalCache::COS,
3923                               TranscendentalCacheStub::UNTAGGED);
3924  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3925}
3926
3927
3928void LCodeGen::DoMathSin(LMathSin* instr) {
3929  ASSERT(ToDoubleRegister(instr->result()).is(f4));
3930  TranscendentalCacheStub stub(TranscendentalCache::SIN,
3931                               TranscendentalCacheStub::UNTAGGED);
3932  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3933}
3934
3935
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(instr->HasPointerMap());

  // With a known target, CallKnownFunction can bypass the generic invoke
  // machinery; otherwise the full InvokeFunction path is used.
  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    RecordPosition(pointers->position());
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
    // Restore the context register after the call.
    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      A1_CONTAINS_TARGET);
  }
}
3957
3958
3959void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3960  ASSERT(ToRegister(instr->result()).is(v0));
3961
3962  int arity = instr->arity();
3963  Handle<Code> ic =
3964      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3965  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3966  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3967}
3968
3969
3970void LCodeGen::DoCallNamed(LCallNamed* instr) {
3971  ASSERT(ToRegister(instr->result()).is(v0));
3972
3973  int arity = instr->arity();
3974  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3975  Handle<Code> ic =
3976      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3977  __ li(a2, Operand(instr->name()));
3978  CallCode(ic, mode, instr);
3979  // Restore context register.
3980  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3981}
3982
3983
3984void LCodeGen::DoCallFunction(LCallFunction* instr) {
3985  ASSERT(ToRegister(instr->function()).is(a1));
3986  ASSERT(ToRegister(instr->result()).is(v0));
3987
3988  int arity = instr->arity();
3989  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3990  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3991  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3992}
3993
3994
3995void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3996  ASSERT(ToRegister(instr->result()).is(v0));
3997
3998  int arity = instr->arity();
3999  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
4000  Handle<Code> ic =
4001      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
4002  __ li(a2, Operand(instr->name()));
4003  CallCode(ic, mode, instr);
4004  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4005}
4006
4007
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  // The target is known statically, so delegate to the direct-call path.
  // a1 is not preloaded, hence A1_UNINITIALIZED.
  CallKnownFunction(instr->hydrogen()->target(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    A1_UNINITIALIZED);
}
4017
4018
4019void LCodeGen::DoCallNew(LCallNew* instr) {
4020  ASSERT(ToRegister(instr->constructor()).is(a1));
4021  ASSERT(ToRegister(instr->result()).is(v0));
4022
4023  __ li(a0, Operand(instr->arity()));
4024  // No cell in a2 for construct type feedback in optimized code
4025  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
4026  __ li(a2, Operand(undefined_value));
4027  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
4028  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4029}
4030
4031
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  // Invokes the Array constructor through a stub specialized on arity and
  // elements kind, with the property cell for allocation-site feedback
  // in a2.
  ASSERT(ToRegister(instr->constructor()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  __ li(a2, Operand(instr->hydrogen()->property_cell()));
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;
  ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a change here,
      // look at the first argument.
      // new Array(n) with n != 0 creates holes, so a packed kind must be
      // widened to its holey variant in that case.
      __ lw(t1, MemOperand(sp, 0));
      __ Branch(&packed_case, eq, t1, Operand(zero_reg));

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
                                              override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}
4073
4074
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  // Direct call into the runtime function named by the instruction.
  CallRuntime(instr->function(), instr->arity(), instr);
}
4078
4079
4080void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4081  Register result = ToRegister(instr->result());
4082  Register base = ToRegister(instr->base_object());
4083  __ Addu(result, base, Operand(instr->offset()));
4084}
4085
4086
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  // Stores a named property into an object: handles external-memory
  // backed fields, tracked double fields, map transitions, and the
  // in-object vs. properties-array cases with write barriers.
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    // Raw store, no tagging and no write barrier.
    Register value = ToRegister(instr->value());
    __ sw(value, MemOperand(object, offset));
    return;
  }

  Handle<Map> transition = instr->transition();

  if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
    // A field tracked as heap-object must not receive a smi; deopt if the
    // value's smi tag bit is clear.
    Register value = ToRegister(instr->value());
    if (!instr->hydrogen()->value()->type().IsHeapObject()) {
      __ And(scratch, value, Operand(kSmiTagMask));
      DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
    }
  } else if (FLAG_track_double_fields && representation.IsDouble()) {
    // Unboxed double field: store the FPU value directly, no barrier.
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, FieldMemOperand(object, offset));
    return;
  }

  if (!transition.is_null()) {
    // Install the new map before the field store.
    __ li(scratch, Operand(transition));
    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          scratch,
                          temp,
                          GetRAState(),
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  ASSERT(!object.is(value));
  // The smi check in the barrier can be skipped when the value is known
  // to be a heap object.
  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (access.IsInobject()) {
    __ sw(value, FieldMemOperand(object, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    // Out-of-object property: store into the properties backing array.
    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ sw(value, FieldMemOperand(scratch, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}
4171
4172
4173void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4174  ASSERT(ToRegister(instr->object()).is(a1));
4175  ASSERT(ToRegister(instr->value()).is(a0));
4176
4177  // Name is always in a2.
4178  __ li(a2, Operand(instr->name()));
4179  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
4180      ? isolate()->builtins()->StoreIC_Initialize_Strict()
4181      : isolate()->builtins()->StoreIC_Initialize();
4182  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4183}
4184
4185
void LCodeGen::ApplyCheckIf(Condition condition,
                            LBoundsCheck* check,
                            Register src1,
                            const Operand& src2) {
  // Emits a deoptimization when |condition| holds on (src1, src2). For
  // bounds checks that were statically eliminated, debug-code builds
  // still verify the condition and stop hard if it ever fires.
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    Label done;
    __ Branch(&done, NegateCondition(condition), src1, src2);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(condition, check->environment(), src1, src2);
  }
}
4199
4200
// Emits an index-vs-length bounds check, deoptimizing (via ApplyCheckIf)
// when the index is out of range.  Checks are skipped entirely when the
// optimizer proved them unnecessary.
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->hydrogen()->skip_check()) return;

  // With allow_equality, index == length is permitted, so only a strictly
  // higher index fails (hi); otherwise index >= length fails (hs).
  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
  if (instr->index()->IsConstantOperand()) {
    int constant_index =
        ToInteger32(LConstantOperand::cast(instr->index()));
    // Materialize the constant index in 'at', smi-tagged if the length is
    // in smi representation so the comparison uses matching encodings.
    if (instr->hydrogen()->length()->representation().IsSmi()) {
      __ li(at, Operand(Smi::FromInt(constant_index)));
    } else {
      __ li(at, Operand(constant_index));
    }
    ApplyCheckIf(condition,
                 instr,
                 at,
                 Operand(ToRegister(instr->length())));
  } else {
    ApplyCheckIf(condition,
                 instr,
                 ToRegister(instr->index()),
                 Operand(ToRegister(instr->length())));
  }
}
4224
4225
// Emits a keyed store into an external (typed) array.  Float/double kinds go
// through the FPU; all integer kinds compute a MemOperand and use the
// appropriately sized store instruction.
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    // Reject constants whose shifted byte offset could overflow 32 bits.
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  // A smi key is already shifted left by kSmiTagSize, so shift less.
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    FPURegister value(ToDoubleRegister(instr->value()));
    // scratch0() = base address of the element (excluding additional_offset).
    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer, constant_key <<
          element_size_shift);
    } else {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
    }

    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      // Narrow the double to single precision before storing 32 bits.
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ sdc1(value, MemOperand(scratch0(), additional_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    // Pick the store width matching the element size.
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ sb(value, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ sh(value, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ sw(value, mem_operand);
        break;
      // The FP kinds were handled above; the remaining kinds are not
      // external arrays and can never reach this instruction.
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
4297
4298
// Emits a keyed store into a FixedDoubleArray, canonicalizing NaNs when
// required so that no stored bit pattern collides with the hole NaN.
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  Label not_nan;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    // Reject constants whose shifted byte offset could overflow 32 bits.
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  // A smi key is already shifted left by kSmiTagSize, so shift less.
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  if (key_is_constant) {
    __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    __ sll(scratch, key, shift_size);
    __ Addu(scratch, elements, Operand(scratch));
    __ Addu(scratch, scratch,
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  }

  if (instr->NeedsCanonicalization()) {
    Label is_nan;
    // Check for NaN. All NaNs must be canonicalized.
    // BranchF's second label is taken on an unordered (NaN) comparison.
    __ BranchF(NULL, &is_nan, eq, value, value);
    __ Branch(&not_nan);

    // Only load canonical NaN if the comparison above set the overflow.
    __ bind(&is_nan);
    // Falls through to the store below with the canonical NaN in 'value'.
    __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
  }

  // not_nan is bound unconditionally; when canonicalization is not needed
  // no branch targets it and control simply falls through to the store.
  __ bind(&not_nan);
  __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
      element_size_shift));
}
4346
4347
// Emits a keyed store into a plain FixedArray of tagged values, followed by
// a write barrier when the stored value may be a heap object.
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    // A constant key implies no key register, so the write-barrier path
    // below (which clobbers 'key') must not be needed.
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      // Smi key: already shifted by kSmiTagSize, so shift by the difference.
      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
      __ addu(scratch, elements, scratch);
    } else {
      __ sll(scratch, key, kPointerSizeLog2);
      __ addu(scratch, elements, scratch);
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ sw(value, FieldMemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // Skip the smi check in the barrier when the value is known to be a
    // heap object.
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetRAState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}
4395
4396
4397void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4398  // By cases: external, fast double
4399  if (instr->is_external()) {
4400    DoStoreKeyedExternalArray(instr);
4401  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4402    DoStoreKeyedFixedDoubleArray(instr);
4403  } else {
4404    DoStoreKeyedFixedArray(instr);
4405  }
4406}
4407
4408
4409void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4410  ASSERT(ToRegister(instr->object()).is(a2));
4411  ASSERT(ToRegister(instr->key()).is(a1));
4412  ASSERT(ToRegister(instr->value()).is(a0));
4413
4414  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
4415      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4416      : isolate()->builtins()->KeyedStoreIC_Initialize();
4417  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4418}
4419
4420
// Transitions an object's elements kind from 'from_map' to 'to_map'.
// Simple transitions only swap the map pointer; others call the
// TransitionElementsKindStub under a safepoint.  Objects whose map does not
// match 'from_map' are left untouched.
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  // Only transition objects currently holding the expected source map.
  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    // The elements layout is compatible: storing the new map suffices.
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ li(new_map_reg, Operand(to_map));
    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        scratch, GetRAState(), kDontSaveFPRegs);
  } else {
    // The elements must be rewritten; call the stub with the object in a0
    // and the target map in a1, recording a safepoint for GC.
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ mov(a0, object_reg);
    __ li(a1, Operand(to_map));
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}
4453
4454
// Deoptimizes unconditionally if the JSArray in 'object' is followed by an
// AllocationMemento; execution continues only when no memento is present
// (the 'ne' condition branches to 'fail' on mismatch, skipping the deopt).
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label fail;
  __ TestJSArrayForAllocationMemento(object, temp, ne, &fail);
  DeoptimizeIf(al, instr->environment());
  __ bind(&fail);
}
4463
4464
4465void LCodeGen::DoStringAdd(LStringAdd* instr) {
4466  __ push(ToRegister(instr->left()));
4467  __ push(ToRegister(instr->right()));
4468  StringAddStub stub(instr->hydrogen()->flags());
4469  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4470}
4471
4472
// Loads the character code at a given index of a string.  The fast path is
// emitted inline by StringCharLoadGenerator; complex string shapes fall back
// to the deferred runtime call.
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  // Inline fast path; bails out to deferred->entry() for string kinds it
  // cannot handle directly.
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}
4493
4494
// Deferred slow path for DoStringCharCodeAt: calls the
// Runtime::kStringCharCodeAt function with (string, smi index) on the stack
// and stores the untagged result into the instruction's result slot.
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
  // The runtime returns the char code as a smi in v0; untag it before
  // writing it back to the saved register slot.
  __ AssertSmi(v0);
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}
4523
4524
// Converts a character code to a single-character string.  The fast path
// looks the code up in the single-character string cache; codes above the
// one-byte range or cache misses (undefined entries) go to the deferred
// runtime call.
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  ASSERT(!char_code.is(result));

  // Codes outside the one-byte range are not cached; use the slow path.
  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxOneByteCharCode));
  // result = SingleCharacterStringCache[char_code].
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ sll(scratch, char_code, kPointerSizeLog2);
  __ Addu(result, result, scratch);
  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  // An undefined cache entry means the string must be allocated at runtime.
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}
4555
4556
// Deferred slow path for DoStringCharFromCode: calls Runtime::kCharFromCode
// with the smi-tagged char code and stores the resulting string into the
// instruction's result slot.
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // The runtime expects a smi argument.
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
  __ StoreToSafepointRegisterSlot(v0, result);
}
4572
4573
4574void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4575  LOperand* input = instr->value();
4576  ASSERT(input->IsRegister() || input->IsStackSlot());
4577  LOperand* output = instr->result();
4578  ASSERT(output->IsDoubleRegister());
4579  FPURegister single_scratch = double_scratch0().low();
4580  if (input->IsStackSlot()) {
4581    Register scratch = scratch0();
4582    __ lw(scratch, ToMemOperand(input));
4583    __ mtc1(scratch, single_scratch);
4584  } else {
4585    __ mtc1(ToRegister(input), single_scratch);
4586  }
4587  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4588}
4589
4590
// Smi-tags an int32 value, deoptimizing on overflow unless the value's
// range is statically known to fit in a smi.
void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* output = instr->result();
  ASSERT(output->IsRegister());
  Register scratch = scratch0();

  // SmiTagCheckOverflow leaves a negative value in scratch on overflow.
  __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
  }
}
4604
4605
4606void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4607  LOperand* input = instr->value();
4608  LOperand* output = instr->result();
4609
4610  FPURegister dbl_scratch = double_scratch0();
4611  __ mtc1(ToRegister(input), dbl_scratch);
4612  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
4613}
4614
4615
// Tags a signed int32 as a number: smi-tags inline, and falls back to the
// deferred heap-number allocation when tagging overflows.
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI: public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      SIGNED_INT32);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());
  Register overflow = scratch0();

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  // Tag; 'overflow' becomes negative if the value does not fit in a smi.
  __ SmiTagCheckOverflow(dst, src, overflow);
  __ BranchOnOverflow(deferred->entry(), overflow);
  __ bind(deferred->exit());
}
4640
4641
// Tags an unsigned int32 as a number: smi-tags inline when the value fits
// in a smi, otherwise falls back to the deferred heap-number allocation.
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU: public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      UNSIGNED_INT32);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  // An unsigned compare also sends values >= 2^31 to the deferred path.
  __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue));
  __ SmiTag(reg, reg);
  __ bind(deferred->exit());
}
4666
4667
// Deferred path for DoNumberTagI/DoNumberTagU: boxes an int32/uint32 value
// that does not fit in a smi into a freshly allocated HeapNumber.
// Tries inline allocation first, then falls back to the runtime.
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                    LOperand* value,
                                    IntegerSignedness signedness) {
  Label slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  DoubleRegister dbl_scratch = double_scratch0();

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  Label done;
  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      // dst holds the overflowed smi tag of src; untag and flip the sign
      // bit to recover the original int32 value.
      __ SmiUntag(src, dst);
      __ Xor(src, src, Operand(0x80000000));
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
    // DONT_TAG_RESULT: t1 receives the untagged object address.
    __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
    __ Move(dst, t1);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains an
  // integer value.
  __ StoreToSafepointRegisterSlot(zero_reg, dst);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  __ Move(dst, v0);
  // Untag to match the inline-allocation path above.
  __ Subu(dst, dst, kHeapObjectTag);

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
  // Re-tag the result and publish it in the safepoint slot.
  __ Addu(dst, dst, kHeapObjectTag);
  __ StoreToSafepointRegisterSlot(dst, dst);
}
4720
4721
// Boxes a double value into a HeapNumber: allocates inline when possible
// (deferring to the runtime otherwise), stores the double payload, then
// tags the pointer.
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ Branch(deferred->entry());
  }
  // The deferred path also leaves an untagged address in 'reg'.
  __ bind(deferred->exit());
  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address tag it
  __ Addu(reg, reg, kHeapObjectTag);
}
4753
4754
// Deferred path for DoNumberTagD: allocates a HeapNumber through the
// runtime and leaves its UNTAGGED address in the result's safepoint slot,
// matching the inline-allocation path's DONT_TAG_RESULT convention.
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  // Strip the heap-object tag so the caller can use the raw address.
  __ Subu(v0, v0, kHeapObjectTag);
  __ StoreToSafepointRegisterSlot(v0, reg);
}
4767
4768
4769void LCodeGen::DoSmiTag(LSmiTag* instr) {
4770  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4771  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
4772}
4773
4774
// Untags a smi.  When the input is not statically known to be a smi, the
// heap-object tag bit is tested and a non-smi input deoptimizes.
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, value of scratch won't be zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    // Untag first; the deopt check on scratch is independent of result.
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}
4789
4790
// Converts a tagged number in input_reg into a double in result_reg.
// Depending on 'mode', the input is either known to be a smi, or may be any
// tagged value (smi, heap number, or — when allowed — undefined, which is
// converted to NaN; anything else deoptimizes).
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DoubleRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Register scratch = scratch0();

  // NOTE(review): the outer 'heap_number' label is shadowed by the inner one
  // below and is itself unused.
  Label load_smi, heap_number, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);

    // Heap number map check.
    __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (!can_convert_undefined_to_nan) {
      // Anything that is not a heap number deoptimizes.
      DeoptimizeIf(ne, env, scratch, Operand(at));
    } else {
      // 'convert' is bound below but only reached by fall-through.
      Label heap_number, convert;
      __ Branch(&heap_number, eq, scratch, Operand(at));

      // Convert undefined to NaN; any other non-heap-number deoptimizes.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, env, input_reg, Operand(at));

      __ bind(&convert);
      // Load the canonical NaN from the root list.
      __ LoadRoot(at, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
      __ Branch(&done);

      __ bind(&heap_number);
    }
    // Heap number to double register conversion.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      // -0.0 has a zero low word and only the sign bit set in the high word.
      __ mfc1(at, result_reg.low());
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ mfc1(scratch, result_reg.high());
      DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
  } else {
    __ SmiUntag(scratch, input_reg);
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  // Smi to double register conversion
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}
4846
4847
// Deferred path of DoTaggedToI: converts a tagged HeapObject (the smi case
// was handled inline) into an int32, in place in input_reg.
// Truncating conversions accept heap numbers (ECMA ToInt32 truncation) and
// undefined (becomes 0); non-truncating conversions require an exact
// heap-number-to-int32 conversion and deoptimize otherwise.
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->temp2());
    FPURegister single_scratch = double_scratch.low();
    ASSERT(!scratch3.is(input_reg) &&
           !scratch3.is(scratch1) &&
           !scratch3.is(scratch2));
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label heap_number;
    __ Branch(&heap_number, eq, scratch1, Operand(at));  // HeapNumber map?
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
    ASSERT(ToRegister(instr->result()).is(input_reg));
    __ mov(input_reg, zero_reg);
    __ Branch(&done);

    __ bind(&heap_number);
    __ ldc1(double_scratch2,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    // ECMA-style truncation (modulo 2^32) of the double value.
    __ EmitECMATruncate(input_reg,
                        double_scratch2,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    // except_flag becomes non-zero if the conversion was inexact or invalid.
    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       input_reg,
                       double_scratch,
                       scratch1,
                       double_scratch2,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed.
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // A zero int32 result may have come from -0.0; check the sign bit of
      // the original double and deoptimize in that case.
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ mfc1(scratch1, double_scratch.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    }
  }
  __ bind(&done);
}
4924
4925
// Converts a tagged value to an int32 in place: smis are untagged inline,
// while heap objects are handled by the deferred DoDeferredTaggedToI path.
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

  // Let the deferred code handle the HeapObject case.
  __ JumpIfNotSmi(input_reg, deferred->entry());

  // Smi to int32 conversion.
  __ SmiUntag(input_reg);
  __ bind(deferred->exit());
}
4952
4953
// Converts a tagged number to a double register, selecting the untag mode
// from the hydrogen value's representation (known smi vs. any tagged).
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}
4973
4974
// Converts a double to an int32.  Truncating conversions use the ECMA
// (modulo 2^32) truncation; exact conversions deoptimize on inexact results
// and, when requested, on -0.0.
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->temp2());
    FPURegister single_scratch = double_scratch0().low();
    __ EmitECMATruncate(result_reg,
                        double_input,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);
  } else {
    // except_flag becomes non-zero if the conversion was inexact or invalid.
    Register except_flag = scratch2;

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // A zero result may have come from -0.0; deopt if the source double's
      // sign bit is set.
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfc1(scratch1, double_input.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
      __ bind(&done);
    }
  }
}
5014
5015
// Converts an untagged double to a Smi. Identical to DoDoubleToI except that
// the int32 result is additionally Smi-tagged, deoptimizing if the value does
// not fit in Smi range.
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    // ECMA truncation handles every input without deoptimizing.
    Register scratch3 = ToRegister(instr->temp2());
    FPURegister single_scratch = double_scratch0().low();
    __ EmitECMATruncate(result_reg,
                        double_input,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);
  } else {
    Register except_flag = scratch2;

    // Round toward -inf, asking for an inexact-conversion flag so fractional
    // inputs are detected.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      // Distinguish +0 from -0 via the sign bit of the double's high word.
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfc1(scratch1, double_input.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
      __ bind(&done);
    }
  }
  // Tag the int32 as a Smi; the check below treats a negative scratch1 as
  // overflow (value outside Smi range) and deoptimizes.
  __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
  DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
}
5057
5058
// Deoptimizes unless the input value is a Smi (its tag bit is clear).
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}
5064
5065
// Deoptimizes if the input value IS a Smi. The check is omitted entirely when
// hydrogen has already proven the value to be a heap object.
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    __ And(at, ToRegister(input), Operand(kSmiTagMask));
    DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
  }
}
5073
5074
// Deoptimizes unless the input object's instance type satisfies the hydrogen
// check: either an [first, last] interval test or a (mask, tag) bit test.
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  // Loads the instance type of the object's map into scratch.
  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
    } else {
      // Deopt when below the interval's lower bound ...
      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        // ... or above its upper bound.
        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      // Single-bit mask: the tag is either 0 (bit must be clear) or the mask
      // itself (bit must be set), so a single AND + zero-compare suffices.
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
          at, Operand(zero_reg));
    } else {
      // General case: mask out the relevant bits and compare against tag.
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
    }
  }
}
5112
5113
5114void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
5115  Register reg = ToRegister(instr->value());
5116  Handle<JSFunction> target = instr->hydrogen()->target();
5117  AllowDeferredHandleDereference smi_check;
5118  if (isolate()->heap()->InNewSpace(*target)) {
5119    Register reg = ToRegister(instr->value());
5120    Handle<Cell> cell = isolate()->factory()->NewCell(target);
5121    __ li(at, Operand(Handle<Object>(cell)));
5122    __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
5123    DeoptimizeIf(ne, instr->environment(), reg,
5124                 Operand(at));
5125  } else {
5126    DeoptimizeIf(ne, instr->environment(), reg,
5127                 Operand(target));
5128  }
5129}
5130
5131
// Deferred slow path for DoCheckMaps: calls the kMigrateInstance runtime
// function on the object and stores the result back into scratch0() via the
// safepoint register slots. Deoptimizes when the runtime call returns a Smi
// (presumably signalling that migration failed — confirm against the runtime
// implementation).
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    __ push(object);
    CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
    // v0 holds the runtime result; stash it in scratch0()'s safepoint slot
    // so it survives the register restore at scope exit.
    __ StoreToSafepointRegisterSlot(v0, scratch0());
  }
  __ And(at, scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}
5142
5143
// Deoptimizes unless the object's map is one of the maps in the hydrogen
// map set. When a migration target exists, a failed check first attempts an
// instance migration (deferred code) and re-runs the map checks afterwards.
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps: public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      // Returning from the deferred migration jumps back to the start of
      // the map checks rather than past them.
      SetExit(check_maps());
    }
    virtual void Generate() {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;
  Register map_reg = scratch0();
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  SmallMapList* map_set = instr->hydrogen()->map_set();
  __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  // Any match short-circuits to success; only the last map decides failure.
  Label success;
  for (int i = 0; i < map_set->length() - 1; i++) {
    Handle<Map> map = map_set->at(i);
    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
  }
  Handle<Map> map = map_set->last();
  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
  if (instr->hydrogen()->has_migration_target()) {
    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
  } else {
    DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
  }

  __ bind(&success);
}
5191
5192
5193void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5194  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5195  Register result_reg = ToRegister(instr->result());
5196  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5197  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
5198}
5199
5200
5201void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5202  Register unclamped_reg = ToRegister(instr->unclamped());
5203  Register result_reg = ToRegister(instr->result());
5204  __ ClampUint8(result_reg, unclamped_reg);
5205}
5206
5207
// Clamps a tagged value into the uint8 range [0, 255]. Accepts Smis, heap
// numbers, and undefined (clamped to 0); everything else deoptimizes.
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr->environment(), input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);

  // Smi case: scratch already holds the untagged value.
  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}
5241
5242
// Inline-allocates an object of the requested size (constant or register) in
// the space selected by the hydrogen flags, falling back to a deferred
// runtime allocation when the inline attempt fails. Optionally prefills the
// allocated memory with one-pointer filler maps.
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate: public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size,
                result,
                scratch,
                scratch2,
                deferred->entry(),
                flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size));
    } else {
      // NOTE(review): this aliases scratch to the size register and the loop
      // below decrements it in place, clobbering the size value — confirm no
      // later use of the size register is expected.
      scratch = ToRegister(instr->size());
    }
    // Walk backwards from the last word, storing a one-pointer filler map
    // into each word of the freshly allocated (untagged) object.
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Subu(result, result, Operand(kHeapObjectTag));
    Label loop;
    __ bind(&loop);
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ Addu(at, result, Operand(scratch));
    __ sw(scratch2, MemOperand(at));
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
    // Re-tag the result pointer.
    __ Addu(result, result, Operand(kHeapObjectTag));
  }
}
5308
5309
// Deferred slow path for DoAllocate: performs the allocation through the
// appropriate runtime function (new space, old pointer space, or old data
// space) and stores the resulting object into the result register's
// safepoint slot.
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // The runtime functions take the size as a Smi argument.
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
  } else {
    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
  }
  // v0 holds the allocated object; persist it through the register restore.
  __ StoreToSafepointRegisterSlot(v0, result);
}
5341
5342
// Converts the object in a0 to fast-properties mode via the runtime;
// the converted object is returned in v0.
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}
5349
5350
// Materializes a regexp literal: fetches (or lazily creates via the runtime)
// the boilerplate regexp object from the literals array, then makes a
// shallow copy of it into newly allocated memory.
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // Registers will be used as follows:
  // t3 = literals array.
  // a1 = regexp literal.
  // a0 = regexp literal clone.
  // a2 and t0-t2 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(t3, instr->hydrogen()->literals());
  __ lw(a1, FieldMemOperand(t3, literal_offset));
  // An undefined slot means the boilerplate has not been created yet.
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create regexp literal using runtime function
  // Result will be in v0.
  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(t1, Operand(instr->hydrogen()->pattern()));
  __ li(t0, Operand(instr->hydrogen()->flags()));
  __ Push(t3, t2, t1, t0);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  // Inline allocation failed; allocate through the runtime instead, keeping
  // the boilerplate (a1) alive across the call.
  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ Push(a1, a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ lw(a3, FieldMemOperand(a1, i));
    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
    __ sw(a3, FieldMemOperand(v0, i));
    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
  }
  // Copy the trailing word when the size is an odd number of pointers.
  if ((size % (2 * kPointerSize)) != 0) {
    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
  }
}
5401
5402
// Creates a closure for a function literal, using the fast stub path when
// possible and the kNewClosure runtime function otherwise.
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ li(a1, Operand(instr->hydrogen()->shared_info()));
    __ push(a1);
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    // Slow path: pass context, shared function info, and the pretenure flag
    // to the runtime.
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    __ li(a1, Operand(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}
5421
5422
// Implements the typeof operator via the runtime; the type-name string is
// returned in v0.
void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}
5429
5430
// Branches on whether typeof(input) equals the instruction's type literal.
// EmitTypeofIs emits the comparison code and returns the final condition plus
// the operands (cmp1, cmp2) that the branch should test.
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  // Filled in by EmitTypeofIs; initialized invalid so the asserts below can
  // detect a branch that forgot to set them.
  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  cmp1,
                                                  cmp2);

  ASSERT(cmp1.is_valid());
  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());

  // kNoCondition means EmitTypeofIs already branched unconditionally.
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}
5451
5452
// Emits the comparison code for a typeof test against type_name. May branch
// directly to true_label/false_label for early-decidable cases; otherwise it
// fills cmp1/cmp2 with the operands of the final comparison and returns the
// condition the caller should branch on (kNoCondition if it already branched).
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register& cmp1,
                                 Operand& cmp2) {
  // This function utilizes the delay slot heavily. This is used to load
  // values that are always usable without depending on the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_string())) {
    // "number": Smi, or heap object with the heap-number map.
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    cmp1 = input;
    cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_string())) {
    // "string": non-smi, instance type below FIRST_NONSTRING_TYPE, and not
    // undetectable.
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
    // input is an object so we can load the BitFieldOffset even if we take the
    // other branch.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->symbol_string())) {
    // "symbol": non-smi with SYMBOL_TYPE instance type.
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    cmp1 = scratch;
    cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_string())) {
    // "boolean": identical to the true or false root value.
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    // "null" (harmony typeof only): identical to the null root value.
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_string())) {
    // "undefined": the undefined value, or an undetectable object.
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_string())) {
    // "function": JS_FUNCTION_TYPE or JS_FUNCTION_PROXY_TYPE.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    cmp1 = input;
    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_string())) {
    // "object": null (unless harmony typeof), or a non-callable spec object
    // that is not undetectable.
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ LoadRoot(at, Heap::kNullValueRootIndex);
      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    }
    Register map = input;
    __ GetObjectType(input, map, scratch);
    __ Branch(false_label,
              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(USE_DELAY_SLOT, false_label,
              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // map is still valid, so the BitField can be loaded in delay slot.
    // Check for undetectable objects => false.
    __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    // Unknown type literal: the test is always false.
    cmp1 = at;
    cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}
5557
5558
// Branches on whether the current function was invoked as a constructor,
// by comparing the calling frame's marker against CONSTRUCT.
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  // Leaves the calling frame's marker in temp1.
  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(instr, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
5567
5568
// Loads the stack-frame marker of the calling frame into temp1, skipping over
// an arguments-adaptor frame if one is present. temp2 is clobbered.
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}
5585
5586
5587void LCodeGen::EnsureSpaceForLazyDeopt() {
5588  if (info()->IsStub()) return;
5589  // Ensure that we have enough space after the previous lazy-bailout
5590  // instruction for patching the code here.
5591  int current_pc = masm()->pc_offset();
5592  int patch_size = Deoptimizer::patch_size();
5593  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5594    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5595    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5596    while (padding_size > 0) {
5597      __ nop();
5598      padding_size -= Assembler::kInstrSize;
5599    }
5600  }
5601}
5602
5603
// Emits a lazy-bailout site: reserves patchable space, records the current pc
// as the latest lazy-deopt location, and registers this instruction's
// environment for lazy deoptimization.
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt();
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
5612
5613
// Emits an unconditional deoptimization of the given bailout type.
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  // 'al' with a trivially-true zero_reg comparison makes the deopt
  // unconditional.
  DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
}
5627
5628
// LDummyUse only keeps its operand alive for register allocation; it emits
// no code.
void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}
5632
5633
// Deferred slow path for DoStackCheck: calls the kStackGuard runtime function
// with registers saved and records a lazy-deopt safepoint for it.
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
5643
5644
// Emits a stack-overflow check, either at function entry (inline stub call)
// or at a backwards branch (deferred runtime call), comparing sp against the
// stack-limit root.
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    StackCheckStub stub;
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
    EnsureSpaceForLazyDeopt();
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt();
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(instr->done_label());
    // The deferred code returns to done_label rather than the default exit.
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}
5689
5690
// Marks an on-stack-replacement entry point: registers the environment for
// deoptimization and records the assembler pc as the OSR entry offset.
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  // Normally we record the first unknown OSR value as the entrypoint to the OSR
  // code, but if there were none, record the entrypoint here.
  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}
5706
5707
// Prepares a for-in loop: deoptimizes for undefined/null/smi/proxy objects,
// then produces either the object's map (enum-cache fast path) or a
// fixed array of property names from the runtime into the result register.
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
  // Deopt for undefined ...
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(at));

  // ... null ...
  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));

  // ... smis ...
  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));

  // ... and JS proxies.
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  ASSERT(object.is(a0));
  __ CheckEnumCache(null_value, &call_runtime);

  // Fast path: the enum cache is valid, so the map itself is the result.
  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  // The runtime result must have the meta map (i.e. be a map); otherwise
  // deoptimize.
  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  ASSERT(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
  __ bind(&use_cache);
}
5743
5744
// Loads the enum cache array for a for-in loop from the given map. If the
// map's EnumLength is zero the empty fixed array is used; a null cache entry
// deoptimizes.
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  // No enumerable properties: use the canonical empty fixed array.
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  // A zero (null) cache entry is invalid; deoptimize.
  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));

  __ bind(&done);
}
5764
5765
// Deoptimizes unless the object's map equals the map register (used to detect
// object modification during for-in iteration).
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
}
5772
5773
// Loads a property by its field index: non-negative indices address in-object
// fields relative to the header, negative indices address the out-of-object
// properties backing store.
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  // Converts the smi index into a byte offset (executed on both paths).
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  // Subtracting the (negative-index-derived) offset steps forward into the
  // properties array.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}
5798
5799
5800#undef __
5801
5802} }  // namespace v8::internal
5803