// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "mips/lithium-codegen-mips.h"
#include "mips/lithium-gap-resolver-mips.h"
#include "code-stubs.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

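// Generates safepoint records around calls: the AfterCall hook records a
// safepoint for the given pointer map right after the generated call.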
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

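// Convenience macro: every "__ op(...)" below expands to "masm()->op(...)".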
#define __ masm()->

bool LCodeGen::GenerateCode() {
  HPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  CpuFeatures::Scope scope(FPU);

  CodeStub::GenerateFPStubs();

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartArrayPointer<char> name(
        info()->shared_info()->DebugName()->ToCString());
    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ stop("stop_at");
  }
#endif

  // a1: Callee's JS function.
  // cp: Callee's context.
  // fp: Caller's frame pointer.
  // ra: Caller's pc.

  // Strict mode functions and builtins need to replace the receiver
  // with undefined when called as functions (without an explicit
  // receiver object). t1 is zero for method calls and non-zero for
  // function calls.
  if (!info_->is_classic_mode() || info_->is_native()) {
    Label ok;
    __ Branch(&ok, eq, t1, Operand(zero_reg));

    int receiver_offset = scope()->num_parameters() * kPointerSize;
    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
    __ sw(a2, MemOperand(sp, receiver_offset));
    __ bind(&ok);
  }

  __ Push(ra, fp, cp, a1);
  __ Addu(fp, sp, Operand(2 * kPointerSize));  // Adj. FP to point to saved FP.

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ li(a0, Operand(slots));
      __ li(a2, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ push(a2);
      __ Subu(a0, a0, 1);
      __ Branch(&loop, ne, a0, Operand(zero_reg));
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in a1.
    __ push(a1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both v0 and cp.  It replaces the context
    // passed to us.  It's saved on the stack and kept live in cp.
    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        __ RecordWriteContextSlot(
            cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  EnsureSpaceForLazyDeopt();
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      __ bind(code->entry());
      Comment(";;; Deferred code @%d: %s.",
              code->instruction_index(),
              code->instr()->Mnemonic());
      code->Generate();
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateDeoptJumpTable() {
  // TODO(plind): not clear that this will be an advantage for MIPS.
  // Skipping it for now. Raised issue #100 for this.
  Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
  return false;
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    Handle<Object> literal = chunk_->LookupLiteral(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort("EmitLoadRegister: Unsupported double immediate.");
    } else {
      ASSERT(r.IsTagged());
      if (literal->IsSmi()) {
        __ li(scratch, Operand(literal));
      } else {
        __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
      }
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    Handle<Object> literal = chunk_->LookupLiteral(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort("unsupported double immediate");
    } else if (r.IsTagged()) {
      Abort("unsupported tagged immediate");
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  Handle<Object> literal = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
  return literal;
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  return value->Number();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    Handle<Object> literal = chunk_->LookupLiteral(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      return Operand(static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort("ToOperand Unsupported double immediate.");
    }
    ASSERT(r.IsTagged());
    return Operand(literal);
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort("ToOperand IsDoubleRegister unimplemented");
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
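    // With kPointerSize == 4 this places index 0 at fp - 12, just below
    // the fixed part of the frame.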
    return MemOperand(fp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return MemOperand(fp, -(index - 1) * kPointerSize);
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, context,
    // and the first word of the double in the fixed part of the frame.
    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
  } else {
    // Incoming parameter. Skip the return address and the first word of
    // the double.
    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  int closure_id = DefineDeoptimizationLiteral(environment->closure());
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    default:
      UNREACHABLE();
  }
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}


void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed from the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  ASSERT(pointers != NULL);
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count);
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment);
  }
}

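// Emits a conditional jump to the eager deoptimization entry registered
// for this environment; the jump is taken when cc holds on (src1, src2).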
void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Register src1,
                            const Operand& src2) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS.

  if (FLAG_deopt_every_n_times == 1 &&
      info_->shared_info()->opt_count() == id) {
    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
    return;
  }

  if (FLAG_trap_on_deopt) {
    Label skip;
    if (cc != al) {
      __ Branch(&skip, NegateCondition(cc), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  // TODO(plind): The Arm port is a little different here, due to their
  // DeOpt jump table, which is not used for Mips yet.
  __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal);
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition);
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      __ lw(a0, MemOperand(sp, 0));
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}


void LCodeGen::DoModI(LModI* instr) {
  Register scratch = scratch0();
  const Register left = ToRegister(instr->InputAt(0));
  const Register result = ToRegister(instr->result());

  Label done;

  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register scratch = scratch0();
    ASSERT(!left.is(scratch));
    __ mov(scratch, left);
    int32_t p2constant = HConstant::cast(
        instr->hydrogen()->right())->Integer32Value();
    ASSERT(p2constant != 0);
    // Result always takes the sign of the dividend (left).
    p2constant = abs(p2constant);

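    // For x >= 0, x % 2^k == x & (2^k - 1); for x < 0 the mask is applied
    // to -x and the sign of the result is restored below.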
    Label positive_dividend;
    __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
    __ subu(result, zero_reg, left);
    __ And(result, result, p2constant - 1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ subu(result, zero_reg, result);
    __ bind(&positive_dividend);
    __ And(result, scratch, p2constant - 1);
  } else {
    // div runs in the background while we check for special cases.
    Register right = EmitLoadRegister(instr->InputAt(1), scratch);
    __ div(left, right);

    // Check for x % 0.
    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
      DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
    }

    __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
    __ mfhi(result);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
    }
  }
  __ bind(&done);
}


void LCodeGen::DoDivI(LDivI* instr) {
  const Register left = ToRegister(instr->InputAt(0));
  const Register right = ToRegister(instr->InputAt(1));
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ div(left, right);

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
    __ bind(&left_not_min_int);
  }

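  // The division must be exact: deopt when it leaves a remainder.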
  __ mfhi(result);
  DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
  __ mflo(result);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->InputAt(0));
  LOperand* right_op = instr->InputAt(1);

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (right_op->IsConstantOperand() && !can_overflow) {
    // Use optimized code for specific constants.
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately.
      // If the constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        __ Subu(result, zero_reg, left);
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;
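        // mask is 0 for non-negative constants and -1 for negative ones, so
        // (constant + mask) ^ mask computes abs(constant) without a branch.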

        if (IsPowerOf2(constant_abs) ||
            IsPowerOf2(constant_abs - 1) ||
            IsPowerOf2(constant_abs + 1)) {
          if (IsPowerOf2(constant_abs)) {
            int32_t shift = WhichPowerOf2(constant_abs);
            __ sll(result, left, shift);
          } else if (IsPowerOf2(constant_abs - 1)) {
            int32_t shift = WhichPowerOf2(constant_abs - 1);
            __ sll(result, left, shift);
            __ Addu(result, result, left);
          } else if (IsPowerOf2(constant_abs + 1)) {
            int32_t shift = WhichPowerOf2(constant_abs + 1);
            __ sll(result, left, shift);
            __ Subu(result, result, left);
          }

          // Correct the sign of the result if the constant is negative.
          if (constant < 0) {
            __ Subu(result, zero_reg, result);
          }

        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    Register right = EmitLoadRegister(right_op, scratch);
    if (bailout_on_minus_zero) {
      __ Or(ToRegister(instr->TempAt(0)), left, right);
    }

    if (can_overflow) {
      // hi:lo = left * right.
      __ mult(left, right);
      __ mfhi(scratch);
      __ mflo(result);
      __ sra(at, result, 31);
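      // The 64-bit product fits in 32 bits iff the high word equals the
      // sign extension of the low word.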
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
    } else {
      __ Mul(result, left, right);
    }

    if (bailout_on_minus_zero) {
      // Bail out if the result is supposed to be negative zero.
      Label done;
      __ Branch(&done, ne, result, Operand(zero_reg));
      DeoptimizeIf(lt,
                   instr->environment(),
                   ToRegister(instr->TempAt(0)),
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->InputAt(0);
  LOperand* right_op = instr->InputAt(1);
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      __ Xor(result, left, right);
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->InputAt(1);
  Register left = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
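            // A zero shift leaves the value unchanged, but SHR produces an
            // unsigned result: a set sign bit cannot be represented as a
            // signed int32, so deopt in that case.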
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ sll(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // The overflow check macros do not support constant operands, so the
      // IsConstantOperand case is handled by the previous if clause.
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
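    // The macro leaves the sign bit of 'overflow' set iff the subtraction
    // overflowed, hence the check against zero below.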
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Move(result, v);
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value();
  if (value->IsSmi()) {
    __ li(ToRegister(instr->result()), Operand(value));
  } else {
    __ LoadHeapObject(ToRegister(instr->result()),
                      Handle<HeapObject>::cast(value));
  }
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));

  // Load map into |result|.
  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->TempAt(0));
  Label done;

  // If the object is a smi return the object.
  __ Move(result, input);
  __ JumpIfSmi(input, &done);

  // If the object is not a value type, return the object.
  __ GetObjectType(input, map, map);
  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->TempAt(0));
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(a0));
  ASSERT(result.is(v0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

#ifdef DEBUG
  __ AbortIfSmi(object);
  __ GetObjectType(object, scratch, scratch);
  __ Assert(eq, "Trying to get date field from non-date.",
      scratch, Operand(JS_DATE_TYPE));
#endif

  if (index->value() == 0) {
    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
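      // Cached date fields are valid only while the object's cache stamp
      // matches the isolate-wide date cache stamp; otherwise fall through
      // to the runtime call.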
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ li(scratch, Operand(stamp));
      __ lw(scratch, MemOperand(scratch));
      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
                                            kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ li(a1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  __ Nor(result, zero_reg, Operand(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
  __ push(input_reg);
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // The overflow check macros do not support constant operands, so the
      // IsConstantOperand case is handled by the previous if clause.
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result into the double result register.
      __ GetCFunctionDoubleResult(result);

      // Restore the saved registers.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
  ASSERT(ToRegister(instr->InputAt(1)).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  // Other architectures use a nop here, to signal that there is no inlined
  // patchable code. MIPS does not need the nop, since our marker
  // instruction (andi zero_reg) will never be used in normal code.
}


int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}

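// Emits at most one branch per control-flow edge: when one destination is
// the next block to be emitted, the code simply falls through to it.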
1419void LCodeGen::EmitBranch(int left_block, int right_block,
1420                          Condition cc, Register src1, const Operand& src2) {
1421  int next_block = GetNextEmittedBlock(current_block_);
1422  right_block = chunk_->LookupDestination(right_block);
1423  left_block = chunk_->LookupDestination(left_block);
1424  if (right_block == left_block) {
1425    EmitGoto(left_block);
1426  } else if (left_block == next_block) {
1427    __ Branch(chunk_->GetAssemblyLabel(right_block),
1428              NegateCondition(cc), src1, src2);
1429  } else if (right_block == next_block) {
1430    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1431  } else {
1432    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1433    __ Branch(chunk_->GetAssemblyLabel(right_block));
1434  }
1435}
1436
1437
1438void LCodeGen::EmitBranchF(int left_block, int right_block,
1439                           Condition cc, FPURegister src1, FPURegister src2) {
1440  int next_block = GetNextEmittedBlock(current_block_);
1441  right_block = chunk_->LookupDestination(right_block);
1442  left_block = chunk_->LookupDestination(left_block);
1443  if (right_block == left_block) {
1444    EmitGoto(left_block);
1445  } else if (left_block == next_block) {
1446    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
1447               NegateCondition(cc), src1, src2);
1448  } else if (right_block == next_block) {
1449    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1450  } else {
1451    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1452    __ Branch(chunk_->GetAssemblyLabel(right_block));
1453  }
1454}
1455
1456
1457void LCodeGen::DoBranch(LBranch* instr) {
1458  int true_block = chunk_->LookupDestination(instr->true_block_id());
1459  int false_block = chunk_->LookupDestination(instr->false_block_id());
1460
1461  Representation r = instr->hydrogen()->value()->representation();
1462  if (r.IsInteger32()) {
1463    Register reg = ToRegister(instr->InputAt(0));
1464    EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
1465  } else if (r.IsDouble()) {
1466    DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
1467    // Test the double value. Zero and NaN are false.
1468    EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
1469  } else {
1470    ASSERT(r.IsTagged());
1471    Register reg = ToRegister(instr->InputAt(0));
1472    HType type = instr->hydrogen()->value()->type();
1473    if (type.IsBoolean()) {
1474      __ LoadRoot(at, Heap::kTrueValueRootIndex);
1475      EmitBranch(true_block, false_block, eq, reg, Operand(at));
1476    } else if (type.IsSmi()) {
1477      EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
1478    } else {
1479      Label* true_label = chunk_->GetAssemblyLabel(true_block);
1480      Label* false_label = chunk_->GetAssemblyLabel(false_block);
1481
1482      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1483      // Avoid deopts in the case where we've never executed this path before.
1484      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1485
1486      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1487        // undefined -> false.
1488        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
1489        __ Branch(false_label, eq, reg, Operand(at));
1490      }
1491      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1492        // Boolean -> its value.
1493        __ LoadRoot(at, Heap::kTrueValueRootIndex);
1494        __ Branch(true_label, eq, reg, Operand(at));
1495        __ LoadRoot(at, Heap::kFalseValueRootIndex);
1496        __ Branch(false_label, eq, reg, Operand(at));
1497      }
1498      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1499        // 'null' -> false.
1500        __ LoadRoot(at, Heap::kNullValueRootIndex);
1501        __ Branch(false_label, eq, reg, Operand(at));
1502      }
1503
1504      if (expected.Contains(ToBooleanStub::SMI)) {
1505        // Smis: 0 -> false, all other -> true.
1506        __ Branch(false_label, eq, reg, Operand(zero_reg));
1507        __ JumpIfSmi(reg, true_label);
1508      } else if (expected.NeedsMap()) {
1509        // If we need a map later and have a Smi -> deopt.
1510        __ And(at, reg, Operand(kSmiTagMask));
1511        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
1512      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(false_label, ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(true_label, ne, at, Operand(zero_reg));
        __ Branch(false_label);
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(false_label);
        __ bind(&not_heap_number);
      }

      // We've seen something for the first time -> deopt.
      DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    __ jmp(chunk_->GetAssemblyLabel(block));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val)
        ? true_block : false_block;
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to the false block label.
      __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
                 left_reg, right_reg);

      EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand(0);

      if (right->IsConstantOperand()) {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
      } else if (left->IsConstantOperand()) {
        cmp_left = ToRegister(right);
        cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
        // We transposed the operands. Reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
    }
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  EmitBranch(true_block, false_block, eq, left, Operand(right));
}


void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitBranch(true_block, false_block, eq, left,
             Operand(instr->hydrogen()->right()));
}


void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
  Register scratch = scratch0();
  Register reg = ToRegister(instr->InputAt(0));
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  // If the expression is known to be untagged or a smi, then it's definitely
  // not null, and it can't be an undetectable object.
  if (instr->hydrogen()->representation().IsSpecialization() ||
      instr->hydrogen()->type().IsSmi()) {
    EmitGoto(false_block);
    return;
  }

  int true_block = chunk_->LookupDestination(instr->true_block_id());

  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
      Heap::kNullValueRootIndex :
      Heap::kUndefinedValueRootIndex;
  __ LoadRoot(at, nil_value);
  if (instr->kind() == kStrictEquality) {
    EmitBranch(true_block, false_block, eq, reg, Operand(at));
  } else {
    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
        Heap::kUndefinedValueRootIndex :
        Heap::kNullValueRootIndex;
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
    __ LoadRoot(at, other_nil_value);  // In the delay slot.
    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
    __ JumpIfSmi(reg, false_label);  // In the delay slot.
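    // Note: JumpIfSmi emits more than one instruction; presumably only its
    // first instruction (the And that extracts the tag bit) actually sits in
    // the delay slot, and the branch it emits follows after it.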
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch, 1 << Map::kIsUndetectable);
    EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
  }
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ Branch(is_object, eq, input, Operand(temp2));

  // Load map.
  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));

  // Load instance type and check that it is in object type range.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ Branch(is_not_object,
            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));

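  // The upper bound of the range check is left to the caller, which branches
  // on the returned condition (le) comparing temp2 against
  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE.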
  return le;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp1 = ToRegister(instr->TempAt(0));
  Register temp2 = scratch0();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond =
      EmitIsObject(reg, temp1, temp2, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond, temp2,
             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string) {
  __ JumpIfSmi(input, is_not_string);
  __ GetObjectType(input, temp1, temp1);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp1 = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond =
      EmitIsString(reg, temp1, false_label);

  EmitBranch(true_block, false_block, true_cond, temp1,
             Operand(FIRST_NONSTRING_TYPE));
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);

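  // The compare IC leaves its result in v0; comparing it against zero with
  // the token's condition yields the branch outcome.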
  EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ JumpIfSmi(input, false_label);

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(true_block,
             false_block,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    __ AbortIfNotString(input);
  }

  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ lw(scratch, FieldMemOperand(input, String::kHashFieldOffset));
  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
}


// Branches to a label or falls through with the answer in flags.  Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsEqualTo(CStrVector("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);

    __ GetObjectType(input, temp, temp2);
    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ GetObjectType(input, temp, temp2);
    __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  }

  // Now we are in the FIRST_NONCALLABLE_SPEC_OBJECT_TYPE..
  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ GetObjectType(temp, temp2, temp2);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ lw(temp, FieldMemOperand(temp,
                              SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is a symbol because it's a literal.
  // The name in the constructor is a symbol because of the way the context is
  // booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are symbols it is sufficient to use an identity
  // comparison.

  // End with the address of this class_name instance in the temp register.
  // On MIPS, the caller must do the comparison with Handle<String> class_name.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->TempAt(0));
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  Label true_label, done;
  ASSERT(ToRegister(instr->InputAt(0)).is(a0));  // Object is in a0.
  ASSERT(ToRegister(instr->InputAt(1)).is(a1));  // Function is in a1.
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

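  // As used here, the stub leaves zero in v0 exactly when the object is an
  // instance of the function, hence the eq test against zero_reg below.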
  __ Branch(&true_label, eq, result, Operand(zero_reg));
  __ li(result, Operand(factory()->false_value()));
  __ Branch(&done);
  __ bind(&true_label);
  __ li(result, Operand(factory()->true_value()));
  __ bind(&done);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred =
      new DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(object.is(a0));
  ASSERT(result.is(v0));

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));

  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  __ bind(deferred->map_check());  // Label for calculating code patching.
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch with
  // the cached map.
  Handle<JSGlobalPropertyCell> cell =
      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
  __ li(at, Operand(Handle<Object>(cell)));
  __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
  __ Branch(&cache_miss, ne, map, Operand(at));
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch
  // with true or false.
  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
  __ Branch(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ LoadRoot(temp, Heap::kNullValueRootIndex);
  __ Branch(&false_result, eq, object, Operand(temp));

  // String values are not instances of anything.
  Condition cc = __ IsObjectStringType(object, temp, temp);
  __ Branch(&false_result, cc, temp, Operand(zero_reg));

  // Go to the deferred code.
  __ Branch(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result is either true or false. The deferred code also produces a
  // true or false object.
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // Get the temp register reserved by the instruction. This needs to be t0
  // because its slot in the pushed block of safepoint registers is used to
  // communicate the offset to the location of the map check.
  Register temp = ToRegister(instr->TempAt(0));
  ASSERT(temp.is(t0));
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
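  // The delta stored below tells the stub, in instructions, how far the
  // patchable map check lies behind the call; kAdditionalDelta presumably
  // covers the instructions emitted between this point and the call itself.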
  static const int kAdditionalDelta = 7;
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  {
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
    __ StoreToSafepointRegisterSlot(temp, temp);
  }
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasDeoptimizationEnvironment());
  LEnvironment* env = instr->deoptimization_environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(result, result);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // On MIPS there is no need for a "no inlined smi code" marker (nop).

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
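  // If the condition holds, the branch is taken with the true value already
  // loaded in its delay slot; otherwise both loads execute and the false
  // value is what remains in the result register.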
  ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
  __ bind(&done);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in v0.
    __ push(v0);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
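  // The +1 accounts for the receiver slot, which is popped together with the
  // parameters.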
  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
  __ mov(sp, fp);
  __ Pop(ra, fp);
  __ Addu(sp, sp, Operand(sp_delta));
  __ Jump(ra);
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
  __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  __ li(a2, Operand(instr->name()));
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ li(cell, Operand(instr->hydrogen()->cell()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload.
    Register payload = ToRegister(instr->TempAt(0));
    __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
  }

  // Store the value.
  __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  __ lw(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(at));
    } else {
      Label is_not_hole;
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
    } else {
      __ Branch(&skip_assignment, ne, scratch, Operand(at));
    }
  }

  __ sw(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch0(),
                              kRAHasBeenSaved,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
  } else {
    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
  }
}


void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                               Register object,
                                               Handle<Map> type,
                                               Handle<String> name) {
  LookupResult lookup(isolate());
  type->LookupInDescriptors(NULL, *name, &lookup);
  ASSERT(lookup.IsFound() &&
         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
  if (lookup.type() == FIELD) {
    int index = lookup.GetLocalFieldIndexFromMap(*type);
    int offset = index * kPointerSize;
    if (index < 0) {
      // Negative property indices are in-object properties, indexed
      // from the end of the fixed part of the object.
      __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
    } else {
      // Non-negative property indices are in the properties array.
      __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
      __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
    }
  } else {
    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
    __ LoadHeapObject(result, function);
  }
}


void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  int map_count = instr->hydrogen()->types()->length();
  Handle<String> name = instr->hydrogen()->name();
  if (map_count == 0) {
    ASSERT(instr->hydrogen()->need_generic());
    __ li(a2, Operand(name));
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    Label done;
    __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    for (int i = 0; i < map_count - 1; ++i) {
      Handle<Map> map = instr->hydrogen()->types()->at(i);
      Label next;
      __ Branch(&next, ne, scratch, Operand(map));
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ Branch(&done);
      __ bind(&next);
    }
    Handle<Map> map = instr->hydrogen()->types()->last();
    if (instr->hydrogen()->need_generic()) {
      Label generic;
      __ Branch(&generic, ne, scratch, Operand(map));
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ Branch(&done);
      __ bind(&generic);
      __ li(a2, Operand(name));
      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
      CallCode(ic, RelocInfo::CODE_TARGET, instr);
    } else {
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
      EmitLoadFieldOrConstantFunction(result, object, map, name);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ GetObjectType(function, result, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));

  // Get the prototype or initial map from the function.
  __ lw(result,
        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), result, Operand(at));

  // If the function does not have an initial map, we're done.
  Label done;
  __ GetObjectType(result, scratch, scratch);
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ Branch(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  __ bind(&non_instance);
  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    Label done, fail;
    __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
    __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);  // In the delay slot.
    __ Branch(&done, eq, scratch, Operand(at));
    // |scratch| still contains the map of the elements object in |result|.
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
    __ Ext(scratch, scratch, Map::kElementsKindShift,
           Map::kElementsKindBitCount);
    __ Branch(&done, eq, scratch, Operand(FAST_ELEMENTS));
    __ Branch(&fail, lt, scratch,
              Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ Branch(&done, le, scratch,
              Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ bind(&fail);
    __ Abort("Check for fast or external elements failed.");
    __ bind(&done);
  }
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register to_reg = ToRegister(instr->result());
  Register from_reg = ToRegister(instr->InputAt(0));
  __ lw(to_reg, FieldMemOperand(from_reg,
                                ExternalArray::kExternalPointerOffset));
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  // Bail out if index is not a valid argument index. The unsigned check also
  // catches negative indices for free.

  // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(),
  // as is done on ARM. It would save us an instruction.
  DeoptimizeIf(ls, instr->environment(), length, Operand(index));

  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them; add one more.
  __ subu(length, length, index);
  __ Addu(length, length, Operand(1));
  __ sll(length, length, kPointerSizeLog2);
  __ Addu(at, arguments, Operand(length));
  __ lw(result, MemOperand(at, 0));
}


void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = EmitLoadRegister(instr->key(), scratch0());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Load the result.
  __ sll(scratch, key, kPointerSizeLog2);  // Key indexes words.
  __ addu(scratch, elements, scratch);
  __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
  }
}


void LCodeGen::DoLoadKeyedFastDoubleElement(
    LLoadKeyedFastDoubleElement* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }

  if (key_is_constant) {
    __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    __ sll(scratch, key, shift_size);
    __ Addu(elements, elements, Operand(scratch));
    __ Addu(elements, elements,
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  }

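  // The hole is encoded as a NaN with a distinguished upper word; loading the
  // upper 32 bits of the double and comparing against kHoleNanUpper32 detects
  // it before the full double load below.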
  __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
  DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));

  __ ldc1(result, MemOperand(elements));
}


void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  Register external_pointer = ToRegister(instr->external_pointer());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  int shift_size = ElementsKindToShiftSize(elements_kind);

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    FPURegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
    } else {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
    }

    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ lwc1(result, MemOperand(scratch0()));
      __ cvt_d_s(result, result);
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ ldc1(result, MemOperand(scratch0()));
    }
  } else {
    Register result = ToRegister(instr->result());
    Register scratch = scratch0();
    MemOperand mem_operand(zero_reg);
    if (key_is_constant) {
      mem_operand = MemOperand(external_pointer,
                               constant_key * (1 << shift_size));
    } else {
      __ sll(scratch, key, shift_size);
      __ Addu(scratch, scratch, external_pointer);
      mem_operand = MemOperand(scratch);
    }
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ lb(result, mem_operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ lbu(result, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ lh(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ lhu(result, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ lw(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ lw(result, mem_operand);
        // TODO(danno): we could be more clever here, perhaps having a special
        // version of the stub that detects if the overflow case actually
        // happens, and generate code that returns a double rather than int.
        DeoptimizeIf(Ugreater_equal, instr->environment(),
                     result, Operand(0x80000000));
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ONLY_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->key()).is(a0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register temp = scratch1();
  Register result = ToRegister(instr->result());

  // Check if the calling frame is an arguments adaptor frame.
  Label done, adapted;
  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Result is the frame pointer for the frame if not adapted and for the real
  // frame below the adaptor frame if adapted.
  __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
  __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // An arguments adaptor frame is present. Get the argument length from it.
  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in the result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ lw(scratch,
        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ lw(scratch,
        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));

  // Do not transform the receiver to object for builtins.
  int32_t strict_mode_function_mask =
      1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
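  // The compiler hints field is stored as a smi, which is why each flag bit
  // is shifted left by kSmiTagSize before being tested.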
  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ And(scratch, receiver, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr->environment(),
               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ Branch(&receiver_ok);

  __ bind(&global_object);
  __ lw(receiver, GlobalObjectOperand());
  __ lw(receiver,
        FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(a0));  // Used for parameter count.
  ASSERT(function.is(a1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(v0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ Move(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ Addu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
  __ sll(scratch, length, 2);
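  // The sll above fills the branch delay slot; it precomputes length * 4,
  // the byte offset of the next argument to push.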
  __ bind(&loop);
  __ Addu(scratch, elements, scratch);
  __ lw(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Subu(length, length, Operand(1));
  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
  __ sll(scratch, length, 2);

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->InputAt(0);
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort("DoPushArgument not implemented for double type.");
  } else {
    Register argument_reg = EmitLoadRegister(argument, at);
    __ push(argument_reg);
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ LoadHeapObject(result, instr->hydrogen()->closure());
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, cp);
}


void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ lw(result,
        MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  // The context is the first argument.
  __ Push(cp, scratch0(), scratch1());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register result = ToRegister(instr->result());
  __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global());
  Register result = ToRegister(instr->result());
  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind) {
  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
      function->shared()->formal_parameter_count() == arity;

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (can_invoke_directly) {
    __ LoadHeapObject(a1, function);
    // Change context if needed.
    bool change_context =
        (info()->closure()->context() != function->context()) ||
        scope()->contains_with() ||
        (scope()->num_heap_slots() > 0);
    if (change_context) {
      __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
    }

    // Set a0 to the arguments count if adaptation is not needed. Assumes
    // that a0 is available to write to at this point.
    if (!function->NeedsArgumentsAdaption()) {
      __ li(a0, Operand(arity));
    }

    // Invoke function.
    __ SetCallKind(t1, call_kind);
    __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
    __ Call(at);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
  }

  // Restore context.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  __ mov(a0, v0);
  CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(t0) ? a0 : t0;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
    // Set the pointer to the new heap number in tmp1.
    if (!tmp1.is(v0)) __ mov(tmp1, v0);
    // Restore input_reg after the call to the runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
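    // Clearing the sign bit of the exponent word negates the value, which is
    // known to be negative here; the mantissa word is copied unchanged.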
    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
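  // The mov above executes in the branch delay slot, so result == input
  // whenever the branch for input >= 0 is taken.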
  ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
  __ bind(&done);
}


void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LUnaryMathOperation* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->InputAt(0));
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->InputAt(0));
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  FPURegister single_scratch = double_scratch0().low();
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->TempAt(0));

  __ EmitFPUTruncate(kRoundToMinusInf,
                     single_scratch,
                     input,
                     scratch1,
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  // Load the result.
  __ mfc1(result, single_scratch);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ mfc1(scratch1, input.high());
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    __ bind(&done);
  }
}


void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ mfc1(result, input.high());
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
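  // (A biased exponent of at most kExponentBias - 2 means the unbiased
  // exponent is at most -2, i.e. |input| < 0.5.)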
  Label skip1;
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr->environment(), scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());
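  // Rounding is implemented as floor(input + 0.5); the floor itself is
  // performed by the EmitFPUTruncate call below.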

  // Check the sign of the result: if the sign changed, the input
  // value was in ]-0.5, 0[ and the result should be -0.
  __ mfc1(result, double_scratch0().high());
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // The ARM port uses the 'mi' condition here, which is 'lt' on MIPS.
    DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
  } else {
    Label skip2;
    // The ARM port uses 'mi' here, which is 'lt' on MIPS.
    // Negating it results in 'ge'.
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  Register except_flag = scratch;

  __ EmitFPUTruncate(kRoundToMinusInf,
                     double_scratch0().low(),
                     double_scratch0(),
                     result,
                     except_flag);

  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  __ mfc1(result, double_scratch0().low());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ mfc1(scratch, input.high());
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqrt_d(result, input);
}


void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));

  ASSERT(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ Move(temp, -V8_INFINITY);
  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
  // Set up Infinity in the delay slot.
  // result is overwritten if the branch is not taken.
  __ neg_d(result, temp);

  // Add +0 to convert -0 to +0.
  __ add_d(result, input, kDoubleRegZero);
  __ sqrt_d(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
         ToDoubleRegister(instr->InputAt(1)).is(f4));
  ASSERT(!instr->InputAt(1)->IsRegister() ||
         ToRegister(instr->InputAt(1)).is(a2));
  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
  ASSERT(ToDoubleRegister(instr->result()).is(f0));

  if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(a2, &no_deopt);
    __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoRandom(LRandom* instr) {
  class DeferredDoRandom: public LDeferredCode {
   public:
    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LRandom* instr_;
  };

  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
  // Having marked this instruction as a call we can use any
  // registers.
  ASSERT(ToDoubleRegister(instr->result()).is(f0));
  ASSERT(ToRegister(instr->InputAt(0)).is(a0));

  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == kSeedSize);

  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
  // a2: FixedArray of the global context's random seeds
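  // Each seed word is advanced by a 16-bit multiply-with-carry step; the two
  // results are then combined into a 32-bit random bit pattern below.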

  // Load state[0].
  __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
  __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
  // Load state[1].
  __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
  // a1: state[0].
  // a0: state[1].

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  __ And(a3, a1, Operand(0xFFFF));
  __ li(t0, Operand(18273));
  __ Mul(a3, a3, t0);
  __ srl(a1, a1, 16);
  __ Addu(a1, a3, a1);
  // Save state[0].
  __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ And(a3, a0, Operand(0xFFFF));
  __ li(t0, Operand(36969));
  __ Mul(a3, a3, t0);
  __ srl(a0, a0, 16);
  __ Addu(a0, a3, a0);
  // Save state[1].
  __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  __ And(a0, a0, Operand(0x3FFFF));
  __ sll(a1, a1, 14);
  __ Addu(v0, a0, a1);

  __ bind(deferred->exit());

  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
  __ li(a2, Operand(0x41300000));
  // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
  __ Move(f12, v0, a2);
  // Move 0x4130000000000000 to FPU.
  __ Move(f14, zero_reg, a2);
  // Subtract to get the result.
  __ sub_d(f0, f12, f14);
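  // f0 now holds (random bits) * 2^-32, i.e. a value in the range [0, 1).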
}


void LCodeGen::DoDeferredRandom(LRandom* instr) {
  __ PrepareCallCFunction(1, scratch0());
  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
  // Return value is in v0.
}


void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
  switch (instr->op()) {
    case kMathAbs:
      DoMathAbs(instr);
      break;
    case kMathFloor:
      DoMathFloor(instr);
      break;
    case kMathRound:
      DoMathRound(instr);
      break;
    case kMathSqrt:
      DoMathSqrt(instr);
      break;
    case kMathPowHalf:
      DoMathPowHalf(instr);
      break;
    case kMathCos:
      DoMathCos(instr);
      break;
    case kMathSin:
      DoMathSin(instr);
      break;
    case kMathTan:
      DoMathTan(instr);
      break;
    case kMathLog:
      DoMathLog(instr);
      break;
    default:
      Abort("Unimplemented type of LUnaryMathOperation.");
      UNREACHABLE();
  }
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(instr->HasPointerMap());
  ASSERT(instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
  ParameterCount count(instr->arity());
  __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
  // Restore context register.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  __ li(a0, Operand(instr->arity()));
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  int offset = instr->offset();

  ASSERT(!object.is(value));

  if (!instr->transition().is_null()) {
    __ li(scratch, Operand(instr->transition()));
    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  }

  // Do the store.
  HType type = instr->hydrogen()->value()->type();
  SmiCheck check_needed =
      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (instr->is_in_object()) {
    __ sw(value, FieldMemOperand(object, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          kRAHasBeenSaved,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ sw(value, FieldMemOperand(scratch, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          kRAHasBeenSaved,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  DeoptimizeIf(hs,
               instr->environment(),
               ToRegister(instr->index()),
               Operand(ToRegister(instr->length())));
}


void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->object());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
  Register scratch = scratch0();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset =
        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
    __ sw(value, FieldMemOperand(elements, offset));
  } else {
    __ sll(scratch, key, kPointerSizeLog2);
    __ addu(scratch, elements, scratch);
    __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    __ RecordWrite(elements,
                   key,
                   value,
                   kRAHasBeenSaved,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}


void LCodeGen::DoStoreKeyedFastDoubleElement(
    LStoreKeyedFastDoubleElement* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  Label not_nan;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    __ sll(scratch, key, shift_size);
    __ Addu(scratch, elements, Operand(scratch));
    __ Addu(scratch, scratch,
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  }

  Label is_nan;
  // Check for NaN. All NaNs must be canonicalized.
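  // (NaN is the only value that does not compare equal to itself, so the
  // unordered branch target of the BranchF below is taken exactly for NaNs.)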
  __ BranchF(NULL, &is_nan, eq, value, value);
  __ Branch(&not_nan);

  // Only load the canonical NaN if the comparison above signaled unordered,
  // i.e. the value is a NaN.
  __ bind(&is_nan);
  __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());

  __ bind(&not_nan);
  __ sdc1(value, MemOperand(scratch));
}


void LCodeGen::DoStoreKeyedSpecializedArrayElement(
    LStoreKeyedSpecializedArrayElement* instr) {

  Register external_pointer = ToRegister(instr->external_pointer());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  int shift_size = ElementsKindToShiftSize(elements_kind);

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    FPURegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
    } else {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
    }

    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(scratch0()));
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ sdc1(value, MemOperand(scratch0()));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand(zero_reg);
    Register scratch = scratch0();
    if (key_is_constant) {
      mem_operand = MemOperand(external_pointer,
                               constant_key * (1 << shift_size));
    } else {
      __ sll(scratch, key, shift_size);
      __ Addu(scratch, scratch, external_pointer);
      mem_operand = MemOperand(scratch);
    }
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ sb(value, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ sh(value, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ sw(value, mem_operand);
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ONLY_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a2));
  ASSERT(ToRegister(instr->key()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register new_map_reg = ToRegister(instr->new_map_reg());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = from_map->elements_kind();
  ElementsKind to_kind = to_map->elements_kind();

  __ mov(ToRegister(instr->result()), object_reg);

  Label not_applicable;
  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  __ li(new_map_reg, Operand(to_map));
  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        scratch, kRAHasBeenSaved, kDontSaveFPRegs);
  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
      to_kind == FAST_DOUBLE_ELEMENTS) {
    Register fixed_object_reg = ToRegister(instr->temp_reg());
    ASSERT(fixed_object_reg.is(a2));
    ASSERT(new_map_reg.is(a3));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
             RelocInfo::CODE_TARGET, instr);
  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
    Register fixed_object_reg = ToRegister(instr->temp_reg());
    ASSERT(fixed_object_reg.is(a2));
    ASSERT(new_map_reg.is(a3));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
             RelocInfo::CODE_TARGET, instr);
  } else {
    UNREACHABLE();
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
  if (FLAG_debug_code) {
    __ AbortIfNotSmi(v0);
  }
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  ASSERT(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxAsciiCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ sll(scratch, char_code, kPointerSizeLog2);
  __ Addu(result, result, scratch);
  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoStringLength(LStringLength* instr) {
  Register string = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  __ lw(result, FieldMemOperand(string, String::kLengthOffset));
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ lw(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI: public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->InputAt(0));
  Register dst = ToRegister(instr->result());
  Register overflow = scratch0();

  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
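  // Tagging shifts the value left by one bit, so integers outside the
  // 31-bit smi range overflow; the deferred code boxes them in a heap
  // number instead.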
  __ SmiTagCheckOverflow(dst, src, overflow);
  __ BranchOnOverflow(deferred->entry(), overflow);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
  Label slow;
  Register src = ToRegister(instr->InputAt(0));
  Register dst = ToRegister(instr->result());
  FPURegister dbl_scratch = double_scratch0();

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // There was overflow, so bits 30 and 31 of the original integer
  // disagree. Try to allocate a heap number in new space and store
  // the value in there. If that fails, call the runtime system.
  Label done;
  if (dst.is(src)) {
    __ SmiUntag(src, dst);
    __ Xor(src, src, Operand(0x80000000));
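    // Untagging halved the overflowed tagged value; overflow means bits 30
    // and 31 of the original integer disagreed, so flipping bit 31 restores
    // the original value.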
  }
  __ mtc1(src, dbl_scratch);
  __ cvt_d_w(dbl_scratch, dbl_scratch);
  if (FLAG_inline_new) {
    __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
    __ Move(dst, t1);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the
  // result register is stored, as this register is in the pointer map, but
  // contains an integer value.
  __ StoreToSafepointRegisterSlot(zero_reg, dst);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  __ Move(dst, v0);

  // Done. Store the double value in dbl_scratch into the value field of
  // the allocated heap number.
  __ bind(&done);
  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
  __ StoreToSafepointRegisterSlot(dst, dst);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));

  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  __ StoreToSafepointRegisterSlot(v0, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, the value of scratch will be non-zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DoubleRegister result_reg,
                                bool deoptimize_on_undefined,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env) {
  Register scratch = scratch0();

  Label load_smi, heap_number, done;

  // Smi check.
  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);

  // Heap number map check.
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  if (deoptimize_on_undefined) {
    DeoptimizeIf(ne, env, scratch, Operand(at));
  } else {
    Label heap_number;
    __ Branch(&heap_number, eq, scratch, Operand(at));

    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(ne, env, input_reg, Operand(at));

    // Convert undefined to NaN.
    __ LoadRoot(at, Heap::kNanValueRootIndex);
    __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
    __ Branch(&done);

    __ bind(&heap_number);
  }
  // Heap number to double register conversion.
  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  if (deoptimize_on_minus_zero) {
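    // -0.0 is the only double whose low word is zero and whose high word
    // equals kSignMask, so these two checks identify it exactly.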
    __ mfc1(at, result_reg.low());
    __ Branch(&done, ne, at, Operand(zero_reg));
    __ mfc1(scratch, result_reg.high());
    DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
  }
  __ Branch(&done);

  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->InputAt(0));
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));
  DoubleRegister double_scratch = double_scratch0();
  FPURegister single_scratch = double_scratch.low();

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // The heap number map in 'at' and the object map in scratch1 are used
  // by the tests in both clauses of the if below.

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->TempAt(1));
    DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
    ASSERT(!scratch3.is(input_reg) &&
           !scratch3.is(scratch1) &&
           !scratch3.is(scratch2));
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label heap_number;
    __ Branch(&heap_number, eq, scratch1, Operand(at));  // HeapNumber map?
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
    ASSERT(ToRegister(instr->result()).is(input_reg));
    __ mov(input_reg, zero_reg);
    __ Branch(&done);

    __ bind(&heap_number);
    __ ldc1(double_scratch2,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    __ EmitECMATruncate(input_reg,
                        double_scratch2,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed.
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    // Load the result.
    __ mfc1(input_reg, single_scratch);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ mfc1(scratch1, double_scratch.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);

  // Let the deferred code handle the HeapObject case.
  __ JumpIfNotSmi(input_reg, deferred->entry());

  // Smi to int32 conversion.
  __ SmiUntag(input_reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->deoptimize_on_undefined(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment());
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));
  DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
  FPURegister single_scratch = double_scratch0().low();

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->TempAt(1));
    __ EmitECMATruncate(result_reg,
                        double_input,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);
  } else {
    Register except_flag = scratch2;

    __ EmitFPUTruncate(kRoundToMinusInf,
                       single_scratch,
                       double_input,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    // Load the result.
    __ mfc1(result_reg, single_scratch);
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
    } else {
      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
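      // With a single-bit mask the tag is either 0 or the mask itself, so
      // testing the masked bit against zero is sufficient.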
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
                   at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
    }
  }
}


void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  Register reg = ToRegister(instr->value());
  Handle<JSFunction> target = instr->hydrogen()->target();
  if (isolate()->heap()->InNewSpace(*target)) {
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(target);
    __ li(at, Operand(Handle<Object>(cell)));
    __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
    DeoptimizeIf(ne, instr->environment(), reg, Operand(at));
  } else {
    DeoptimizeIf(ne, instr->environment(), reg, Operand(target));
  }
}
4224
4225
4226void LCodeGen::DoCheckMapCommon(Register reg,
4227                                Register scratch,
4228                                Handle<Map> map,
4229                                CompareMapMode mode,
4230                                LEnvironment* env) {
4231  Label success;
4232  __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
4233  DeoptimizeIf(al, env);
4234  __ bind(&success);
4235}
4236
4237
4238void LCodeGen::DoCheckMap(LCheckMap* instr) {
4239  Register scratch = scratch0();
4240  LOperand* input = instr->InputAt(0);
4241  ASSERT(input->IsRegister());
4242  Register reg = ToRegister(input);
4243  Handle<Map> map = instr->hydrogen()->map();
4244  DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
4245                   instr->environment());
4246}
4247
4248
4249void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4250  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4251  Register result_reg = ToRegister(instr->result());
4252  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4253  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4254}
4255
4256
4257void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4258  Register unclamped_reg = ToRegister(instr->unclamped());
4259  Register result_reg = ToRegister(instr->result());
4260  __ ClampUint8(result_reg, unclamped_reg);
4261}
4262
4263
4264void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4265  Register scratch = scratch0();
4266  Register input_reg = ToRegister(instr->unclamped());
4267  Register result_reg = ToRegister(instr->result());
4268  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4269  Label is_smi, done, heap_number;
4270
4271  // Both smi and heap number cases are handled.
4272  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
4273
4274  // Check for heap number
4275  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4276  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
4277
4278  // Check for undefined. Undefined is converted to zero for clamping
4279  // conversions.
4280  DeoptimizeIf(ne, instr->environment(), input_reg,
4281               Operand(factory()->undefined_value()));
4282  __ mov(result_reg, zero_reg);
4283  __ jmp(&done);
4284
4285  // Heap number
4286  __ bind(&heap_number);
4287  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
4288                                             HeapNumber::kValueOffset));
4289  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4290  __ jmp(&done);
4291
4292  __ bind(&is_smi);
4293  __ ClampUint8(result_reg, scratch);
4294
4295  __ bind(&done);
4296}
4297
4298
4299void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4300  Register temp1 = ToRegister(instr->TempAt(0));
4301  Register temp2 = ToRegister(instr->TempAt(1));
4302
4303  Handle<JSObject> holder = instr->holder();
4304  Handle<JSObject> current_prototype = instr->prototype();
4305
4306  // Load prototype object.
4307  __ LoadHeapObject(temp1, current_prototype);
4308
4309  // Check prototype maps up to the holder.
4310  while (!current_prototype.is_identical_to(holder)) {
4311    DoCheckMapCommon(temp1, temp2,
4312                     Handle<Map>(current_prototype->map()),
4313                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4314    current_prototype =
4315        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4316    // Load next prototype object.
4317    __ LoadHeapObject(temp1, current_prototype);
4318  }
4319
4320  // Check the holder map.
4321  DoCheckMapCommon(temp1, temp2,
4322                   Handle<Map>(current_prototype->map()),
4323                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4324}
4325
4326
4327void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4328  class DeferredAllocateObject: public LDeferredCode {
4329   public:
4330    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4331        : LDeferredCode(codegen), instr_(instr) { }
4332    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4333    virtual LInstruction* instr() { return instr_; }
4334   private:
4335    LAllocateObject* instr_;
4336  };
4337
4338  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
4339
4340  Register result = ToRegister(instr->result());
4341  Register scratch = ToRegister(instr->TempAt(0));
4342  Register scratch2 = ToRegister(instr->TempAt(1));
4343  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4344  Handle<Map> initial_map(constructor->initial_map());
4345  int instance_size = initial_map->instance_size();
4346  ASSERT(initial_map->pre_allocated_property_fields() +
4347         initial_map->unused_property_fields() -
4348         initial_map->inobject_properties() == 0);
4349
4350  // Allocate memory for the object.  The initial map might change when
4351  // the constructor's prototype changes, but instance size and property
4352  // counts remain unchanged (if slack tracking finished).
4353  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4354  __ AllocateInNewSpace(instance_size,
4355                        result,
4356                        scratch,
4357                        scratch2,
4358                        deferred->entry(),
4359                        TAG_OBJECT);
4360
4361  // Load the initial map.
4362  Register map = scratch;
4363  __ LoadHeapObject(map, constructor);
4364  __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
4365
4366  // Initialize map and fields of the newly allocated object.
4367  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4368  __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
4369  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4370  __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
4371  __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
4372  if (initial_map->inobject_properties() != 0) {
4373    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4374    for (int i = 0; i < initial_map->inobject_properties(); i++) {
4375      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
4376      __ sw(scratch, FieldMemOperand(result, property_offset));
4377    }
4378  }
4379
4380  __ bind(deferred->exit());
4381}
4382
4383
4384void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4385  Register result = ToRegister(instr->result());
4386  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4387
4388  // TODO(3095996): Get rid of this. For now, we need to make the
4389  // result register contain a valid pointer because it is already
4390  // contained in the register pointer map.
4391  __ mov(result, zero_reg);
4392
4393  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4394  __ LoadHeapObject(a0, constructor);
4395  __ push(a0);
4396  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
4397  __ StoreToSafepointRegisterSlot(v0, result);
4398}
4399
4400
4401void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4402  Heap* heap = isolate()->heap();
4403  ElementsKind boilerplate_elements_kind =
4404      instr->hydrogen()->boilerplate_elements_kind();
4405
4406  // Deopt if the array literal boilerplate ElementsKind is of a type different
4407  // than the expected one. The check isn't necessary if the boilerplate has
4408  // already been converted to FAST_ELEMENTS.
4409  if (boilerplate_elements_kind != FAST_ELEMENTS) {
4410    __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
4411    // Load map into a2.
4412    __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
4413    // Load the map's "bit field 2".
4414    __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
4415    // Retrieve elements_kind from bit field 2.
4416    __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
4417    DeoptimizeIf(ne,
4418                 instr->environment(),
4419                 a2,
4420                 Operand(boilerplate_elements_kind));
4421  }
4422  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4423  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
4424  __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4425  // Boilerplate already exists, constant elements are never accessed.
4426  // Pass an empty fixed array.
4427  __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
4428  __ Push(a3, a2, a1);
4429
4430  // Pick the right runtime function or stub to call.
4431  int length = instr->hydrogen()->length();
4432  if (instr->hydrogen()->IsCopyOnWrite()) {
4433    ASSERT(instr->hydrogen()->depth() == 1);
4434    FastCloneShallowArrayStub::Mode mode =
4435        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4436    FastCloneShallowArrayStub stub(mode, length);
4437    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4438  } else if (instr->hydrogen()->depth() > 1) {
4439    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4440  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4441    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4442  } else {
4443    FastCloneShallowArrayStub::Mode mode =
4444        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4445            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4446            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4447    FastCloneShallowArrayStub stub(mode, length);
4448    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4449  }
4450}
4451
4452
void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
                            Register result,
                            Register source,
                            int* offset) {
  ASSERT(!source.is(a2));
  ASSERT(!result.is(a2));

  // Only elements backing stores for non-COW arrays need to be copied.
  Handle<FixedArrayBase> elements(object->elements());
  bool has_elements = elements->length() > 0 &&
      elements->map() != isolate()->heap()->fixed_cow_array_map();

  // Increase the offset so that subsequent objects end up right after
  // this object and its backing store.
  int object_offset = *offset;
  int object_size = object->map()->instance_size();
  int elements_offset = *offset + object_size;
  int elements_size = has_elements ? elements->Size() : 0;
  *offset += object_size + elements_size;
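  // The single allocation therefore ends up laid out as:
  //   [object header + in-object properties][elements store][nested objects]
  // e.g. an object of instance size 4 * kPointerSize with an 8-word backing
  // store places the next nested object 12 words further along.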

  // Copy object header.
  ASSERT(object->properties()->length() == 0);
  int inobject_properties = object->map()->inobject_properties();
  int header_size = object_size - inobject_properties * kPointerSize;
  for (int i = 0; i < header_size; i += kPointerSize) {
    if (has_elements && i == JSObject::kElementsOffset) {
      __ Addu(a2, result, Operand(elements_offset));
    } else {
      __ lw(a2, FieldMemOperand(source, i));
    }
    __ sw(a2, FieldMemOperand(result, object_offset + i));
  }

  // Copy in-object properties.
  for (int i = 0; i < inobject_properties; i++) {
    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
    if (value->IsJSObject()) {
      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
      __ Addu(a2, result, Operand(*offset));
      __ sw(a2, FieldMemOperand(result, total_offset));
      __ LoadHeapObject(source, value_object);
      EmitDeepCopy(value_object, result, source, offset);
    } else if (value->IsHeapObject()) {
      __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
      __ sw(a2, FieldMemOperand(result, total_offset));
    } else {
      __ li(a2, Operand(value));
      __ sw(a2, FieldMemOperand(result, total_offset));
    }
  }

  if (has_elements) {
    // Copy elements backing store header.
    __ LoadHeapObject(source, elements);
    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
      __ lw(a2, FieldMemOperand(source, i));
      __ sw(a2, FieldMemOperand(result, elements_offset + i));
    }

    // Copy elements backing store content.
    int elements_length = elements->length();
    if (elements->IsFixedDoubleArray()) {
      Handle<FixedDoubleArray> double_array =
          Handle<FixedDoubleArray>::cast(elements);
      for (int i = 0; i < elements_length; i++) {
        int64_t value = double_array->get_representation(i);
        // We only support little-endian mode, so the low word is stored
        // first.
        int32_t value_low = value & 0xFFFFFFFF;
        int32_t value_high = value >> 32;
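        // e.g. the double 1.5 has bit pattern 0x3FF8000000000000, giving
        // value_low == 0x00000000 and value_high == 0x3FF80000.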
        int total_offset =
            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
        __ li(a2, Operand(value_low));
        __ sw(a2, FieldMemOperand(result, total_offset));
        __ li(a2, Operand(value_high));
        __ sw(a2, FieldMemOperand(result, total_offset + 4));
      }
    } else if (elements->IsFixedArray()) {
      for (int i = 0; i < elements_length; i++) {
        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
        Handle<Object> value = JSObject::GetElement(object, i);
        if (value->IsJSObject()) {
          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
          __ Addu(a2, result, Operand(*offset));
          __ sw(a2, FieldMemOperand(result, total_offset));
          __ LoadHeapObject(source, value_object);
          EmitDeepCopy(value_object, result, source, offset);
        } else if (value->IsHeapObject()) {
          __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
          __ sw(a2, FieldMemOperand(result, total_offset));
        } else {
          __ li(a2, Operand(value));
          __ sw(a2, FieldMemOperand(result, total_offset));
        }
      }
    } else {
      UNREACHABLE();
    }
  }
}


void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
  int size = instr->hydrogen()->total_size();

  // Allocate all objects that are part of the literal in one big
  // allocation. This avoids multiple limit checks.
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ push(a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);

  __ bind(&allocated);
  int offset = 0;
  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
  EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
  ASSERT_EQ(size, offset);
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  Handle<FixedArray> literals(instr->environment()->closure()->literals());
  Handle<FixedArray> constant_properties =
      instr->hydrogen()->constant_properties();

  // Set up the parameters to the stub/runtime call.
  __ LoadHeapObject(t0, literals);
  __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(a2, Operand(constant_properties));
  int flags = instr->hydrogen()->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  __ li(a1, Operand(Smi::FromInt(flags)));
  __ Push(t0, a3, a2, a1);

  // Pick the right runtime function or stub to call.
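  // constant_properties stores keys and values as alternating entries of a
  // flat array, hence the division by two to get the property count.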
  int properties_count = constant_properties->length() / 2;
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else if (flags != ObjectLiteral::kFastElements ||
      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  } else {
    FastCloneShallowObjectStub stub(properties_count);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // Registers will be used as follows:
  // a3 = JS function.
  // t3 = literals array.
  // a1 = regexp literal.
  // a0 = regexp literal clone.
  // a2 and t0-t2 are used as temporaries.
  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ lw(a1, FieldMemOperand(t3, literal_offset));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create the regexp literal using the runtime function.
  // The result will be in v0.
  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(t1, Operand(instr->hydrogen()->pattern()));
  __ li(t0, Operand(instr->hydrogen()->flags()));
  __ Push(t3, t2, t1, t0);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ Push(a1, a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
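  // e.g. with size == 5 * kPointerSize the loop copies two pairs of words
  // and the tail below copies the remaining fifth word.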
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ lw(a3, FieldMemOperand(a1, i));
    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
    __ sw(a3, FieldMemOperand(v0, i));
    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(shared_info->language_mode());
    __ li(a1, Operand(shared_info));
    __ push(a1);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ li(a2, Operand(shared_info));
    __ li(a1, Operand(pretenure
                       ? factory()->true_value()
                       : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  Register input = ToRegister(instr->InputAt(0));
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal(),
                                                  cmp1,
                                                  cmp2);

  ASSERT(cmp1.is_valid());
  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register& cmp1,
                                 Operand& cmp2) {
  // This function makes heavy use of branch delay slots. They are used to
  // load values that are always safe to compute, regardless of the type of
  // the input register.
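  // The caller branches on |final_branch_condition| applied to cmp1 vs cmp2;
  // e.g. for "number" we return eq with cmp1 holding the input's map and
  // cmp2 the heap number map.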
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_symbol())) {
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    cmp1 = input;
    cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
    // input is an object, so we can load its bit field even if we take the
    // other branch.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_symbol())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    cmp1 = input;
    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ LoadRoot(at, Heap::kNullValueRootIndex);
      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    }
    // input is an object, so it is safe to use GetObjectType in the delay
    // slot.
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // Still an object, so the InstanceType can be loaded.
    __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
    __ Branch(USE_DELAY_SLOT, false_label,
              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // Still an object, so the BitField can be loaded.
    // Check for undetectable objects => false.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    cmp1 = at;
    cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->TempAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(true_block, false_block, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}


void LCodeGen::EnsureSpaceForLazyDeopt() {
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
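  // e.g. if the previous lazy-deopt point was 8 bytes back and the patch
  // sequence needs 16 bytes, two 4-byte nops are emitted before the new
  // deopt point is recorded (the sizes here are illustrative only).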
  int current_pc = masm()->pc_offset();
  int patch_size = Deoptimizer::patch_size();
  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= Assembler::kInstrSize;
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  Register object = ToRegister(instr->object());
  Register key = ToRegister(instr->key());
  Register strict = scratch0();
  __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
  __ Push(object, key, strict);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoIn(LIn* instr) {
  Register obj = ToRegister(instr->object());
  Register key = ToRegister(instr->key());
  __ Push(key, obj);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    StackCheckStub stub;
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    EnsureSpaceForLazyDeopt();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
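  // Deoptimize if |object| is undefined, null, a smi, or a JSProxy; for-in
  // over those values cannot use the enum cache fast path below.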
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(at));

  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));

  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  ASSERT(object.is(a0));
  __ CheckEnumCache(null_value, &call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  ASSERT(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
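  // Deoptimize if the enum cache entry at this index is empty (zero).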
  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

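  // |index| is a smi: a non-negative value selects an in-object field, a
  // negative value selects a slot in the out-of-object properties array.
  // Shifting left by kPointerSizeLog2 - kSmiTagSize turns the smi-tagged
  // index directly into a byte offset.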
  Label out_of_object, done;
  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal