lithium-codegen-arm.cc revision 69a99ed0b2b2ef69d393c371b03db3a98aaf880e
1// Copyright 2011 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "arm/lithium-codegen-arm.h"
31#include "arm/lithium-gap-resolver-arm.h"
32#include "code-stubs.h"
33#include "stub-cache.h"
34
35namespace v8 {
36namespace internal {
37
38
39class SafepointGenerator : public CallWrapper {
40 public:
41  SafepointGenerator(LCodeGen* codegen,
42                     LPointerMap* pointers,
43                     int deoptimization_index)
44      : codegen_(codegen),
45        pointers_(pointers),
46        deoptimization_index_(deoptimization_index) { }
47  virtual ~SafepointGenerator() { }
48
49  virtual void BeforeCall(int call_size) const {
50    ASSERT(call_size >= 0);
51    // Ensure that the safepoint recorded after this call falls at least
52    // Deoptimizer::patch_size() bytes after the previous safepoint.
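    // This leaves room for the call that is patched in at the previous
    // safepoint when the code is lazily deoptimized. For instance, if the
    // previous safepoint ended at pc offset 40, patch_size() were 16 and the
    // upcoming call were 8 bytes starting at offset 44, call_end (52) would
    // be less than 56, so one nop would be emitted before the call.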
53    int call_end = codegen_->masm()->pc_offset() + call_size;
54    int prev_jump_end =
55        codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
56    if (call_end < prev_jump_end) {
57      int padding_size = prev_jump_end - call_end;
58      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
59      while (padding_size > 0) {
60        codegen_->masm()->nop();
61        padding_size -= Assembler::kInstrSize;
62      }
63    }
64  }
65
66  virtual void AfterCall() const {
67    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
68  }
69
70 private:
71  LCodeGen* codegen_;
72  LPointerMap* pointers_;
73  int deoptimization_index_;
74};
75
76
77#define __ masm()->
78
79bool LCodeGen::GenerateCode() {
80  HPhase phase("Code generation", chunk());
81  ASSERT(is_unused());
82  status_ = GENERATING;
83  CpuFeatures::Scope scope1(VFP3);
84  CpuFeatures::Scope scope2(ARMv7);
85  return GeneratePrologue() &&
86      GenerateBody() &&
87      GenerateDeferredCode() &&
88      GenerateDeoptJumpTable() &&
89      GenerateSafepointTable();
90}
91
92
93void LCodeGen::FinishCode(Handle<Code> code) {
94  ASSERT(is_done());
95  code->set_stack_slots(GetStackSlotCount());
96  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
97  PopulateDeoptimizationData(code);
98  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
99}
100
101
102void LCodeGen::Abort(const char* format, ...) {
103  if (FLAG_trace_bailout) {
104    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
105    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
106    va_list arguments;
107    va_start(arguments, format);
108    OS::VPrint(format, arguments);
109    va_end(arguments);
110    PrintF("\n");
111  }
112  status_ = ABORTED;
113}
114
115
116void LCodeGen::Comment(const char* format, ...) {
117  if (!FLAG_code_comments) return;
118  char buffer[4 * KB];
119  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
120  va_list arguments;
121  va_start(arguments, format);
122  builder.AddFormattedList(format, arguments);
123  va_end(arguments);
124
125  // Copy the string before recording it in the assembler to avoid
126  // issues when the stack allocated buffer goes out of scope.
127  size_t length = builder.position();
128  Vector<char> copy = Vector<char>::New(length + 1);
129  memcpy(copy.start(), builder.Finalize(), copy.length());
130  masm()->RecordComment(copy.start());
131}
132
133
134bool LCodeGen::GeneratePrologue() {
135  ASSERT(is_generating());
136
137#ifdef DEBUG
138  if (strlen(FLAG_stop_at) > 0 &&
139      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
140    __ stop("stop_at");
141  }
142#endif
143
144  // r1: Callee's JS function.
145  // cp: Callee's context.
146  // fp: Caller's frame pointer.
147  // lr: Caller's pc.
148
149  // Strict mode functions and builtins need to replace the receiver
150  // with undefined when called as functions (without an explicit
151  // receiver object). r5 is zero for method calls and non-zero for
152  // function calls.
153  if (info_->is_strict_mode() || info_->is_native()) {
154    Label ok;
155    __ cmp(r5, Operand(0));
156    __ b(eq, &ok);
157    int receiver_offset = scope()->num_parameters() * kPointerSize;
158    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
159    __ str(r2, MemOperand(sp, receiver_offset));
160    __ bind(&ok);
161  }
162
163  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
164  __ add(fp, sp, Operand(2 * kPointerSize));  // Adjust FP to point to saved FP.
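  // After the two instructions above the fixed part of the frame looks like
  // this (offsets from the new fp, higher addresses first):
  //   fp + 4 : caller's lr (return address)
  //   fp + 0 : caller's fp
  //   fp - 4 : context (cp)
  //   fp - 8 : callee's JS function (r1)
  // Stack slots reserved below start at fp - 12.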
165
166  // Reserve space for the stack slots needed by the code.
167  int slots = GetStackSlotCount();
168  if (slots > 0) {
169    if (FLAG_debug_code) {
170      __ mov(r0, Operand(slots));
171      __ mov(r2, Operand(kSlotsZapValue));
172      Label loop;
173      __ bind(&loop);
174      __ push(r2);
175      __ sub(r0, r0, Operand(1), SetCC);
176      __ b(ne, &loop);
177    } else {
178      __ sub(sp,  sp, Operand(slots * kPointerSize));
179    }
180  }
181
182  // Possibly allocate a local context.
183  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
184  if (heap_slots > 0) {
185    Comment(";;; Allocate local context");
186    // Argument to NewContext is the function, which is in r1.
187    __ push(r1);
188    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
189      FastNewContextStub stub(heap_slots);
190      __ CallStub(&stub);
191    } else {
192      __ CallRuntime(Runtime::kNewFunctionContext, 1);
193    }
194    RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
195    // Context is returned in both r0 and cp.  It replaces the context
196    // passed to us.  It's saved in the stack and kept live in cp.
197    __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
198    // Copy any necessary parameters into the context.
199    int num_parameters = scope()->num_parameters();
200    for (int i = 0; i < num_parameters; i++) {
201      Slot* slot = scope()->parameter(i)->AsSlot();
202      if (slot != NULL && slot->type() == Slot::CONTEXT) {
203        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
204            (num_parameters - 1 - i) * kPointerSize;
205        // Load parameter from stack.
206        __ ldr(r0, MemOperand(fp, parameter_offset));
207        // Store it in the context.
208        __ mov(r1, Operand(Context::SlotOffset(slot->index())));
209        __ str(r0, MemOperand(cp, r1));
210        // Update the write barrier. This clobbers all involved
211        // registers, so we have to use two more registers to avoid
212        // clobbering cp.
213        __ mov(r2, Operand(cp));
214        __ RecordWrite(r2, Operand(r1), r3, r0);
215      }
216    }
217    Comment(";;; End allocate local context");
218  }
219
220  // Trace the call.
221  if (FLAG_trace) {
222    __ CallRuntime(Runtime::kTraceEnter, 0);
223  }
224  return !is_aborted();
225}
226
227
228bool LCodeGen::GenerateBody() {
229  ASSERT(is_generating());
230  bool emit_instructions = true;
231  for (current_instruction_ = 0;
232       !is_aborted() && current_instruction_ < instructions_->length();
233       current_instruction_++) {
234    LInstruction* instr = instructions_->at(current_instruction_);
235    if (instr->IsLabel()) {
236      LLabel* label = LLabel::cast(instr);
237      emit_instructions = !label->HasReplacement();
238    }
239
240    if (emit_instructions) {
241      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
242      instr->CompileToNative(this);
243    }
244  }
245  return !is_aborted();
246}
247
248
249LInstruction* LCodeGen::GetNextInstruction() {
250  if (current_instruction_ < instructions_->length() - 1) {
251    return instructions_->at(current_instruction_ + 1);
252  } else {
253    return NULL;
254  }
255}
256
257
258bool LCodeGen::GenerateDeferredCode() {
259  ASSERT(is_generating());
260  if (deferred_.length() > 0) {
261    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
262      LDeferredCode* code = deferred_[i];
263      __ bind(code->entry());
264      code->Generate();
265      __ jmp(code->exit());
266    }
267
268    // Pad code to ensure that the last piece of deferred code has
269    // room for lazy bailout.
270    while ((masm()->pc_offset() - LastSafepointEnd())
271           < Deoptimizer::patch_size()) {
272      __ nop();
273    }
274  }
275
276  // Force constant pool emission at the end of the deferred code to make
277  // sure that no constant pools are emitted after.
278  masm()->CheckConstPool(true, false);
279
280  return !is_aborted();
281}
282
283
284bool LCodeGen::GenerateDeoptJumpTable() {
285  // Check that the jump table is accessible from everywhere in the function
286  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
287  // immediate of a branch instruction.
288  // To simplify, we consider the code size from the first instruction to the
289  // end of the jump table. We also don't consider the pc load delta.
290  // Each entry in the jump table generates one instruction and inlines one
291  // 32-bit data word after it.
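  // Each jump table entry is emitted below as a pc-relative load followed by
  // the 32-bit target address, roughly:
  //   ldr pc, [pc, #-4]        ; pc reads as this address + 8 on ARM
  //   .word <deopt entry address>
  // so executing the entry jumps straight to the deoptimization entry.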
292  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
293      deopt_jump_table_.length() * 2)) {
294    Abort("Generated code is too large");
295  }
296
297  // Block the constant pool emission during the jump table emission.
298  __ BlockConstPoolFor(deopt_jump_table_.length());
299  __ RecordComment("[ Deoptimisation jump table");
300  Label table_start;
301  __ bind(&table_start);
302  for (int i = 0; i < deopt_jump_table_.length(); i++) {
303    __ bind(&deopt_jump_table_[i].label);
304    __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
305    __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
306  }
307  ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
308      deopt_jump_table_.length() * 2);
309  __ RecordComment("]");
310
311  // The deoptimization jump table is the last part of the instruction
312  // sequence. Mark the generated code as done unless we bailed out.
313  if (!is_aborted()) status_ = DONE;
314  return !is_aborted();
315}
316
317
318bool LCodeGen::GenerateSafepointTable() {
319  ASSERT(is_done());
320  safepoints_.Emit(masm(), GetStackSlotCount());
321  return !is_aborted();
322}
323
324
325Register LCodeGen::ToRegister(int index) const {
326  return Register::FromAllocationIndex(index);
327}
328
329
330DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
331  return DoubleRegister::FromAllocationIndex(index);
332}
333
334
335Register LCodeGen::ToRegister(LOperand* op) const {
336  ASSERT(op->IsRegister());
337  return ToRegister(op->index());
338}
339
340
341Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
342  if (op->IsRegister()) {
343    return ToRegister(op->index());
344  } else if (op->IsConstantOperand()) {
345    __ mov(scratch, ToOperand(op));
346    return scratch;
347  } else if (op->IsStackSlot() || op->IsArgument()) {
348    __ ldr(scratch, ToMemOperand(op));
349    return scratch;
350  }
351  UNREACHABLE();
352  return scratch;
353}
354
355
356DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
357  ASSERT(op->IsDoubleRegister());
358  return ToDoubleRegister(op->index());
359}
360
361
362DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
363                                                SwVfpRegister flt_scratch,
364                                                DoubleRegister dbl_scratch) {
365  if (op->IsDoubleRegister()) {
366    return ToDoubleRegister(op->index());
367  } else if (op->IsConstantOperand()) {
368    LConstantOperand* const_op = LConstantOperand::cast(op);
369    Handle<Object> literal = chunk_->LookupLiteral(const_op);
370    Representation r = chunk_->LookupLiteralRepresentation(const_op);
371    if (r.IsInteger32()) {
372      ASSERT(literal->IsNumber());
373      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
374      __ vmov(flt_scratch, ip);
375      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
376      return dbl_scratch;
377    } else if (r.IsDouble()) {
378      Abort("unsupported double immediate");
379    } else if (r.IsTagged()) {
380      Abort("unsupported tagged immediate");
381    }
382  } else if (op->IsStackSlot() || op->IsArgument()) {
383    // TODO(regis): Why is vldr not taking a MemOperand?
384    // __ vldr(dbl_scratch, ToMemOperand(op));
385    MemOperand mem_op = ToMemOperand(op);
386    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
387    return dbl_scratch;
388  }
389  UNREACHABLE();
390  return dbl_scratch;
391}
392
393
394int LCodeGen::ToInteger32(LConstantOperand* op) const {
395  Handle<Object> value = chunk_->LookupLiteral(op);
396  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
397  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
398      value->Number());
399  return static_cast<int32_t>(value->Number());
400}
401
402
403Operand LCodeGen::ToOperand(LOperand* op) {
404  if (op->IsConstantOperand()) {
405    LConstantOperand* const_op = LConstantOperand::cast(op);
406    Handle<Object> literal = chunk_->LookupLiteral(const_op);
407    Representation r = chunk_->LookupLiteralRepresentation(const_op);
408    if (r.IsInteger32()) {
409      ASSERT(literal->IsNumber());
410      return Operand(static_cast<int32_t>(literal->Number()));
411    } else if (r.IsDouble()) {
412      Abort("ToOperand Unsupported double immediate.");
413    }
414    ASSERT(r.IsTagged());
415    return Operand(literal);
416  } else if (op->IsRegister()) {
417    return Operand(ToRegister(op));
418  } else if (op->IsDoubleRegister()) {
419    Abort("ToOperand IsDoubleRegister unimplemented");
420    return Operand(0);
421  }
422  // Stack slots not implemented, use ToMemOperand instead.
423  UNREACHABLE();
424  return Operand(0);
425}
426
427
428MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
429  ASSERT(!op->IsRegister());
430  ASSERT(!op->IsDoubleRegister());
431  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
432  int index = op->index();
433  if (index >= 0) {
434    // Local or spill slot. Skip the frame pointer, function, and
435    // context in the fixed part of the frame.
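    // For example, with kPointerSize == 4, slot 0 is at [fp - 12] and
    // slot 1 is at [fp - 16].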
436    return MemOperand(fp, -(index + 3) * kPointerSize);
437  } else {
438    // Incoming parameter. Skip the return address.
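    // For example, index -1 maps to [fp + 8], the first slot above the
    // saved fp and lr (the caller's stack pointer position).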
439    return MemOperand(fp, -(index - 1) * kPointerSize);
440  }
441}
442
443
444MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
445  ASSERT(op->IsDoubleStackSlot());
446  int index = op->index();
447  if (index >= 0) {
448    // Local or spill slot. Skip the frame pointer, function, context,
449    // and the first word of the double in the fixed part of the frame.
450    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
451  } else {
452    // Incoming parameter. Skip the return address and the first word of
453    // the double.
454    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
455  }
456}
457
458
459void LCodeGen::WriteTranslation(LEnvironment* environment,
460                                Translation* translation) {
461  if (environment == NULL) return;
462
463  // The translation includes one command per value in the environment.
464  int translation_size = environment->values()->length();
465  // The output frame height does not include the parameters.
466  int height = translation_size - environment->parameter_count();
467
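  // Inlined frames are translated outermost-first: recurse on the outer
  // environment before emitting this frame's BeginFrame command and values.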
468  WriteTranslation(environment->outer(), translation);
469  int closure_id = DefineDeoptimizationLiteral(environment->closure());
470  translation->BeginFrame(environment->ast_id(), closure_id, height);
471  for (int i = 0; i < translation_size; ++i) {
472    LOperand* value = environment->values()->at(i);
473    // spilled_registers_ and spilled_double_registers_ are either
474    // both NULL or both set.
475    if (environment->spilled_registers() != NULL && value != NULL) {
476      if (value->IsRegister() &&
477          environment->spilled_registers()[value->index()] != NULL) {
478        translation->MarkDuplicate();
479        AddToTranslation(translation,
480                         environment->spilled_registers()[value->index()],
481                         environment->HasTaggedValueAt(i));
482      } else if (
483          value->IsDoubleRegister() &&
484          environment->spilled_double_registers()[value->index()] != NULL) {
485        translation->MarkDuplicate();
486        AddToTranslation(
487            translation,
488            environment->spilled_double_registers()[value->index()],
489            false);
490      }
491    }
492
493    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
494  }
495}
496
497
498void LCodeGen::AddToTranslation(Translation* translation,
499                                LOperand* op,
500                                bool is_tagged) {
501  if (op == NULL) {
502    // TODO(twuerthinger): Introduce marker operands to indicate that this value
503    // is not present and must be reconstructed from the deoptimizer. Currently
504    // this is only used for the arguments object.
505    translation->StoreArgumentsObject();
506  } else if (op->IsStackSlot()) {
507    if (is_tagged) {
508      translation->StoreStackSlot(op->index());
509    } else {
510      translation->StoreInt32StackSlot(op->index());
511    }
512  } else if (op->IsDoubleStackSlot()) {
513    translation->StoreDoubleStackSlot(op->index());
514  } else if (op->IsArgument()) {
515    ASSERT(is_tagged);
516    int src_index = GetStackSlotCount() + op->index();
517    translation->StoreStackSlot(src_index);
518  } else if (op->IsRegister()) {
519    Register reg = ToRegister(op);
520    if (is_tagged) {
521      translation->StoreRegister(reg);
522    } else {
523      translation->StoreInt32Register(reg);
524    }
525  } else if (op->IsDoubleRegister()) {
526    DoubleRegister reg = ToDoubleRegister(op);
527    translation->StoreDoubleRegister(reg);
528  } else if (op->IsConstantOperand()) {
529    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
530    int src_index = DefineDeoptimizationLiteral(literal);
531    translation->StoreLiteral(src_index);
532  } else {
533    UNREACHABLE();
534  }
535}
536
537
538void LCodeGen::CallCode(Handle<Code> code,
539                        RelocInfo::Mode mode,
540                        LInstruction* instr) {
541  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
542}
543
544
545void LCodeGen::CallCodeGeneric(Handle<Code> code,
546                               RelocInfo::Mode mode,
547                               LInstruction* instr,
548                               SafepointMode safepoint_mode) {
549  ASSERT(instr != NULL);
550  LPointerMap* pointers = instr->pointer_map();
551  RecordPosition(pointers->position());
552  __ Call(code, mode);
553  RegisterLazyDeoptimization(instr, safepoint_mode);
554
555  // Signal that we don't inline smi code before these stubs in the
556  // optimizing code generator.
557  if (code->kind() == Code::BINARY_OP_IC ||
558      code->kind() == Code::COMPARE_IC) {
559    __ nop();
560  }
561}
562
563
564void LCodeGen::CallRuntime(const Runtime::Function* function,
565                           int num_arguments,
566                           LInstruction* instr) {
567  ASSERT(instr != NULL);
568  LPointerMap* pointers = instr->pointer_map();
569  ASSERT(pointers != NULL);
570  RecordPosition(pointers->position());
571
572  __ CallRuntime(function, num_arguments);
573  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
574}
575
576
577void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
578                                       int argc,
579                                       LInstruction* instr) {
580  __ CallRuntimeSaveDoubles(id);
581  RecordSafepointWithRegisters(
582      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
583}
584
585
586void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
587                                          SafepointMode safepoint_mode) {
588  // Create the environment to bail out to. If the call has side effects,
589  // execution has to continue after the call; otherwise execution can resume
590  // from a previous bailout point, repeating the call.
591  LEnvironment* deoptimization_environment;
592  if (instr->HasDeoptimizationEnvironment()) {
593    deoptimization_environment = instr->deoptimization_environment();
594  } else {
595    deoptimization_environment = instr->environment();
596  }
597
598  RegisterEnvironmentForDeoptimization(deoptimization_environment);
599  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
600    RecordSafepoint(instr->pointer_map(),
601                    deoptimization_environment->deoptimization_index());
602  } else {
603    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
604    RecordSafepointWithRegisters(
605        instr->pointer_map(),
606        0,
607        deoptimization_environment->deoptimization_index());
608  }
609}
610
611
612void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
613  if (!environment->HasBeenRegistered()) {
614    // Physical stack frame layout:
615    // -x ............. -4  0 ..................................... y
616    // [incoming arguments] [spill slots] [pushed outgoing arguments]
617
618    // Layout of the environment:
619    // 0 ..................................................... size-1
620    // [parameters] [locals] [expression stack including arguments]
621
622    // Layout of the translation:
623    // 0 ........................................................ size - 1 + 4
624    // [expression stack including arguments] [locals] [4 words] [parameters]
625    // |>------------  translation_size ------------<|
626
627    int frame_count = 0;
628    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
629      ++frame_count;
630    }
631    Translation translation(&translations_, frame_count);
632    WriteTranslation(environment, &translation);
633    int deoptimization_index = deoptimizations_.length();
634    environment->Register(deoptimization_index, translation.index());
635    deoptimizations_.Add(environment);
636  }
637}
638
639
640void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
641  RegisterEnvironmentForDeoptimization(environment);
642  ASSERT(environment->HasBeenRegistered());
643  int id = environment->deoptimization_index();
644  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
645  ASSERT(entry != NULL);
646  if (entry == NULL) {
647    Abort("bailout was not prepared");
648    return;
649  }
650
651  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on ARM.
652
653  if (FLAG_deopt_every_n_times == 1 &&
654      info_->shared_info()->opt_count() == id) {
655    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
656    return;
657  }
658
659  if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
660
661  if (cc == al) {
662    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
663  } else {
664    // We often have several deopts to the same entry; reuse the last
665    // jump entry if this is the case.
666    if (deopt_jump_table_.is_empty() ||
667        (deopt_jump_table_.last().address != entry)) {
668      deopt_jump_table_.Add(JumpTableEntry(entry));
669    }
670    __ b(cc, &deopt_jump_table_.last().label);
671  }
672}
673
674
675void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
676  int length = deoptimizations_.length();
677  if (length == 0) return;
678  ASSERT(FLAG_deopt);
679  Handle<DeoptimizationInputData> data =
680      factory()->NewDeoptimizationInputData(length, TENURED);
681
682  Handle<ByteArray> translations = translations_.CreateByteArray();
683  data->SetTranslationByteArray(*translations);
684  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
685
686  Handle<FixedArray> literals =
687      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
688  for (int i = 0; i < deoptimization_literals_.length(); i++) {
689    literals->set(i, *deoptimization_literals_[i]);
690  }
691  data->SetLiteralArray(*literals);
692
693  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
694  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
695
696  // Populate the deoptimization entries.
697  for (int i = 0; i < length; i++) {
698    LEnvironment* env = deoptimizations_[i];
699    data->SetAstId(i, Smi::FromInt(env->ast_id()));
700    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
701    data->SetArgumentsStackHeight(i,
702                                  Smi::FromInt(env->arguments_stack_height()));
703  }
704  code->set_deoptimization_data(*data);
705}
706
707
708int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
709  int result = deoptimization_literals_.length();
710  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
711    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
712  }
713  deoptimization_literals_.Add(literal);
714  return result;
715}
716
717
718void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
719  ASSERT(deoptimization_literals_.length() == 0);
720
721  const ZoneList<Handle<JSFunction> >* inlined_closures =
722      chunk()->inlined_closures();
723
724  for (int i = 0, length = inlined_closures->length();
725       i < length;
726       i++) {
727    DefineDeoptimizationLiteral(inlined_closures->at(i));
728  }
729
730  inlined_function_count_ = deoptimization_literals_.length();
731}
732
733
734void LCodeGen::RecordSafepoint(
735    LPointerMap* pointers,
736    Safepoint::Kind kind,
737    int arguments,
738    int deoptimization_index) {
739  ASSERT(expected_safepoint_kind_ == kind);
740
741  const ZoneList<LOperand*>* operands = pointers->operands();
742  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
743      kind, arguments, deoptimization_index);
744  for (int i = 0; i < operands->length(); i++) {
745    LOperand* pointer = operands->at(i);
746    if (pointer->IsStackSlot()) {
747      safepoint.DefinePointerSlot(pointer->index());
748    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
749      safepoint.DefinePointerRegister(ToRegister(pointer));
750    }
751  }
752  if (kind & Safepoint::kWithRegisters) {
753    // Register cp always contains a pointer to the context.
754    safepoint.DefinePointerRegister(cp);
755  }
756}
757
758
759void LCodeGen::RecordSafepoint(LPointerMap* pointers,
760                               int deoptimization_index) {
761  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
762}
763
764
765void LCodeGen::RecordSafepoint(int deoptimization_index) {
766  LPointerMap empty_pointers(RelocInfo::kNoPosition);
767  RecordSafepoint(&empty_pointers, deoptimization_index);
768}
769
770
771void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
772                                            int arguments,
773                                            int deoptimization_index) {
774  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
775      deoptimization_index);
776}
777
778
779void LCodeGen::RecordSafepointWithRegistersAndDoubles(
780    LPointerMap* pointers,
781    int arguments,
782    int deoptimization_index) {
783  RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
784      deoptimization_index);
785}
786
787
788void LCodeGen::RecordPosition(int position) {
789  if (position == RelocInfo::kNoPosition) return;
790  masm()->positions_recorder()->RecordPosition(position);
791}
792
793
794void LCodeGen::DoLabel(LLabel* label) {
795  if (label->is_loop_header()) {
796    Comment(";;; B%d - LOOP entry", label->block_id());
797  } else {
798    Comment(";;; B%d", label->block_id());
799  }
800  __ bind(label->label());
801  current_block_ = label->block_id();
802  DoGap(label);
803}
804
805
806void LCodeGen::DoParallelMove(LParallelMove* move) {
807  resolver_.Resolve(move);
808}
809
810
811void LCodeGen::DoGap(LGap* gap) {
812  for (int i = LGap::FIRST_INNER_POSITION;
813       i <= LGap::LAST_INNER_POSITION;
814       i++) {
815    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
816    LParallelMove* move = gap->GetParallelMove(inner_pos);
817    if (move != NULL) DoParallelMove(move);
818  }
819
820  LInstruction* next = GetNextInstruction();
821  if (next != NULL && next->IsLazyBailout()) {
822    int pc = masm()->pc_offset();
823    safepoints_.SetPcAfterGap(pc);
824  }
825}
826
827
828void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
829  DoGap(instr);
830}
831
832
833void LCodeGen::DoParameter(LParameter* instr) {
834  // Nothing to do.
835}
836
837
838void LCodeGen::DoCallStub(LCallStub* instr) {
839  ASSERT(ToRegister(instr->result()).is(r0));
840  switch (instr->hydrogen()->major_key()) {
841    case CodeStub::RegExpConstructResult: {
842      RegExpConstructResultStub stub;
843      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
844      break;
845    }
846    case CodeStub::RegExpExec: {
847      RegExpExecStub stub;
848      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
849      break;
850    }
851    case CodeStub::SubString: {
852      SubStringStub stub;
853      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
854      break;
855    }
856    case CodeStub::NumberToString: {
857      NumberToStringStub stub;
858      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
859      break;
860    }
861    case CodeStub::StringAdd: {
862      StringAddStub stub(NO_STRING_ADD_FLAGS);
863      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
864      break;
865    }
866    case CodeStub::StringCompare: {
867      StringCompareStub stub;
868      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
869      break;
870    }
871    case CodeStub::TranscendentalCache: {
872      __ ldr(r0, MemOperand(sp, 0));
873      TranscendentalCacheStub stub(instr->transcendental_type(),
874                                   TranscendentalCacheStub::TAGGED);
875      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
876      break;
877    }
878    default:
879      UNREACHABLE();
880  }
881}
882
883
884void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
885  // Nothing to do.
886}
887
888
889void LCodeGen::DoModI(LModI* instr) {
890  if (instr->hydrogen()->HasPowerOf2Divisor()) {
891    Register dividend = ToRegister(instr->InputAt(0));
892    Register result = ToRegister(instr->result());
893
894    int32_t divisor =
895        HConstant::cast(instr->hydrogen()->right())->Integer32Value();
896
897    if (divisor < 0) divisor = -divisor;
898
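    // A modulus by a power of two is computed by masking with (divisor - 1).
    // A negative dividend is negated before masking and the result negated
    // again afterwards, so the result keeps the dividend's sign as required
    // by JavaScript's % operator.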
899    Label positive_dividend, done;
900    __ cmp(dividend, Operand(0));
901    __ b(pl, &positive_dividend);
902    __ rsb(result, dividend, Operand(0));
903    __ and_(result, result, Operand(divisor - 1), SetCC);
904    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
905      DeoptimizeIf(eq, instr->environment());
906    }
907    __ rsb(result, result, Operand(0));
908    __ b(&done);
909    __ bind(&positive_dividend);
910    __ and_(result, dividend, Operand(divisor - 1));
911    __ bind(&done);
912    return;
913  }
914
915  // These registers hold untagged 32 bit values.
916  Register left = ToRegister(instr->InputAt(0));
917  Register right = ToRegister(instr->InputAt(1));
918  Register result = ToRegister(instr->result());
919
920  Register scratch = scratch0();
921  Register scratch2 = ToRegister(instr->TempAt(0));
922  DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1));
923  DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
924  DwVfpRegister quotient = double_scratch0();
925
926  ASSERT(!dividend.is(divisor));
927  ASSERT(!dividend.is(quotient));
928  ASSERT(!divisor.is(quotient));
929  ASSERT(!scratch.is(left));
930  ASSERT(!scratch.is(right));
931  ASSERT(!scratch.is(result));
932
933  Label done, vfp_modulo, both_positive, right_negative;
934
935  // Check for x % 0.
936  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
937    __ cmp(right, Operand(0));
938    DeoptimizeIf(eq, instr->environment());
939  }
940
941  __ Move(result, left);
942
943  // (0 % x) must yield 0 (if x is finite, which is the case here).
944  __ cmp(left, Operand(0));
945  __ b(eq, &done);
946  // Preload right in a vfp register.
947  __ vmov(divisor.low(), right);
948  __ b(lt, &vfp_modulo);
949
950  __ cmp(left, Operand(right));
951  __ b(lt, &done);
952
953  // Check for (positive) power of two on the right hand side.
954  __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
955                                     scratch,
956                                     &right_negative,
957                                     &both_positive);
958  // Perform modulo operation (scratch contains right - 1).
959  __ and_(result, scratch, Operand(left));
960  __ b(&done);
961
962  __ bind(&right_negative);
963  // Negate right. The sign of the divisor does not matter.
964  __ rsb(right, right, Operand(0));
965
966  __ bind(&both_positive);
967  const int kUnfolds = 3;
968  // If the (nonnegative) left hand side is smaller than the
969  // right hand side, the left hand side is the result.
970  // Else try a few subtractions of the right hand side from the left.
971  __ mov(scratch, left);
972  for (int i = 0; i < kUnfolds; i++) {
973    // Check if the left hand side is less than the
974    // right hand side.
975    __ cmp(scratch, Operand(right));
976    __ mov(result, scratch, LeaveCC, lt);
977    __ b(lt, &done);
978    // If not, reduce the left hand side by the right hand
979    // side and check again.
980    if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
981  }
982
983  __ bind(&vfp_modulo);
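  // The VFP fallback computes left - trunc(left / |right|) * |right|, the
  // remainder of a truncating division. That remainder carries the sign of
  // the dividend, matching the semantics of JavaScript's % operator.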
984  // Load the arguments in VFP registers.
985  // The divisor value was preloaded above. Be careful: 'right' is only live
986  // on entry.
987  __ vmov(dividend.low(), left);
988  // From here on don't use right as it may have been reallocated (for example
989  // to scratch2).
990  right = no_reg;
991
992  __ vcvt_f64_s32(dividend, dividend.low());
993  __ vcvt_f64_s32(divisor, divisor.low());
994
995  // We do not care about the sign of the divisor.
996  __ vabs(divisor, divisor);
997  // Compute the quotient and round it to a 32-bit integer.
998  __ vdiv(quotient, dividend, divisor);
999  __ vcvt_s32_f64(quotient.low(), quotient);
1000  __ vcvt_f64_s32(quotient, quotient.low());
1001
1002  // Compute the remainder in result.
1003  DwVfpRegister double_scratch = dividend;
1004  __ vmul(double_scratch, divisor, quotient);
1005  __ vcvt_s32_f64(double_scratch.low(), double_scratch);
1006  __ vmov(scratch, double_scratch.low());
1007
1008  if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1009    __ sub(result, left, scratch);
1010  } else {
1011    Label ok;
1012    // Check for -0.
1013    __ sub(scratch2, left, scratch, SetCC);
1014    __ b(ne, &ok);
1015    __ cmp(left, Operand(0));
1016    DeoptimizeIf(mi, instr->environment());
1017    __ bind(&ok);
1018    // Load the result and we are done.
1019    __ mov(result, scratch2);
1020  }
1021
1022  __ bind(&done);
1023}
1024
1025
1026void LCodeGen::DoDivI(LDivI* instr) {
1027  class DeferredDivI: public LDeferredCode {
1028   public:
1029    DeferredDivI(LCodeGen* codegen, LDivI* instr)
1030        : LDeferredCode(codegen), instr_(instr) { }
1031    virtual void Generate() {
1032      codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
1033    }
1034   private:
1035    LDivI* instr_;
1036  };
1037
1038  const Register left = ToRegister(instr->InputAt(0));
1039  const Register right = ToRegister(instr->InputAt(1));
1040  const Register scratch = scratch0();
1041  const Register result = ToRegister(instr->result());
1042
1043  // Check for x / 0.
1044  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1045    __ cmp(right, Operand(0));
1046    DeoptimizeIf(eq, instr->environment());
1047  }
1048
1049  // Check for (0 / -x) that will produce negative zero.
1050  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1051    Label left_not_zero;
1052    __ cmp(left, Operand(0));
1053    __ b(ne, &left_not_zero);
1054    __ cmp(right, Operand(0));
1055    DeoptimizeIf(mi, instr->environment());
1056    __ bind(&left_not_zero);
1057  }
1058
1059  // Check for (-kMinInt / -1).
1060  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1061    Label left_not_min_int;
1062    __ cmp(left, Operand(kMinInt));
1063    __ b(ne, &left_not_min_int);
1064    __ cmp(right, Operand(-1));
1065    DeoptimizeIf(eq, instr->environment());
1066    __ bind(&left_not_min_int);
1067  }
1068
1069  Label done, deoptimize;
1070  // Test for a few common cases first.
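  // For divisors 2 and 4 the conditional tst (executed only when the
  // preceding cmp matched) checks that the low bits of the dividend are zero,
  // so the shift is used only when the division is exact; otherwise control
  // falls through towards the generic stub below.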
1071  __ cmp(right, Operand(1));
1072  __ mov(result, left, LeaveCC, eq);
1073  __ b(eq, &done);
1074
1075  __ cmp(right, Operand(2));
1076  __ tst(left, Operand(1), eq);
1077  __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
1078  __ b(eq, &done);
1079
1080  __ cmp(right, Operand(4));
1081  __ tst(left, Operand(3), eq);
1082  __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
1083  __ b(eq, &done);
1084
1085  // Call the stub. The numbers in r0 and r1 have
1086  // to be tagged as Smis. If that is not possible, deoptimize.
1087  DeferredDivI* deferred = new DeferredDivI(this, instr);
1088
1089  __ TrySmiTag(left, &deoptimize, scratch);
1090  __ TrySmiTag(right, &deoptimize, scratch);
1091
1092  __ b(al, deferred->entry());
1093  __ bind(deferred->exit());
1094
1095  // If the result in r0 is a Smi, untag it, else deoptimize.
1096  __ JumpIfNotSmi(result, &deoptimize);
1097  __ SmiUntag(result);
1098  __ b(&done);
1099
1100  __ bind(&deoptimize);
1101  DeoptimizeIf(al, instr->environment());
1102  __ bind(&done);
1103}
1104
1105
1106template<int T>
1107void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
1108                                      Token::Value op) {
1109  Register left = ToRegister(instr->InputAt(0));
1110  Register right = ToRegister(instr->InputAt(1));
1111
1112  PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
1113  // Move left to r1 and right to r0 for the stub call.
1114  if (left.is(r1)) {
1115    __ Move(r0, right);
1116  } else if (left.is(r0) && right.is(r1)) {
1117    __ Swap(r0, r1, r2);
1118  } else if (left.is(r0)) {
1119    ASSERT(!right.is(r1));
1120    __ mov(r1, r0);
1121    __ mov(r0, right);
1122  } else {
1123    ASSERT(!left.is(r0) && !right.is(r0));
1124    __ mov(r0, right);
1125    __ mov(r1, left);
1126  }
1127  BinaryOpStub stub(op, OVERWRITE_LEFT);
1128  __ CallStub(&stub);
1129  RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
1130                                         0,
1131                                         Safepoint::kNoDeoptimizationIndex);
1132  // Overwrite the stored value of r0 with the result of the stub.
1133  __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
1134}
1135
1136
1137void LCodeGen::DoMulI(LMulI* instr) {
1138  Register scratch = scratch0();
1139  Register result = ToRegister(instr->result());
1140  // Note that result may alias left.
1141  Register left = ToRegister(instr->InputAt(0));
1142  LOperand* right_op = instr->InputAt(1);
1143
1144  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1145  bool bailout_on_minus_zero =
1146    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1147
1148  if (right_op->IsConstantOperand() && !can_overflow) {
1149    // Use optimized code for specific constants.
1150    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1151
1152    if (bailout_on_minus_zero && (constant < 0)) {
1153      // The case of a zero constant is handled separately.
1154      // If the constant is negative and left is zero, the result should be -0.
1155      __ cmp(left, Operand(0));
1156      DeoptimizeIf(eq, instr->environment());
1157    }
1158
1159    switch (constant) {
1160      case -1:
1161        __ rsb(result, left, Operand(0));
1162        break;
1163      case 0:
1164        if (bailout_on_minus_zero) {
1165          // If left is strictly negative and the constant is zero, the
1166          // result is -0. Deoptimize if required, otherwise return 0.
1167          __ cmp(left, Operand(0));
1168          DeoptimizeIf(mi, instr->environment());
1169        }
1170        __ mov(result, Operand(0));
1171        break;
1172      case 1:
1173        __ Move(result, left);
1174        break;
1175      default:
1176        // Multiplying by powers of two and powers of two plus or minus
1177        // one can be done faster with shifted operands.
1178        // For other constants we emit standard code.
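        // For example: x * 8 -> mov result, x LSL 3
        //              x * 5 -> add result, x, x LSL 2   (5 == 4 + 1)
        //              x * 7 -> rsb result, x, x LSL 3   (7 == 8 - 1)
        // The next two lines compute a branch-free absolute value: mask is 0
        // for a non-negative constant and -1 for a negative one, so
        // (constant + mask) ^ mask == |constant|.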
1179        int32_t mask = constant >> 31;
1180        uint32_t constant_abs = (constant + mask) ^ mask;
1181
1182        if (IsPowerOf2(constant_abs) ||
1183            IsPowerOf2(constant_abs - 1) ||
1184            IsPowerOf2(constant_abs + 1)) {
1185          if (IsPowerOf2(constant_abs)) {
1186            int32_t shift = WhichPowerOf2(constant_abs);
1187            __ mov(result, Operand(left, LSL, shift));
1188          } else if (IsPowerOf2(constant_abs - 1)) {
1189            int32_t shift = WhichPowerOf2(constant_abs - 1);
1190            __ add(result, left, Operand(left, LSL, shift));
1191          } else if (IsPowerOf2(constant_abs + 1)) {
1192            int32_t shift = WhichPowerOf2(constant_abs + 1);
1193            __ rsb(result, left, Operand(left, LSL, shift));
1194          }
1195
1196          // Correct the sign of the result if the constant is negative.
1197          if (constant < 0)  __ rsb(result, result, Operand(0));
1198
1199        } else {
1200          // Generate standard code.
1201          __ mov(ip, Operand(constant));
1202          __ mul(result, left, ip);
1203        }
1204    }
1205
1206  } else {
1207    Register right = EmitLoadRegister(right_op, scratch);
1208    if (bailout_on_minus_zero) {
1209      __ orr(ToRegister(instr->TempAt(0)), left, right);
1210    }
1211
1212    if (can_overflow) {
1213      // scratch:result = left * right.
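      // The 64-bit product fits in 32 bits iff the high word (scratch) equals
      // the sign extension of the low word (result, ASR 31); otherwise the
      // multiplication overflowed and we deoptimize.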
1214      __ smull(result, scratch, left, right);
1215      __ cmp(scratch, Operand(result, ASR, 31));
1216      DeoptimizeIf(ne, instr->environment());
1217    } else {
1218      __ mul(result, left, right);
1219    }
1220
1221    if (bailout_on_minus_zero) {
1222      // Bail out if the result is supposed to be negative zero.
1223      Label done;
1224      __ cmp(result, Operand(0));
1225      __ b(ne, &done);
1226      __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
1227      DeoptimizeIf(mi, instr->environment());
1228      __ bind(&done);
1229    }
1230  }
1231}
1232
1233
1234void LCodeGen::DoBitI(LBitI* instr) {
1235  LOperand* left_op = instr->InputAt(0);
1236  LOperand* right_op = instr->InputAt(1);
1237  ASSERT(left_op->IsRegister());
1238  Register left = ToRegister(left_op);
1239  Register result = ToRegister(instr->result());
1240  Operand right(no_reg);
1241
1242  if (right_op->IsStackSlot() || right_op->IsArgument()) {
1243    right = Operand(EmitLoadRegister(right_op, ip));
1244  } else {
1245    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1246    right = ToOperand(right_op);
1247  }
1248
1249  switch (instr->op()) {
1250    case Token::BIT_AND:
1251      __ and_(result, left, right);
1252      break;
1253    case Token::BIT_OR:
1254      __ orr(result, left, right);
1255      break;
1256    case Token::BIT_XOR:
1257      __ eor(result, left, right);
1258      break;
1259    default:
1260      UNREACHABLE();
1261      break;
1262  }
1263}
1264
1265
1266void LCodeGen::DoShiftI(LShiftI* instr) {
1267  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1268  // result may alias either of them.
1269  LOperand* right_op = instr->InputAt(1);
1270  Register left = ToRegister(instr->InputAt(0));
1271  Register result = ToRegister(instr->result());
1272  Register scratch = scratch0();
1273  if (right_op->IsRegister()) {
1274    // Mask the right_op operand.
1275    __ and_(scratch, ToRegister(right_op), Operand(0x1F));
1276    switch (instr->op()) {
1277      case Token::SAR:
1278        __ mov(result, Operand(left, ASR, scratch));
1279        break;
1280      case Token::SHR:
1281        if (instr->can_deopt()) {
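          // A logical shift right by zero leaves the sign bit intact; if that
          // bit is set the unsigned result does not fit in a signed 32-bit
          // integer. SetCC makes the following 'mi' check catch exactly that
          // case.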
1282          __ mov(result, Operand(left, LSR, scratch), SetCC);
1283          DeoptimizeIf(mi, instr->environment());
1284        } else {
1285          __ mov(result, Operand(left, LSR, scratch));
1286        }
1287        break;
1288      case Token::SHL:
1289        __ mov(result, Operand(left, LSL, scratch));
1290        break;
1291      default:
1292        UNREACHABLE();
1293        break;
1294    }
1295  } else {
1296    // Mask the right_op operand.
1297    int value = ToInteger32(LConstantOperand::cast(right_op));
1298    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1299    switch (instr->op()) {
1300      case Token::SAR:
1301        if (shift_count != 0) {
1302          __ mov(result, Operand(left, ASR, shift_count));
1303        } else {
1304          __ Move(result, left);
1305        }
1306        break;
1307      case Token::SHR:
1308        if (shift_count != 0) {
1309          __ mov(result, Operand(left, LSR, shift_count));
1310        } else {
1311          if (instr->can_deopt()) {
1312            __ tst(left, Operand(0x80000000));
1313            DeoptimizeIf(ne, instr->environment());
1314          }
1315          __ Move(result, left);
1316        }
1317        break;
1318      case Token::SHL:
1319        if (shift_count != 0) {
1320          __ mov(result, Operand(left, LSL, shift_count));
1321        } else {
1322          __ Move(result, left);
1323        }
1324        break;
1325      default:
1326        UNREACHABLE();
1327        break;
1328    }
1329  }
1330}
1331
1332
1333void LCodeGen::DoSubI(LSubI* instr) {
1334  LOperand* left = instr->InputAt(0);
1335  LOperand* right = instr->InputAt(1);
1336  LOperand* result = instr->result();
1337  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1338  SBit set_cond = can_overflow ? SetCC : LeaveCC;
1339
1340  if (right->IsStackSlot() || right->IsArgument()) {
1341    Register right_reg = EmitLoadRegister(right, ip);
1342    __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1343  } else {
1344    ASSERT(right->IsRegister() || right->IsConstantOperand());
1345    __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1346  }
1347
1348  if (can_overflow) {
1349    DeoptimizeIf(vs, instr->environment());
1350  }
1351}
1352
1353
1354void LCodeGen::DoConstantI(LConstantI* instr) {
1355  ASSERT(instr->result()->IsRegister());
1356  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1357}
1358
1359
1360void LCodeGen::DoConstantD(LConstantD* instr) {
1361  ASSERT(instr->result()->IsDoubleRegister());
1362  DwVfpRegister result = ToDoubleRegister(instr->result());
1363  double v = instr->value();
1364  __ Vmov(result, v);
1365}
1366
1367
1368void LCodeGen::DoConstantT(LConstantT* instr) {
1369  ASSERT(instr->result()->IsRegister());
1370  __ mov(ToRegister(instr->result()), Operand(instr->value()));
1371}
1372
1373
1374void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1375  Register result = ToRegister(instr->result());
1376  Register array = ToRegister(instr->InputAt(0));
1377  __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
1378}
1379
1380
1381void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1382  Register result = ToRegister(instr->result());
1383  Register array = ToRegister(instr->InputAt(0));
1384  __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
1385}
1386
1387
1388void LCodeGen::DoElementsKind(LElementsKind* instr) {
1389  Register result = ToRegister(instr->result());
1390  Register input = ToRegister(instr->InputAt(0));
1391
1392  // Load map into |result|.
1393  __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
1394  // Load the map's "bit field 2" into |result|. We only need the first byte,
1395  // but the following bit field extraction takes care of that anyway.
1396  __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
1397  // Retrieve elements_kind from bit field 2.
1398  __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
1399}
1400
1401
1402void LCodeGen::DoValueOf(LValueOf* instr) {
1403  Register input = ToRegister(instr->InputAt(0));
1404  Register result = ToRegister(instr->result());
1405  Register map = ToRegister(instr->TempAt(0));
1406  Label done;
1407
1408  // If the object is a smi return the object.
1409  __ tst(input, Operand(kSmiTagMask));
1410  __ Move(result, input, eq);
1411  __ b(eq, &done);
1412
1413  // If the object is not a value type, return the object.
1414  __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
1415  __ Move(result, input, ne);
1416  __ b(ne, &done);
1417  __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
1418
1419  __ bind(&done);
1420}
1421
1422
1423void LCodeGen::DoBitNotI(LBitNotI* instr) {
1424  Register input = ToRegister(instr->InputAt(0));
1425  Register result = ToRegister(instr->result());
1426  __ mvn(result, Operand(input));
1427}
1428
1429
1430void LCodeGen::DoThrow(LThrow* instr) {
1431  Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
1432  __ push(input_reg);
1433  CallRuntime(Runtime::kThrow, 1, instr);
1434
1435  if (FLAG_debug_code) {
1436    __ stop("Unreachable code.");
1437  }
1438}
1439
1440
1441void LCodeGen::DoAddI(LAddI* instr) {
1442  LOperand* left = instr->InputAt(0);
1443  LOperand* right = instr->InputAt(1);
1444  LOperand* result = instr->result();
1445  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1446  SBit set_cond = can_overflow ? SetCC : LeaveCC;
1447
1448  if (right->IsStackSlot() || right->IsArgument()) {
1449    Register right_reg = EmitLoadRegister(right, ip);
1450    __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1451  } else {
1452    ASSERT(right->IsRegister() || right->IsConstantOperand());
1453    __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1454  }
1455
1456  if (can_overflow) {
1457    DeoptimizeIf(vs, instr->environment());
1458  }
1459}
1460
1461
1462void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1463  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
1464  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
1465  DoubleRegister result = ToDoubleRegister(instr->result());
1466  switch (instr->op()) {
1467    case Token::ADD:
1468      __ vadd(result, left, right);
1469      break;
1470    case Token::SUB:
1471      __ vsub(result, left, right);
1472      break;
1473    case Token::MUL:
1474      __ vmul(result, left, right);
1475      break;
1476    case Token::DIV:
1477      __ vdiv(result, left, right);
1478      break;
1479    case Token::MOD: {
1480      // Save r0-r3 on the stack.
1481      __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1482
1483      __ PrepareCallCFunction(0, 2, scratch0());
1484      __ SetCallCDoubleArguments(left, right);
1485      __ CallCFunction(
1486          ExternalReference::double_fp_operation(Token::MOD, isolate()),
1487          0, 2);
1488      // Move the result in the double result register.
1489      __ GetCFunctionDoubleResult(result);
1490
1491      // Restore r0-r3.
1492      __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1493      break;
1494    }
1495    default:
1496      UNREACHABLE();
1497      break;
1498  }
1499}
1500
1501
1502void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1503  ASSERT(ToRegister(instr->InputAt(0)).is(r1));
1504  ASSERT(ToRegister(instr->InputAt(1)).is(r0));
1505  ASSERT(ToRegister(instr->result()).is(r0));
1506
1507  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1508  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1509  __ nop();  // Signals no inlined code.
1510}
1511
1512
1513int LCodeGen::GetNextEmittedBlock(int block) {
1514  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1515    LLabel* label = chunk_->GetLabel(i);
1516    if (!label->HasReplacement()) return i;
1517  }
1518  return -1;
1519}
1520
1521
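// Emits the branches for a two-way control-flow split: a conditional branch
// to the left block and, when necessary, an unconditional branch to the right
// block. If either target is the next block to be emitted, that branch is
// omitted and control simply falls through.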
1522void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1523  int next_block = GetNextEmittedBlock(current_block_);
1524  right_block = chunk_->LookupDestination(right_block);
1525  left_block = chunk_->LookupDestination(left_block);
1526
1527  if (right_block == left_block) {
1528    EmitGoto(left_block);
1529  } else if (left_block == next_block) {
1530    __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1531  } else if (right_block == next_block) {
1532    __ b(cc, chunk_->GetAssemblyLabel(left_block));
1533  } else {
1534    __ b(cc, chunk_->GetAssemblyLabel(left_block));
1535    __ b(chunk_->GetAssemblyLabel(right_block));
1536  }
1537}
1538
1539
1540void LCodeGen::DoBranch(LBranch* instr) {
1541  int true_block = chunk_->LookupDestination(instr->true_block_id());
1542  int false_block = chunk_->LookupDestination(instr->false_block_id());
1543
1544  Representation r = instr->hydrogen()->value()->representation();
1545  if (r.IsInteger32()) {
1546    Register reg = ToRegister(instr->InputAt(0));
1547    __ cmp(reg, Operand(0));
1548    EmitBranch(true_block, false_block, ne);
1549  } else if (r.IsDouble()) {
1550    DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
1551    Register scratch = scratch0();
1552
1553    // Test the double value. Zero and NaN are false.
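    // After the compare, scratch holds the VFP status flags: Z is set when
    // the value equals 0.0 and V is set when the comparison was unordered
    // (the value is NaN). Either flag being set makes the value false.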
1554    __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
1555    __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
1556    EmitBranch(true_block, false_block, eq);
1557  } else {
1558    ASSERT(r.IsTagged());
1559    Register reg = ToRegister(instr->InputAt(0));
1560    HType type = instr->hydrogen()->value()->type();
1561    if (type.IsBoolean()) {
1562      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1563      EmitBranch(true_block, false_block, eq);
1564    } else if (type.IsSmi()) {
1565      __ cmp(reg, Operand(0));
1566      EmitBranch(true_block, false_block, ne);
1567    } else {
1568      Label* true_label = chunk_->GetAssemblyLabel(true_block);
1569      Label* false_label = chunk_->GetAssemblyLabel(false_block);
1570
1571      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1572      // Avoid deopts in the case where we've never executed this path before.
1573      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1574
1575      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1576        // undefined -> false.
1577        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
1578        __ b(eq, false_label);
1579      }
1580      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1581        // Boolean -> its value.
1582        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1583        __ b(eq, true_label);
1584        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
1585        __ b(eq, false_label);
1586      }
1587      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1588        // 'null' -> false.
1589        __ CompareRoot(reg, Heap::kNullValueRootIndex);
1590        __ b(eq, false_label);
1591      }
1592
1593      if (expected.Contains(ToBooleanStub::SMI)) {
1594        // Smis: 0 -> false, all other -> true.
1595        // Smis: 0 -> false, all others -> true.
1596        __ b(eq, false_label);
1597        __ JumpIfSmi(reg, true_label);
1598      } else if (expected.NeedsMap()) {
1599        // If we need a map later and have a Smi -> deopt.
1600        __ tst(reg, Operand(kSmiTagMask));
1601        DeoptimizeIf(eq, instr->environment());
1602      }
1603
1604      const Register map = scratch0();
1605      if (expected.NeedsMap()) {
1606        __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
1607
1608        if (expected.CanBeUndetectable()) {
1609          // Undetectable -> false.
1610          __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
1611          __ tst(ip, Operand(1 << Map::kIsUndetectable));
1612          __ b(ne, false_label);
1613        }
1614      }
1615
1616      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1617        // spec object -> true.
1618        __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
1619        __ b(ge, true_label);
1620      }
1621
1622      if (expected.Contains(ToBooleanStub::STRING)) {
1623        // String value -> false iff empty.
1624        Label not_string;
1625        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
1626        __ b(ge, &not_string);
1627        __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
1628        __ cmp(ip, Operand(0));
1629        __ b(ne, true_label);
1630        __ b(false_label);
1631        __ bind(&not_string);
1632      }
1633
1634      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1635        // heap number -> false iff +0, -0, or NaN.
1636        DoubleRegister dbl_scratch = double_scratch0();
1637        Label not_heap_number;
1638        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
1639        __ b(ne, &not_heap_number);
1640        __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
1641        __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
1642        __ b(vs, false_label);  // NaN -> false.
1643        __ b(eq, false_label);  // +0, -0 -> false.
1644        __ b(true_label);
1645        __ bind(&not_heap_number);
1646      }
1647
1648      // We've seen something for the first time -> deopt.
1649      DeoptimizeIf(al, instr->environment());
1650    }
1651  }
1652}
1653
1654
1655void LCodeGen::EmitGoto(int block) {
1656  block = chunk_->LookupDestination(block);
1657  int next_block = GetNextEmittedBlock(current_block_);
1658  if (block != next_block) {
1659    __ jmp(chunk_->GetAssemblyLabel(block));
1660  }
1661}
1662
1663
1664void LCodeGen::DoGoto(LGoto* instr) {
1665  EmitGoto(instr->block_id());
1666}
1667
1668
1669Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1670  Condition cond = kNoCondition;
1671  switch (op) {
1672    case Token::EQ:
1673    case Token::EQ_STRICT:
1674      cond = eq;
1675      break;
1676    case Token::LT:
1677      cond = is_unsigned ? lo : lt;
1678      break;
1679    case Token::GT:
1680      cond = is_unsigned ? hi : gt;
1681      break;
1682    case Token::LTE:
1683      cond = is_unsigned ? ls : le;
1684      break;
1685    case Token::GTE:
1686      cond = is_unsigned ? hs : ge;
1687      break;
1688    case Token::IN:
1689    case Token::INSTANCEOF:
1690    default:
1691      UNREACHABLE();
1692  }
1693  return cond;
1694}
1695
1696
1697void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
1698  __ cmp(ToRegister(left), ToRegister(right));
1699}
1700
1701
1702void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1703  LOperand* left = instr->InputAt(0);
1704  LOperand* right = instr->InputAt(1);
1705  int false_block = chunk_->LookupDestination(instr->false_block_id());
1706  int true_block = chunk_->LookupDestination(instr->true_block_id());
1707
1708  if (instr->is_double()) {
1709    // Compare left and right as doubles and load the
1710    // resulting flags into the normal status register.
1711    __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
1712    // If a NaN is involved, i.e. the result is unordered (V set),
1713    // jump to false block label.
1714    __ b(vs, chunk_->GetAssemblyLabel(false_block));
1715  } else {
1716    EmitCmpI(left, right);
1717  }
1718
1719  Condition cc = TokenToCondition(instr->op(), instr->is_double());
1720  EmitBranch(true_block, false_block, cc);
1721}
1722
1723
1724void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1725  Register left = ToRegister(instr->InputAt(0));
1726  Register right = ToRegister(instr->InputAt(1));
1727  int false_block = chunk_->LookupDestination(instr->false_block_id());
1728  int true_block = chunk_->LookupDestination(instr->true_block_id());
1729
1730  __ cmp(left, Operand(right));
1731  EmitBranch(true_block, false_block, eq);
1732}
1733
1734
1735void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1736  Register left = ToRegister(instr->InputAt(0));
1737  int true_block = chunk_->LookupDestination(instr->true_block_id());
1738  int false_block = chunk_->LookupDestination(instr->false_block_id());
1739
1740  __ cmp(left, Operand(instr->hydrogen()->right()));
1741  EmitBranch(true_block, false_block, eq);
1742}
1743
1744
1745void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
1746  Register scratch = scratch0();
1747  Register reg = ToRegister(instr->InputAt(0));
1748
1749  // TODO(fsc): If the expression is known to be a smi, then it's
1750  // definitely not null. Jump to the false block.
1751
1752  int true_block = chunk_->LookupDestination(instr->true_block_id());
1753  int false_block = chunk_->LookupDestination(instr->false_block_id());
1754
1755  __ LoadRoot(ip, Heap::kNullValueRootIndex);
1756  __ cmp(reg, ip);
1757  if (instr->is_strict()) {
1758    EmitBranch(true_block, false_block, eq);
1759  } else {
1760    Label* true_label = chunk_->GetAssemblyLabel(true_block);
1761    Label* false_label = chunk_->GetAssemblyLabel(false_block);
1762    __ b(eq, true_label);
1763    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
1764    __ cmp(reg, ip);
1765    __ b(eq, true_label);
1766    __ JumpIfSmi(reg, false_label);
1767    // Check for undetectable objects by looking in the bit field in
1768    // the map. The object has already been smi checked.
1769    __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
1770    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
1771    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
1772    EmitBranch(true_block, false_block, ne);
1773  }
1774}
1775
1776
1777Condition LCodeGen::EmitIsObject(Register input,
1778                                 Register temp1,
1779                                 Label* is_not_object,
1780                                 Label* is_object) {
1781  Register temp2 = scratch0();
1782  __ JumpIfSmi(input, is_not_object);
1783
1784  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
1785  __ cmp(input, temp2);
1786  __ b(eq, is_object);
1787
1788  // Load map.
1789  __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
1790  // Undetectable objects behave like undefined.
1791  __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
1792  __ tst(temp2, Operand(1 << Map::kIsUndetectable));
1793  __ b(ne, is_not_object);
1794
1795  // Load instance type and check that it is in object type range.
1796  __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
1797  __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1798  __ b(lt, is_not_object);
1799  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1800  return le;
1801}
1802
1803
1804void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1805  Register reg = ToRegister(instr->InputAt(0));
1806  Register temp1 = ToRegister(instr->TempAt(0));
1807
1808  int true_block = chunk_->LookupDestination(instr->true_block_id());
1809  int false_block = chunk_->LookupDestination(instr->false_block_id());
1810  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1811  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1812
1813  Condition true_cond =
1814      EmitIsObject(reg, temp1, false_label, true_label);
1815
1816  EmitBranch(true_block, false_block, true_cond);
1817}
1818
1819
1820void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1821  int true_block = chunk_->LookupDestination(instr->true_block_id());
1822  int false_block = chunk_->LookupDestination(instr->false_block_id());
1823
1824  Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
1825  __ tst(input_reg, Operand(kSmiTagMask));
1826  EmitBranch(true_block, false_block, eq);
1827}
1828
1829
1830void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
1831  Register input = ToRegister(instr->InputAt(0));
1832  Register temp = ToRegister(instr->TempAt(0));
1833
1834  int true_block = chunk_->LookupDestination(instr->true_block_id());
1835  int false_block = chunk_->LookupDestination(instr->false_block_id());
1836
1837  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
1838  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
1839  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
1840  __ tst(temp, Operand(1 << Map::kIsUndetectable));
1841  EmitBranch(true_block, false_block, ne);
1842}
1843
1844
1845static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
1846  InstanceType from = instr->from();
1847  InstanceType to = instr->to();
1848  if (from == FIRST_TYPE) return to;
1849  ASSERT(from == to || to == LAST_TYPE);
1850  return from;
1851}
1852
1853
1854static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
1855  InstanceType from = instr->from();
1856  InstanceType to = instr->to();
1857  if (from == to) return eq;
1858  if (to == LAST_TYPE) return hs;
1859  if (from == FIRST_TYPE) return ls;
1860  UNREACHABLE();
1861  return eq;
1862}
1863
1864
1865void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1866  Register scratch = scratch0();
1867  Register input = ToRegister(instr->InputAt(0));
1868
1869  int true_block = chunk_->LookupDestination(instr->true_block_id());
1870  int false_block = chunk_->LookupDestination(instr->false_block_id());
1871
1872  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1873
1874  __ JumpIfSmi(input, false_label);
1875
1876  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
1877  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
1878}
1879
1880
1881void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
1882  Register input = ToRegister(instr->InputAt(0));
1883  Register result = ToRegister(instr->result());
1884
1885  if (FLAG_debug_code) {
1886    __ AbortIfNotString(input);
1887  }
1888
1889  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
1890  __ IndexFromHash(result, result);
1891}
1892
1893
1894void LCodeGen::DoHasCachedArrayIndexAndBranch(
1895    LHasCachedArrayIndexAndBranch* instr) {
1896  Register input = ToRegister(instr->InputAt(0));
1897  Register scratch = scratch0();
1898
1899  int true_block = chunk_->LookupDestination(instr->true_block_id());
1900  int false_block = chunk_->LookupDestination(instr->false_block_id());
1901
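  // The bits covered by kContainsCachedArrayIndexMask are clear when the hash
  // field caches an array index, so the eq condition (zero tst result)
  // selects the true block.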
1902  __ ldr(scratch,
1903         FieldMemOperand(input, String::kHashFieldOffset));
1904  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
1905  EmitBranch(true_block, false_block, eq);
1906}
1907
1908
1909// Branches to a label or falls through with the answer in flags.  Trashes
1910// the temp registers, but not the input.  Only input and temp2 may alias.
1911void LCodeGen::EmitClassOfTest(Label* is_true,
1912                               Label* is_false,
1913                               Handle<String> class_name,
1914                               Register input,
1915                               Register temp,
1916                               Register temp2) {
1917  ASSERT(!input.is(temp));
1918  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
1919  __ JumpIfSmi(input, is_false);
1920  __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
1921  __ b(lt, is_false);
1922
1923  // Map is now in temp.
1924  // Functions have class 'Function'.
1925  __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
1926  if (class_name->IsEqualTo(CStrVector("Function"))) {
1927    __ b(ge, is_true);
1928  } else {
1929    __ b(ge, is_false);
1930  }
1931
1932  // Check if the constructor in the map is a function.
1933  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
1934
1935  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
1936  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
1937  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
1938  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
1939  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
1940                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
1941
1942  // Objects with a non-function constructor have class 'Object'.
1943  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
1944  if (class_name->IsEqualTo(CStrVector("Object"))) {
1945    __ b(ne, is_true);
1946  } else {
1947    __ b(ne, is_false);
1948  }
1949
1950  // temp now contains the constructor function. Grab the
1951  // instance class name from there.
1952  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
1953  __ ldr(temp, FieldMemOperand(temp,
1954                               SharedFunctionInfo::kInstanceClassNameOffset));
1955  // The class name we are testing against is a symbol because it's a literal.
1956  // The name in the constructor is a symbol because of the way the context is
1957  // booted.  This routine isn't expected to work for random API-created
1958  // classes and it doesn't have to because you can't access it with natives
1959  // syntax.  Since both sides are symbols it is sufficient to use an identity
1960  // comparison.
1961  __ cmp(temp, Operand(class_name));
1962  // End with the answer in flags.
1963}
1964
1965
1966void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1967  Register input = ToRegister(instr->InputAt(0));
1968  Register temp = scratch0();
1969  Register temp2 = ToRegister(instr->TempAt(0));
1970  Handle<String> class_name = instr->hydrogen()->class_name();
1971
1972  int true_block = chunk_->LookupDestination(instr->true_block_id());
1973  int false_block = chunk_->LookupDestination(instr->false_block_id());
1974
1975  Label* true_label = chunk_->GetAssemblyLabel(true_block);
1976  Label* false_label = chunk_->GetAssemblyLabel(false_block);
1977
1978  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
1979
1980  EmitBranch(true_block, false_block, eq);
1981}
1982
1983
1984void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
1985  Register reg = ToRegister(instr->InputAt(0));
1986  Register temp = ToRegister(instr->TempAt(0));
1987  int true_block = instr->true_block_id();
1988  int false_block = instr->false_block_id();
1989
1990  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
1991  __ cmp(temp, Operand(instr->map()));
1992  EmitBranch(true_block, false_block, eq);
1993}
1994
1995
1996void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
1997  ASSERT(ToRegister(instr->InputAt(0)).is(r0));  // Object is in r0.
1998  ASSERT(ToRegister(instr->InputAt(1)).is(r1));  // Function is in r1.
1999
2000  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2001  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2002
2003  __ cmp(r0, Operand(0));
2004  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2005  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2006}
2007
2008
2009void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2010  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2011   public:
2012    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2013                                  LInstanceOfKnownGlobal* instr)
2014        : LDeferredCode(codegen), instr_(instr) { }
2015    virtual void Generate() {
2016      codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
2017    }
2018
2019    Label* map_check() { return &map_check_; }
2020
2021   private:
2022    LInstanceOfKnownGlobal* instr_;
2023    Label map_check_;
2024  };
2025
2026  DeferredInstanceOfKnownGlobal* deferred;
2027  deferred = new DeferredInstanceOfKnownGlobal(this, instr);
2028
2029  Label done, false_result;
2030  Register object = ToRegister(instr->InputAt(0));
2031  Register temp = ToRegister(instr->TempAt(0));
2032  Register result = ToRegister(instr->result());
2033
2034  ASSERT(object.is(r0));
2035  ASSERT(result.is(r0));
2036
2037  // A Smi is not instance of anything.
2038  __ JumpIfSmi(object, &false_result);
2039
2040  // This is the inlined call site instanceof cache. The two occurrences of the
2041  // hole value will be patched to the last map/result pair generated by the
2042  // instanceof stub.
2043  Label cache_miss;
2044  Register map = temp;
2045  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2046  __ bind(deferred->map_check());  // Label for calculating code patching.
2047  // We use Factory::the_hole_value() on purpose instead of loading from the
2048  // root array to force relocation to be able to later patch with
2049  // the cached map.
2050  __ mov(ip, Operand(factory()->the_hole_value()));
2051  __ cmp(map, Operand(ip));
2052  __ b(ne, &cache_miss);
2053  // We use Factory::the_hole_value() on purpose instead of loading from the
2054  // root array to force relocation to be able to later patch
2055  // with true or false.
2056  __ mov(result, Operand(factory()->the_hole_value()));
2057  __ b(&done);
2058
2059  // The inlined call site cache did not match. Check null and string before
2060  // calling the deferred code.
2061  __ bind(&cache_miss);
2062  // Null is not instance of anything.
2063  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2064  __ cmp(object, Operand(ip));
2065  __ b(eq, &false_result);
2066
2067  // String values are not instances of anything.
2068  Condition is_string = masm_->IsObjectStringType(object, temp);
2069  __ b(is_string, &false_result);
2070
2071  // Go to the deferred code.
2072  __ b(deferred->entry());
2073
2074  __ bind(&false_result);
2075  __ LoadRoot(result, Heap::kFalseValueRootIndex);
2076
2077  // Here result has either true or false. Deferred code also produces true or
2078  // false object.
2079  __ bind(deferred->exit());
2080  __ bind(&done);
2081}
2082
2083
2084void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2085                                                Label* map_check) {
2086  Register result = ToRegister(instr->result());
2087  ASSERT(result.is(r0));
2088
2089  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2090  flags = static_cast<InstanceofStub::Flags>(
2091      flags | InstanceofStub::kArgsInRegisters);
2092  flags = static_cast<InstanceofStub::Flags>(
2093      flags | InstanceofStub::kCallSiteInlineCheck);
2094  flags = static_cast<InstanceofStub::Flags>(
2095      flags | InstanceofStub::kReturnTrueFalseObject);
2096  InstanceofStub stub(flags);
2097
2098  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2099
2100  // Get the temp register reserved by the instruction. This needs to be r4 as
2101  // its slot among the pushed safepoint registers is used to communicate the
2102  // offset to the location of the map check.
2103  Register temp = ToRegister(instr->TempAt(0));
2104  ASSERT(temp.is(r4));
2105  __ mov(InstanceofStub::right(), Operand(instr->function()));
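  // The stub needs to know how far the inlined map check is from this call
  // site; delta measures that distance in instructions, and kAdditionalDelta
  // presumably accounts for the mov/str and call sequence emitted below.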
2106  static const int kAdditionalDelta = 4;
2107  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2108  Label before_push_delta;
2109  __ bind(&before_push_delta);
2110  __ BlockConstPoolFor(kAdditionalDelta);
2111  __ mov(temp, Operand(delta * kPointerSize));
2112  __ StoreToSafepointRegisterSlot(temp, temp);
2113  CallCodeGeneric(stub.GetCode(),
2114                  RelocInfo::CODE_TARGET,
2115                  instr,
2116                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2117  // Put the result value into the result register slot and
2118  // restore all registers.
2119  __ StoreToSafepointRegisterSlot(result, result);
2120}
2121
2122
2123static Condition ComputeCompareCondition(Token::Value op) {
2124  switch (op) {
2125    case Token::EQ_STRICT:
2126    case Token::EQ:
2127      return eq;
2128    case Token::LT:
2129      return lt;
2130    case Token::GT:
2131      return gt;
2132    case Token::LTE:
2133      return le;
2134    case Token::GTE:
2135      return ge;
2136    default:
2137      UNREACHABLE();
2138      return kNoCondition;
2139  }
2140}
2141
2142
2143void LCodeGen::DoCmpT(LCmpT* instr) {
2144  Token::Value op = instr->op();
2145
2146  Handle<Code> ic = CompareIC::GetUninitialized(op);
2147  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2148  __ cmp(r0, Operand(0));  // This instruction also signals no smi code inlined.
2149
2150  Condition condition = ComputeCompareCondition(op);
2151  if (op == Token::GT || op == Token::LTE) {
2152    condition = ReverseCondition(condition);
2153  }
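  // Materialize the boolean result with conditionally executed LoadRoot
  // instructions instead of branches.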
2154  __ LoadRoot(ToRegister(instr->result()),
2155              Heap::kTrueValueRootIndex,
2156              condition);
2157  __ LoadRoot(ToRegister(instr->result()),
2158              Heap::kFalseValueRootIndex,
2159              NegateCondition(condition));
2160}
2161
2162
2163void LCodeGen::DoReturn(LReturn* instr) {
2164  if (FLAG_trace) {
2165    // Push the return value on the stack as the parameter.
2166    // Runtime::TraceExit returns its parameter in r0.
2167    __ push(r0);
2168    __ CallRuntime(Runtime::kTraceExit, 1);
2169  }
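  // Tear down the frame: restore fp and lr, then drop the receiver and all
  // parameters ((parameter count + 1) stack slots).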
2170  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2171  __ mov(sp, fp);
2172  __ ldm(ia_w, sp, fp.bit() | lr.bit());
2173  __ add(sp, sp, Operand(sp_delta));
2174  __ Jump(lr);
2175}
2176
2177
2178void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2179  Register result = ToRegister(instr->result());
2180  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
2181  __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2182  if (instr->hydrogen()->check_hole_value()) {
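    // A cell holding the hole means the global property has been deleted;
    // deoptimize so the full code can handle it (see DoStoreGlobalCell below).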
2183    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2184    __ cmp(result, ip);
2185    DeoptimizeIf(eq, instr->environment());
2186  }
2187}
2188
2189
2190void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2191  ASSERT(ToRegister(instr->global_object()).is(r0));
2192  ASSERT(ToRegister(instr->result()).is(r0));
2193
2194  __ mov(r2, Operand(instr->name()));
2195  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
2196                                             : RelocInfo::CODE_TARGET_CONTEXT;
2197  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2198  CallCode(ic, mode, instr);
2199}
2200
2201
2202void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2203  Register value = ToRegister(instr->InputAt(0));
2204  Register scratch = scratch0();
2205
2206  // Load the cell.
2207  __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
2208
2209  // If the cell we are storing to contains the hole it could have
2210  // been deleted from the property dictionary. In that case, we need
2211  // to update the property details in the property dictionary to mark
2212  // it as no longer deleted.
2213  if (instr->hydrogen()->check_hole_value()) {
2214    Register scratch2 = ToRegister(instr->TempAt(0));
2215    __ ldr(scratch2,
2216           FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
2217    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2218    __ cmp(scratch2, ip);
2219    DeoptimizeIf(eq, instr->environment());
2220  }
2221
2222  // Store the value.
2223  __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
2224}
2225
2226
2227void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2228  ASSERT(ToRegister(instr->global_object()).is(r1));
2229  ASSERT(ToRegister(instr->value()).is(r0));
2230
2231  __ mov(r2, Operand(instr->name()));
2232  Handle<Code> ic = instr->strict_mode()
2233      ? isolate()->builtins()->StoreIC_Initialize_Strict()
2234      : isolate()->builtins()->StoreIC_Initialize();
2235  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2236}
2237
2238
2239void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2240  Register context = ToRegister(instr->context());
2241  Register result = ToRegister(instr->result());
2242  __ ldr(result, ContextOperand(context, instr->slot_index()));
2243}
2244
2245
2246void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2247  Register context = ToRegister(instr->context());
2248  Register value = ToRegister(instr->value());
2249  __ str(value, ContextOperand(context, instr->slot_index()));
2250  if (instr->needs_write_barrier()) {
2251    int offset = Context::SlotOffset(instr->slot_index());
2252    __ RecordWrite(context, Operand(offset), value, scratch0());
2253  }
2254}
2255
2256
2257void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2258  Register object = ToRegister(instr->InputAt(0));
2259  Register result = ToRegister(instr->result());
2260  if (instr->hydrogen()->is_in_object()) {
2261    __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
2262  } else {
2263    __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2264    __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
2265  }
2266}
2267
2268
2269void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2270                                               Register object,
2271                                               Handle<Map> type,
2272                                               Handle<String> name) {
2273  LookupResult lookup;
2274  type->LookupInDescriptors(NULL, *name, &lookup);
2275  ASSERT(lookup.IsProperty() &&
2276         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
2277  if (lookup.type() == FIELD) {
2278    int index = lookup.GetLocalFieldIndexFromMap(*type);
2279    int offset = index * kPointerSize;
2280    if (index < 0) {
2281      // Negative property indices are in-object properties, indexed
2282      // from the end of the fixed part of the object.
2283      __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
2284    } else {
2285      // Non-negative property indices are in the properties array.
2286      __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2287      __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2288    }
2289  } else {
2290    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2291    LoadHeapObject(result, Handle<HeapObject>::cast(function));
2292  }
2293}
2294
2295
2296void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2297  Register object = ToRegister(instr->object());
2298  Register result = ToRegister(instr->result());
2299  Register scratch = scratch0();
2300  int map_count = instr->hydrogen()->types()->length();
2301  Handle<String> name = instr->hydrogen()->name();
2302  if (map_count == 0) {
2303    ASSERT(instr->hydrogen()->need_generic());
2304    __ mov(r2, Operand(name));
2305    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2306    CallCode(ic, RelocInfo::CODE_TARGET, instr);
2307  } else {
2308    Label done;
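    // Dispatch on the receiver's map: each expected map gets an inlined field
    // load, and the last map either falls back to the generic IC or deopts,
    // depending on need_generic().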
2309    __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2310    for (int i = 0; i < map_count - 1; ++i) {
2311      Handle<Map> map = instr->hydrogen()->types()->at(i);
2312      Label next;
2313      __ cmp(scratch, Operand(map));
2314      __ b(ne, &next);
2315      EmitLoadFieldOrConstantFunction(result, object, map, name);
2316      __ b(&done);
2317      __ bind(&next);
2318    }
2319    Handle<Map> map = instr->hydrogen()->types()->last();
2320    __ cmp(scratch, Operand(map));
2321    if (instr->hydrogen()->need_generic()) {
2322      Label generic;
2323      __ b(ne, &generic);
2324      EmitLoadFieldOrConstantFunction(result, object, map, name);
2325      __ b(&done);
2326      __ bind(&generic);
2327      __ mov(r2, Operand(name));
2328      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2329      CallCode(ic, RelocInfo::CODE_TARGET, instr);
2330    } else {
2331      DeoptimizeIf(ne, instr->environment());
2332      EmitLoadFieldOrConstantFunction(result, object, map, name);
2333    }
2334    __ bind(&done);
2335  }
2336}
2337
2338
2339void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2340  ASSERT(ToRegister(instr->object()).is(r0));
2341  ASSERT(ToRegister(instr->result()).is(r0));
2342
2343  // Name is always in r2.
2344  __ mov(r2, Operand(instr->name()));
2345  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2346  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2347}
2348
2349
2350void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2351  Register scratch = scratch0();
2352  Register function = ToRegister(instr->function());
2353  Register result = ToRegister(instr->result());
2354
2355  // Check that the function really is a function. Load map into the
2356  // result register.
2357  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2358  DeoptimizeIf(ne, instr->environment());
2359
2360  // Make sure that the function has an instance prototype.
2361  Label non_instance;
2362  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2363  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2364  __ b(ne, &non_instance);
2365
2366  // Get the prototype or initial map from the function.
2367  __ ldr(result,
2368         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2369
2370  // Check that the function has a prototype or an initial map.
2371  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2372  __ cmp(result, ip);
2373  DeoptimizeIf(eq, instr->environment());
2374
2375  // If the function does not have an initial map, we're done.
2376  Label done;
2377  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2378  __ b(ne, &done);
2379
2380  // Get the prototype from the initial map.
2381  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2382  __ jmp(&done);
2383
2384  // Non-instance prototype: Fetch prototype from constructor field
2385  // in initial map.
2386  __ bind(&non_instance);
2387  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2388
2389  // All done.
2390  __ bind(&done);
2391}
2392
2393
2394void LCodeGen::DoLoadElements(LLoadElements* instr) {
2395  Register result = ToRegister(instr->result());
2396  Register input = ToRegister(instr->InputAt(0));
2397  Register scratch = scratch0();
2398
2399  __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
2400  if (FLAG_debug_code) {
2401    Label done, fail;
2402    __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
2403    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2404    __ cmp(scratch, ip);
2405    __ b(eq, &done);
2406    __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2407    __ cmp(scratch, ip);
2408    __ b(eq, &done);
2409    // |scratch| still contains the map of the elements object.
2410    __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
2411    __ ubfx(scratch, scratch, Map::kElementsKindShift,
2412            Map::kElementsKindBitCount);
2413    __ cmp(scratch, Operand(JSObject::FAST_ELEMENTS));
2414    __ b(eq, &done);
2415    __ cmp(scratch, Operand(JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2416    __ b(lt, &fail);
2417    __ cmp(scratch, Operand(JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2418    __ b(le, &done);
2419    __ bind(&fail);
2420    __ Abort("Check for fast or external elements failed.");
2421    __ bind(&done);
2422  }
2423}
2424
2425
2426void LCodeGen::DoLoadExternalArrayPointer(
2427    LLoadExternalArrayPointer* instr) {
2428  Register to_reg = ToRegister(instr->result());
2429  Register from_reg  = ToRegister(instr->InputAt(0));
2430  __ ldr(to_reg, FieldMemOperand(from_reg,
2431                                 ExternalArray::kExternalPointerOffset));
2432}
2433
2434
2435void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2436  Register arguments = ToRegister(instr->arguments());
2437  Register length = ToRegister(instr->length());
2438  Register index = ToRegister(instr->index());
2439  Register result = ToRegister(instr->result());
2440
2441  // Bail out if index is not a valid argument index. The unsigned check also
2442  // catches negative indices for free.
2443  __ sub(length, length, index, SetCC);
2444  DeoptimizeIf(ls, instr->environment());
2445
2446  // There are two words between the frame pointer and the last argument.
2447  // Subtracting index from length accounts for one of them; add one more here.
2448  __ add(length, length, Operand(1));
2449  __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
2450}
2451
2452
2453void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2454  Register elements = ToRegister(instr->elements());
2455  Register key = EmitLoadRegister(instr->key(), scratch0());
2456  Register result = ToRegister(instr->result());
2457  Register scratch = scratch0();
2458
2459  // Load the result.
2460  __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
2461  __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2462
2463  // Check for the hole value.
2464  if (instr->hydrogen()->RequiresHoleCheck()) {
2465    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2466    __ cmp(result, scratch);
2467    DeoptimizeIf(eq, instr->environment());
2468  }
2469}
2470
2471
2472void LCodeGen::DoLoadKeyedFastDoubleElement(
2473    LLoadKeyedFastDoubleElement* instr) {
2474  Register elements = ToRegister(instr->elements());
2475  bool key_is_constant = instr->key()->IsConstantOperand();
2476  Register key = no_reg;
2477  DwVfpRegister result = ToDoubleRegister(instr->result());
2478  Register scratch = scratch0();
2479
2480  int shift_size =
2481      ElementsKindToShiftSize(JSObject::FAST_DOUBLE_ELEMENTS);
2482  int constant_key = 0;
2483  if (key_is_constant) {
2484    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2485    if (constant_key & 0xF0000000) {
2486      Abort("array index constant value too big.");
2487    }
2488  } else {
2489    key = ToRegister(instr->key());
2490  }
2491
2492  Operand operand = key_is_constant
2493      ? Operand(constant_key * (1 << shift_size) +
2494                FixedDoubleArray::kHeaderSize - kHeapObjectTag)
2495      : Operand(key, LSL, shift_size);
2496  __ add(elements, elements, operand);
2497  if (!key_is_constant) {
2498    __ add(elements, elements,
2499           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
2500  }
2501
2502  if (instr->hydrogen()->RequiresHoleCheck()) {
2503    // TODO(danno): If no hole check is required, there is no need to allocate
2504    // elements into a temporary register; instead, scratch can be used.
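    // The hole is encoded as a NaN with a distinguished bit pattern, so
    // comparing only the upper 32 bits of the double against kHoleNanUpper32
    // is sufficient to detect it.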
2505    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2506    __ cmp(scratch, Operand(kHoleNanUpper32));
2507    DeoptimizeIf(eq, instr->environment());
2508  }
2509
2510  __ vldr(result, elements, 0);
2511}
2512
2513
2514void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2515    LLoadKeyedSpecializedArrayElement* instr) {
2516  Register external_pointer = ToRegister(instr->external_pointer());
2517  Register key = no_reg;
2518  JSObject::ElementsKind elements_kind = instr->elements_kind();
2519  bool key_is_constant = instr->key()->IsConstantOperand();
2520  int constant_key = 0;
2521  if (key_is_constant) {
2522    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2523    if (constant_key & 0xF0000000) {
2524      Abort("array index constant value too big.");
2525    }
2526  } else {
2527    key = ToRegister(instr->key());
2528  }
2529  int shift_size = ElementsKindToShiftSize(elements_kind);
2530
2531  if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
2532      elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
2533    CpuFeatures::Scope scope(VFP3);
2534    DwVfpRegister result = ToDoubleRegister(instr->result());
2535    Operand operand = key_is_constant
2536        ? Operand(constant_key * (1 << shift_size))
2537        : Operand(key, LSL, shift_size);
2538    __ add(scratch0(), external_pointer, operand);
2539    if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
2540      __ vldr(result.low(), scratch0(), 0);
2541      __ vcvt_f64_f32(result, result.low());
2542    } else  {  // i.e. elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS
2543      __ vldr(result, scratch0(), 0);
2544    }
2545  } else {
2546    Register result = ToRegister(instr->result());
2547    MemOperand mem_operand(key_is_constant
2548        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
2549        : MemOperand(external_pointer, key, LSL, shift_size));
2550    switch (elements_kind) {
2551      case JSObject::EXTERNAL_BYTE_ELEMENTS:
2552        __ ldrsb(result, mem_operand);
2553        break;
2554      case JSObject::EXTERNAL_PIXEL_ELEMENTS:
2555      case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2556        __ ldrb(result, mem_operand);
2557        break;
2558      case JSObject::EXTERNAL_SHORT_ELEMENTS:
2559        __ ldrsh(result, mem_operand);
2560        break;
2561      case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2562        __ ldrh(result, mem_operand);
2563        break;
2564      case JSObject::EXTERNAL_INT_ELEMENTS:
2565        __ ldr(result, mem_operand);
2566        break;
2567      case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
2568        __ ldr(result, mem_operand);
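        // An unsigned value >= 2^31 does not fit in the signed 32-bit integer
        // result, so deoptimize in that case (cs is unsigned >=).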
2569        __ cmp(result, Operand(0x80000000));
2570        // TODO(danno): we could be more clever here, perhaps having a special
2571        // version of the stub that detects if the overflow case actually
2572        // happens, and generates code that returns a double rather than an int.
2573        DeoptimizeIf(cs, instr->environment());
2574        break;
2575      case JSObject::EXTERNAL_FLOAT_ELEMENTS:
2576      case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
2577      case JSObject::FAST_DOUBLE_ELEMENTS:
2578      case JSObject::FAST_ELEMENTS:
2579      case JSObject::DICTIONARY_ELEMENTS:
2580      case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
2581        UNREACHABLE();
2582        break;
2583    }
2584  }
2585}
2586
2587
2588void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2589  ASSERT(ToRegister(instr->object()).is(r1));
2590  ASSERT(ToRegister(instr->key()).is(r0));
2591
2592  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2593  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2594}
2595
2596
2597void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2598  Register scratch = scratch0();
2599  Register result = ToRegister(instr->result());
2600
2601  // Check if the calling frame is an arguments adaptor frame.
2602  Label done, adapted;
2603  __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2604  __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
2605  __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2606
2607  // Result is the frame pointer for the frame if not adapted and for the real
2608  // frame below the adaptor frame if adapted.
2609  __ mov(result, fp, LeaveCC, ne);
2610  __ mov(result, scratch, LeaveCC, eq);
2611}
2612
2613
2614void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2615  Register elem = ToRegister(instr->InputAt(0));
2616  Register result = ToRegister(instr->result());
2617
2618  Label done;
2619
2620  // If there is no arguments adaptor frame, the number of arguments is fixed.
2621  __ cmp(fp, elem);
2622  __ mov(result, Operand(scope()->num_parameters()));
2623  __ b(eq, &done);
2624
2625  // Arguments adaptor frame present. Get argument length from there.
2626  __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2627  __ ldr(result,
2628         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
2629  __ SmiUntag(result);
2630
2631  // Argument length is in result register.
2632  __ bind(&done);
2633}
2634
2635
2636void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2637  Register receiver = ToRegister(instr->receiver());
2638  Register function = ToRegister(instr->function());
2639  Register length = ToRegister(instr->length());
2640  Register elements = ToRegister(instr->elements());
2641  Register scratch = scratch0();
2642  ASSERT(receiver.is(r0));  // Used for parameter count.
2643  ASSERT(function.is(r1));  // Required by InvokeFunction.
2644  ASSERT(ToRegister(instr->result()).is(r0));
2645
2646  // If the receiver is null or undefined, we have to pass the global
2647  // object as a receiver to normal functions. Values have to be
2648  // passed unchanged to builtins and strict-mode functions.
2649  Label global_object, receiver_ok;
2650
2651  // Do not transform the receiver to object for strict mode
2652  // functions.
2653  __ ldr(scratch,
2654         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2655  __ ldr(scratch,
2656         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2657  __ tst(scratch,
2658         Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
2659  __ b(ne, &receiver_ok);
2660
2661  // Do not transform the receiver to object for builtins.
2662  __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
2663  __ b(ne, &receiver_ok);
2664
2665  // Normal function. Replace undefined or null with global receiver.
2666  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
2667  __ cmp(receiver, scratch);
2668  __ b(eq, &global_object);
2669  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
2670  __ cmp(receiver, scratch);
2671  __ b(eq, &global_object);
2672
2673  // Deoptimize if the receiver is not a JS object.
2674  __ tst(receiver, Operand(kSmiTagMask));
2675  DeoptimizeIf(eq, instr->environment());
2676  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
2677  DeoptimizeIf(lt, instr->environment());
2678  __ jmp(&receiver_ok);
2679
2680  __ bind(&global_object);
2681  __ ldr(receiver, GlobalObjectOperand());
2682  __ ldr(receiver,
2683         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
2684  __ bind(&receiver_ok);
2685
2686  // Copy the arguments to this function possibly from the
2687  // adaptor frame below it.
2688  const uint32_t kArgumentsLimit = 1 * KB;
2689  __ cmp(length, Operand(kArgumentsLimit));
2690  DeoptimizeIf(hi, instr->environment());
2691
2692  // Push the receiver and use the register to keep the original
2693  // number of arguments.
2694  __ push(receiver);
2695  __ mov(receiver, length);
2696  // The arguments are at a one pointer size offset from elements.
2697  __ add(elements, elements, Operand(1 * kPointerSize));
2698
2699  // Loop through the arguments pushing them onto the execution
2700  // stack.
2701  Label invoke, loop;
2702  // length is a small non-negative integer, due to the test above.
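  // length serves both as the loop counter and as the word offset into the
  // arguments area; the loop ends when it reaches zero, while receiver (r0)
  // still holds the original argument count.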
2703  __ cmp(length, Operand(0));
2704  __ b(eq, &invoke);
2705  __ bind(&loop);
2706  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
2707  __ push(scratch);
2708  __ sub(length, length, Operand(1), SetCC);
2709  __ b(ne, &loop);
2710
2711  __ bind(&invoke);
2712  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
2713  LPointerMap* pointers = instr->pointer_map();
2714  LEnvironment* env = instr->deoptimization_environment();
2715  RecordPosition(pointers->position());
2716  RegisterEnvironmentForDeoptimization(env);
2717  SafepointGenerator safepoint_generator(this,
2718                                         pointers,
2719                                         env->deoptimization_index());
2720  // The number of arguments is stored in receiver which is r0, as expected
2721  // by InvokeFunction.
2722  v8::internal::ParameterCount actual(receiver);
2723  __ InvokeFunction(function, actual, CALL_FUNCTION,
2724                    safepoint_generator, CALL_AS_METHOD);
2725  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2726}
2727
2728
2729void LCodeGen::DoPushArgument(LPushArgument* instr) {
2730  LOperand* argument = instr->InputAt(0);
2731  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
2732    Abort("DoPushArgument not implemented for double type.");
2733  } else {
2734    Register argument_reg = EmitLoadRegister(argument, ip);
2735    __ push(argument_reg);
2736  }
2737}
2738
2739
2740void LCodeGen::DoThisFunction(LThisFunction* instr) {
2741  Register result = ToRegister(instr->result());
2742  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2743}
2744
2745
2746void LCodeGen::DoContext(LContext* instr) {
2747  Register result = ToRegister(instr->result());
2748  __ mov(result, cp);
2749}
2750
2751
2752void LCodeGen::DoOuterContext(LOuterContext* instr) {
2753  Register context = ToRegister(instr->context());
2754  Register result = ToRegister(instr->result());
2755  __ ldr(result,
2756         MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2757}
2758
2759
2760void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2761  Register result = ToRegister(instr->result());
2762  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
2763}
2764
2765
2766void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2767  Register global = ToRegister(instr->global());
2768  Register result = ToRegister(instr->result());
2769  __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
2770}
2771
2772
2773void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2774                                 int arity,
2775                                 LInstruction* instr,
2776                                 CallKind call_kind) {
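  // The function to call is expected in r1; callers such as
  // DoCallConstantFunction load it there before calling this helper.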
2777  // Change context if needed.
2778  bool change_context =
2779      (info()->closure()->context() != function->context()) ||
2780      scope()->contains_with() ||
2781      (scope()->num_heap_slots() > 0);
2782  if (change_context) {
2783    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
2784  }
2785
2786  // Set r0 to arguments count if adaptation is not needed. Assumes that r0
2787  // is available to write to at this point.
2788  if (!function->NeedsArgumentsAdaption()) {
2789    __ mov(r0, Operand(arity));
2790  }
2791
2792  LPointerMap* pointers = instr->pointer_map();
2793  RecordPosition(pointers->position());
2794
2795  // Invoke function.
2796  __ SetCallKind(r5, call_kind);
2797  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2798  __ Call(ip);
2799
2800  // Set up deoptimization.
2801  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
2802
2803  // Restore context.
2804  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2805}
2806
2807
2808void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2809  ASSERT(ToRegister(instr->result()).is(r0));
2810  __ mov(r1, Operand(instr->function()));
2811  CallKnownFunction(instr->function(),
2812                    instr->arity(),
2813                    instr,
2814                    CALL_AS_METHOD);
2815}
2816
2817
2818void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2819  Register input = ToRegister(instr->InputAt(0));
2820  Register result = ToRegister(instr->result());
2821  Register scratch = scratch0();
2822
2823  // Deoptimize if not a heap number.
2824  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2825  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
2826  __ cmp(scratch, Operand(ip));
2827  DeoptimizeIf(ne, instr->environment());
2828
2829  Label done;
2830  Register exponent = scratch0();
2831  scratch = no_reg;
2832  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
2833  // Check the sign of the argument. If the argument is positive, just
2834  // return it.
2835  __ tst(exponent, Operand(HeapNumber::kSignMask));
2836  // Move the input to the result if necessary.
2837  __ Move(result, input);
2838  __ b(eq, &done);
2839
2840  // Input is negative. Reverse its sign.
2841  // Preserve the value of all registers.
2842  {
2843    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2844
2845    // Registers were saved at the safepoint, so we can use
2846    // many scratch registers.
2847    Register tmp1 = input.is(r1) ? r0 : r1;
2848    Register tmp2 = input.is(r2) ? r0 : r2;
2849    Register tmp3 = input.is(r3) ? r0 : r3;
2850    Register tmp4 = input.is(r4) ? r0 : r4;
2851
2852    // exponent: floating point exponent value.
2853
2854    Label allocated, slow;
2855    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
2856    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
2857    __ b(&allocated);
2858
2859    // Slow case: Call the runtime system to do the number allocation.
2860    __ bind(&slow);
2861
2862    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
2863    // Set the pointer to the new heap number in tmp1.
2864    if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
2865    // Restore input_reg after call to runtime.
2866    __ LoadFromSafepointRegisterSlot(input, input);
2867    __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
2868
2869    __ bind(&allocated);
2870    // exponent: floating point exponent value.
2871    // tmp1: allocated heap number.
2872    __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
2873    __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
2874    __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
2875    __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
2876
2877    __ StoreToSafepointRegisterSlot(tmp1, result);
2878  }
2879
2880  __ bind(&done);
2881}
2882
2883
2884void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
2885  Register input = ToRegister(instr->InputAt(0));
2886  Register result = ToRegister(instr->result());
2887  __ cmp(input, Operand(0));
2888  __ Move(result, input, pl);
2889  // We can make rsb conditional because the previous cmp instruction
2890  // will clear the V (overflow) flag and rsb won't set this flag
2891  // if input is positive.
2892  __ rsb(result, input, Operand(0), SetCC, mi);
2893  // Deoptimize on overflow.
2894  DeoptimizeIf(vs, instr->environment());
2895}
2896
2897
2898void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
2899  // Class for deferred case.
2900  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
2901   public:
2902    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
2903                                    LUnaryMathOperation* instr)
2904        : LDeferredCode(codegen), instr_(instr) { }
2905    virtual void Generate() {
2906      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
2907    }
2908   private:
2909    LUnaryMathOperation* instr_;
2910  };
2911
2912  Representation r = instr->hydrogen()->value()->representation();
2913  if (r.IsDouble()) {
2914    DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
2915    DwVfpRegister result = ToDoubleRegister(instr->result());
2916    __ vabs(result, input);
2917  } else if (r.IsInteger32()) {
2918    EmitIntegerMathAbs(instr);
2919  } else {
2920    // Representation is tagged.
2921    DeferredMathAbsTaggedHeapNumber* deferred =
2922        new DeferredMathAbsTaggedHeapNumber(this, instr);
2923    Register input = ToRegister(instr->InputAt(0));
2924    // Smi check.
2925    __ JumpIfNotSmi(input, deferred->entry());
2926    // If smi, handle it directly.
2927    EmitIntegerMathAbs(instr);
2928    __ bind(deferred->exit());
2929  }
2930}
2931
2932
2933void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
2934  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
2935  Register result = ToRegister(instr->result());
2936  SwVfpRegister single_scratch = double_scratch0().low();
2937  Register scratch1 = scratch0();
2938  Register scratch2 = ToRegister(instr->TempAt(0));
2939
2940  __ EmitVFPTruncate(kRoundToMinusInf,
2941                     single_scratch,
2942                     input,
2943                     scratch1,
2944                     scratch2);
2945  DeoptimizeIf(ne, instr->environment());
2946
2947  // Move the result back to the general-purpose result register.
2948  __ vmov(result, single_scratch);
2949
2950  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2951    // Test for -0.
2952    Label done;
2953    __ cmp(result, Operand(0));
2954    __ b(ne, &done);
2955    __ vmov(scratch1, input.high());
2956    __ tst(scratch1, Operand(HeapNumber::kSignMask));
2957    DeoptimizeIf(ne, instr->environment());
2958    __ bind(&done);
2959  }
2960}
2961
2962
2963void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
2964  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
2965  Register result = ToRegister(instr->result());
2966  Register scratch = scratch0();
2967  Label done, check_sign_on_zero;
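  // Rounding is implemented as floor(input + 0.5): add 0.5 and truncate
  // toward minus infinity, with special handling for inputs close to zero
  // and for results that should be -0.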
2968
2969  // Extract exponent bits.
2970  __ vmov(result, input.high());
2971  __ ubfx(scratch,
2972          result,
2973          HeapNumber::kExponentShift,
2974          HeapNumber::kExponentBits);
2975
2976  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
2977  __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
2978  __ mov(result, Operand(0), LeaveCC, le);
2979  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2980    __ b(le, &check_sign_on_zero);
2981  } else {
2982    __ b(le, &done);
2983  }
2984
2985  // The following conversion will not work with numbers
2986  // outside of ]-2^32, 2^32[.
2987  __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
2988  DeoptimizeIf(ge, instr->environment());
2989
2990  // Save the original sign for later comparison.
2991  __ and_(scratch, result, Operand(HeapNumber::kSignMask));
2992
2993  __ Vmov(double_scratch0(), 0.5);
2994  __ vadd(input, input, double_scratch0());
2995
2996  // Check sign of the result: if the sign changed, the input
2997  // value was in [-0.5, 0[ and the result should be -0.
2998  __ vmov(result, input.high());
2999  __ eor(result, result, Operand(scratch), SetCC);
3000  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3001    DeoptimizeIf(mi, instr->environment());
3002  } else {
3003    __ mov(result, Operand(0), LeaveCC, mi);
3004    __ b(mi, &done);
3005  }
3006
3007  __ EmitVFPTruncate(kRoundToMinusInf,
3008                     double_scratch0().low(),
3009                     input,
3010                     result,
3011                     scratch);
3012  DeoptimizeIf(ne, instr->environment());
3013  __ vmov(result, double_scratch0().low());
3014
3015  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3016    // Test for -0.
3017    __ cmp(result, Operand(0));
3018    __ b(ne, &done);
3019    __ bind(&check_sign_on_zero);
3020    __ vmov(scratch, input.high());
3021    __ tst(scratch, Operand(HeapNumber::kSignMask));
3022    DeoptimizeIf(ne, instr->environment());
3023  }
3024  __ bind(&done);
3025}
3026
3027
3028void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3029  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3030  DoubleRegister result = ToDoubleRegister(instr->result());
3031  __ vsqrt(result, input);
3032}
3033
3034
3035void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3036  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3037  DoubleRegister result = ToDoubleRegister(instr->result());
3038  // Add +0 to convert -0 to +0.
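  // (vsqrt(-0) would yield -0, but Math.pow(-0, 0.5) must return +0.)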
3039  __ vadd(result, input, kDoubleRegZero);
3040  __ vsqrt(result, result);
3041}
3042
3043
3044void LCodeGen::DoPower(LPower* instr) {
3045  LOperand* left = instr->InputAt(0);
3046  LOperand* right = instr->InputAt(1);
3047  Register scratch = scratch0();
3048  DoubleRegister result_reg = ToDoubleRegister(instr->result());
3049  Representation exponent_type = instr->hydrogen()->right()->representation();
3050  if (exponent_type.IsDouble()) {
3051    // Prepare arguments and call C function.
3052    __ PrepareCallCFunction(0, 2, scratch);
3053    __ SetCallCDoubleArguments(ToDoubleRegister(left),
3054                               ToDoubleRegister(right));
3055    __ CallCFunction(
3056        ExternalReference::power_double_double_function(isolate()), 0, 2);
3057  } else if (exponent_type.IsInteger32()) {
3058    ASSERT(ToRegister(right).is(r0));
3059    // Prepare arguments and call C function.
3060    __ PrepareCallCFunction(1, 1, scratch);
3061    __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
3062    __ CallCFunction(
3063        ExternalReference::power_double_int_function(isolate()), 1, 1);
3064  } else {
3065    ASSERT(exponent_type.IsTagged());
3066    ASSERT(instr->hydrogen()->left()->representation().IsDouble());
3067
3068    Register right_reg = ToRegister(right);
3069
3070    // Check for smi on the right hand side.
3071    Label non_smi, call;
3072    __ JumpIfNotSmi(right_reg, &non_smi);
3073
3074    // Untag smi and convert it to a double.
3075    __ SmiUntag(right_reg);
3076    SwVfpRegister single_scratch = double_scratch0().low();
3077    __ vmov(single_scratch, right_reg);
3078    __ vcvt_f64_s32(result_reg, single_scratch);
3079    __ jmp(&call);
3080
3081    // Heap number map check.
3082    __ bind(&non_smi);
3083    __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
3084    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3085    __ cmp(scratch, Operand(ip));
3086    DeoptimizeIf(ne, instr->environment());
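    // Load the double value stored in the heap number.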
3087    int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
3088    __ add(scratch, right_reg, Operand(value_offset));
3089    __ vldr(result_reg, scratch, 0);
3090
3091    // Prepare arguments and call C function.
3092    __ bind(&call);
3093    __ PrepareCallCFunction(0, 2, scratch);
3094    __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
3095    __ CallCFunction(
3096        ExternalReference::power_double_double_function(isolate()), 0, 2);
3097  }
3098  // Store the result in the result register.
3099  __ GetCFunctionDoubleResult(result_reg);
3100}
3101
3102
3103void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3104  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3105  TranscendentalCacheStub stub(TranscendentalCache::LOG,
3106                               TranscendentalCacheStub::UNTAGGED);
3107  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3108}
3109
3110
3111void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3112  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3113  TranscendentalCacheStub stub(TranscendentalCache::COS,
3114                               TranscendentalCacheStub::UNTAGGED);
3115  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3116}
3117
3118
3119void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3120  ASSERT(ToDoubleRegister(instr->result()).is(d2));
3121  TranscendentalCacheStub stub(TranscendentalCache::SIN,
3122                               TranscendentalCacheStub::UNTAGGED);
3123  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3124}
3125
3126
3127void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3128  switch (instr->op()) {
3129    case kMathAbs:
3130      DoMathAbs(instr);
3131      break;
3132    case kMathFloor:
3133      DoMathFloor(instr);
3134      break;
3135    case kMathRound:
3136      DoMathRound(instr);
3137      break;
3138    case kMathSqrt:
3139      DoMathSqrt(instr);
3140      break;
3141    case kMathPowHalf:
3142      DoMathPowHalf(instr);
3143      break;
3144    case kMathCos:
3145      DoMathCos(instr);
3146      break;
3147    case kMathSin:
3148      DoMathSin(instr);
3149      break;
3150    case kMathLog:
3151      DoMathLog(instr);
3152      break;
3153    default:
3154      Abort("Unimplemented type of LUnaryMathOperation.");
3155      UNREACHABLE();
3156  }
3157}
3158
3159
3160void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3161  ASSERT(ToRegister(instr->function()).is(r1));
3162  ASSERT(instr->HasPointerMap());
3163  ASSERT(instr->HasDeoptimizationEnvironment());
3164  LPointerMap* pointers = instr->pointer_map();
3165  LEnvironment* env = instr->deoptimization_environment();
3166  RecordPosition(pointers->position());
3167  RegisterEnvironmentForDeoptimization(env);
3168  SafepointGenerator generator(this, pointers, env->deoptimization_index());
3169  ParameterCount count(instr->arity());
3170  __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3171  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3172}
3173
3174
3175void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3176  ASSERT(ToRegister(instr->result()).is(r0));
3177
3178  int arity = instr->arity();
3179  Handle<Code> ic =
3180      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
3181  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3182  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3183}
3184
3185
3186void LCodeGen::DoCallNamed(LCallNamed* instr) {
3187  ASSERT(ToRegister(instr->result()).is(r0));
3188
3189  int arity = instr->arity();
3190  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3191  Handle<Code> ic =
3192      isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
3193  __ mov(r2, Operand(instr->name()));
3194  CallCode(ic, mode, instr);
3195  // Restore context register.
3196  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3197}
3198
3199
3200void LCodeGen::DoCallFunction(LCallFunction* instr) {
3201  ASSERT(ToRegister(instr->result()).is(r0));
3202
3203  int arity = instr->arity();
3204  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
3205  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3206  __ Drop(1);
3207  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3208}
3209
3210
3211void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3212  ASSERT(ToRegister(instr->result()).is(r0));
3213
3214  int arity = instr->arity();
3215  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3216  Handle<Code> ic =
3217      isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
3218  __ mov(r2, Operand(instr->name()));
3219  CallCode(ic, mode, instr);
3220  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3221}
3222
3223
3224void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3225  ASSERT(ToRegister(instr->result()).is(r0));
3226  __ mov(r1, Operand(instr->target()));
3227  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
3228}
3229
3230
3231void LCodeGen::DoCallNew(LCallNew* instr) {
3232  ASSERT(ToRegister(instr->InputAt(0)).is(r1));
3233  ASSERT(ToRegister(instr->result()).is(r0));
3234
3235  Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
3236  __ mov(r0, Operand(instr->arity()));
3237  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
3238}
3239
3240
3241void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3242  CallRuntime(instr->function(), instr->arity(), instr);
3243}
3244
3245
3246void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3247  Register object = ToRegister(instr->object());
3248  Register value = ToRegister(instr->value());
3249  Register scratch = scratch0();
3250  int offset = instr->offset();
3251
3252  ASSERT(!object.is(value));
3253
3254  if (!instr->transition().is_null()) {
3255    __ mov(scratch, Operand(instr->transition()));
3256    __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3257  }
3258
3259  // Do the store.
3260  if (instr->is_in_object()) {
3261    __ str(value, FieldMemOperand(object, offset));
3262    if (instr->needs_write_barrier()) {
3263      // Update the write barrier for the object for in-object properties.
3264      __ RecordWrite(object, Operand(offset), value, scratch);
3265    }
3266  } else {
3267    __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3268    __ str(value, FieldMemOperand(scratch, offset));
3269    if (instr->needs_write_barrier()) {
3270      // Update the write barrier for the properties array.
3271      // object is used as a scratch register.
3272      __ RecordWrite(scratch, Operand(offset), value, object);
3273    }
3274  }
3275}
3276
3277
3278void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3279  ASSERT(ToRegister(instr->object()).is(r1));
3280  ASSERT(ToRegister(instr->value()).is(r0));
3281
3282  // Name is always in r2.
3283  __ mov(r2, Operand(instr->name()));
3284  Handle<Code> ic = instr->strict_mode()
3285      ? isolate()->builtins()->StoreIC_Initialize_Strict()
3286      : isolate()->builtins()->StoreIC_Initialize();
3287  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3288}
3289
3290
3291void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
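  // The unsigned (hs) comparison also catches negative indices.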
3292  __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
3293  DeoptimizeIf(hs, instr->environment());
3294}
3295
3296
3297void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3298  Register value = ToRegister(instr->value());
3299  Register elements = ToRegister(instr->object());
3300  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3301  Register scratch = scratch0();
3302
3303  // Do the store.
3304  if (instr->key()->IsConstantOperand()) {
3305    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3306    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3307    int offset =
3308        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
3309    __ str(value, FieldMemOperand(elements, offset));
3310  } else {
3311    __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3312    __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3313  }
3314
3315  if (instr->hydrogen()->NeedsWriteBarrier()) {
3316    // Compute address of modified element and store it into key register.
3317    __ add(key, scratch, Operand(FixedArray::kHeaderSize));
3318    __ RecordWrite(elements, key, value);
3319  }
3320}
3321
3322
3323void LCodeGen::DoStoreKeyedFastDoubleElement(
3324    LStoreKeyedFastDoubleElement* instr) {
3325  DwVfpRegister value = ToDoubleRegister(instr->value());
3326  Register elements = ToRegister(instr->elements());
3327  Register key = no_reg;
3328  Register scratch = scratch0();
3329  bool key_is_constant = instr->key()->IsConstantOperand();
3330  int constant_key = 0;
3331  Label not_nan;
3332
3333  // Calculate the effective address of the slot in the array to store the
3334  // double value.
3335  if (key_is_constant) {
3336    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3337    if (constant_key & 0xF0000000) {
3338      Abort("array index constant value too big.");
3339    }
3340  } else {
3341    key = ToRegister(instr->key());
3342  }
3343  int shift_size = ElementsKindToShiftSize(JSObject::FAST_DOUBLE_ELEMENTS);
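  // The shift size is log2 of the element size, i.e. 3 for 8-byte doubles.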
3344  Operand operand = key_is_constant
3345      ? Operand(constant_key * (1 << shift_size) +
3346                FixedDoubleArray::kHeaderSize - kHeapObjectTag)
3347      : Operand(key, LSL, shift_size);
3348  __ add(scratch, elements, operand);
3349  if (!key_is_constant) {
3350    __ add(scratch, scratch,
3351           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3352  }
3353
3354  // Check for NaN. All NaNs must be canonicalized.
3355  __ VFPCompareAndSetFlags(value, value);
3356
3357  // Only load canonical NaN if the comparison above set the overflow flag.
3358  __ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs);
3359
3360  __ bind(&not_nan);
3361  __ vstr(value, scratch, 0);
3362}
3363
3364
3365void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3366    LStoreKeyedSpecializedArrayElement* instr) {
3367
3368  Register external_pointer = ToRegister(instr->external_pointer());
3369  Register key = no_reg;
3370  JSObject::ElementsKind elements_kind = instr->elements_kind();
3371  bool key_is_constant = instr->key()->IsConstantOperand();
3372  int constant_key = 0;
3373  if (key_is_constant) {
3374    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3375    if (constant_key & 0xF0000000) {
3376      Abort("array index constant value too big.");
3377    }
3378  } else {
3379    key = ToRegister(instr->key());
3380  }
3381  int shift_size = ElementsKindToShiftSize(elements_kind);
3382
3383  if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
3384      elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
3385    CpuFeatures::Scope scope(VFP3);
3386    DwVfpRegister value(ToDoubleRegister(instr->value()));
3387    Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
3388                                    : Operand(key, LSL, shift_size));
3389    __ add(scratch0(), external_pointer, operand);
3390    if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
3391      __ vcvt_f32_f64(double_scratch0().low(), value);
3392      __ vstr(double_scratch0().low(), scratch0(), 0);
3393    } else {  // i.e. elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS
3394      __ vstr(value, scratch0(), 0);
3395    }
3396  } else {
3397    Register value(ToRegister(instr->value()));
3398    MemOperand mem_operand(key_is_constant
3399        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
3400        : MemOperand(external_pointer, key, LSL, shift_size));
3401    switch (elements_kind) {
3402      case JSObject::EXTERNAL_PIXEL_ELEMENTS:
3403      case JSObject::EXTERNAL_BYTE_ELEMENTS:
3404      case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3405        __ strb(value, mem_operand);
3406        break;
3407      case JSObject::EXTERNAL_SHORT_ELEMENTS:
3408      case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3409        __ strh(value, mem_operand);
3410        break;
3411      case JSObject::EXTERNAL_INT_ELEMENTS:
3412      case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
3413        __ str(value, mem_operand);
3414        break;
3415      case JSObject::EXTERNAL_FLOAT_ELEMENTS:
3416      case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
3417      case JSObject::FAST_DOUBLE_ELEMENTS:
3418      case JSObject::FAST_ELEMENTS:
3419      case JSObject::DICTIONARY_ELEMENTS:
3420      case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
3421        UNREACHABLE();
3422        break;
3423    }
3424  }
3425}
3426
3427
3428void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3429  ASSERT(ToRegister(instr->object()).is(r2));
3430  ASSERT(ToRegister(instr->key()).is(r1));
3431  ASSERT(ToRegister(instr->value()).is(r0));
3432
3433  Handle<Code> ic = instr->strict_mode()
3434      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3435      : isolate()->builtins()->KeyedStoreIC_Initialize();
3436  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3437}
3438
3439
3440void LCodeGen::DoStringAdd(LStringAdd* instr) {
3441  __ push(ToRegister(instr->left()));
3442  __ push(ToRegister(instr->right()));
3443  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3444  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3445}
3446
3447
3448void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3449  class DeferredStringCharCodeAt: public LDeferredCode {
3450   public:
3451    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3452        : LDeferredCode(codegen), instr_(instr) { }
3453    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3454   private:
3455    LStringCharCodeAt* instr_;
3456  };
3457
3458  Register string = ToRegister(instr->string());
3459  Register index = ToRegister(instr->index());
3460  Register result = ToRegister(instr->result());
3461
3462  DeferredStringCharCodeAt* deferred =
3463      new DeferredStringCharCodeAt(this, instr);
3464
3465  // Fetch the instance type of the receiver into result register.
3466  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
3467  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
3468
3469  // We need special handling for indirect strings.
3470  Label check_sequential;
3471  __ tst(result, Operand(kIsIndirectStringMask));
3472  __ b(eq, &check_sequential);
3473
3474  // Dispatch on the indirect string shape: slice or cons.
3475  Label cons_string;
3476  __ tst(result, Operand(kSlicedNotConsMask));
3477  __ b(eq, &cons_string);
3478
3479  // Handle slices.
3480  Label indirect_string_loaded;
3481  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
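  // The slice offset is a smi; shifting right by kSmiTagSize untags it.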
3482  __ add(index, index, Operand(result, ASR, kSmiTagSize));
3483  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
3484  __ jmp(&indirect_string_loaded);
3485
3486  // Handle conses.
3487  // Check whether the right hand side is the empty string (i.e. if
3488  // this is really a flat string in a cons string). If that is not
3489  // the case we would rather go to the runtime system now to flatten
3490  // the string.
3491  __ bind(&cons_string);
3492  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
3493  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
3494  __ cmp(result, ip);
3495  __ b(ne, deferred->entry());
3496  // Get the first of the two strings and load its instance type.
3497  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
3498
3499  __ bind(&indirect_string_loaded);
3500  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
3501  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
3502
3503  // Check whether the string is sequential. The only non-sequential
3504  // shapes we support have just been unwrapped above.
3505  __ bind(&check_sequential);
3506  STATIC_ASSERT(kSeqStringTag == 0);
3507  __ tst(result, Operand(kStringRepresentationMask));
3508  __ b(ne, deferred->entry());
3509
3510  // Dispatch on the encoding: ASCII or two-byte.
3511  Label ascii_string;
3512  STATIC_ASSERT(kAsciiStringTag != 0);
3513  __ tst(result, Operand(kStringEncodingMask));
3514  __ b(ne, &ascii_string);
3515
3516  // Two-byte string.
3517  // Load the two-byte character code into the result register.
3518  Label done;
3519  __ add(result,
3520         string,
3521         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3522  __ ldrh(result, MemOperand(result, index, LSL, 1));
3523  __ jmp(&done);
3524
3525  // ASCII string.
3526  // Load the byte into the result register.
3527  __ bind(&ascii_string);
3528  __ add(result,
3529         string,
3530         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
3531  __ ldrb(result, MemOperand(result, index));
3532
3533  __ bind(&done);
3534  __ bind(deferred->exit());
3535}
3536
3537
3538void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3539  Register string = ToRegister(instr->string());
3540  Register result = ToRegister(instr->result());
3541  Register scratch = scratch0();
3542
3543  // TODO(3095996): Get rid of this. For now, we need to make the
3544  // result register contain a valid pointer because it is already
3545  // contained in the register pointer map.
3546  __ mov(result, Operand(0));
3547
3548  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3549  __ push(string);
3550  // Push the index as a smi. This is safe because of the checks in
3551  // DoStringCharCodeAt above.
3552  if (instr->index()->IsConstantOperand()) {
3553    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3554    __ mov(scratch, Operand(Smi::FromInt(const_index)));
3555    __ push(scratch);
3556  } else {
3557    Register index = ToRegister(instr->index());
3558    __ SmiTag(index);
3559    __ push(index);
3560  }
3561  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
3562  if (FLAG_debug_code) {
3563    __ AbortIfNotSmi(r0);
3564  }
3565  __ SmiUntag(r0);
3566  __ StoreToSafepointRegisterSlot(r0, result);
3567}
3568
3569
3570void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
3571  class DeferredStringCharFromCode: public LDeferredCode {
3572   public:
3573    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
3574        : LDeferredCode(codegen), instr_(instr) { }
3575    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
3576   private:
3577    LStringCharFromCode* instr_;
3578  };
3579
3580  DeferredStringCharFromCode* deferred =
3581      new DeferredStringCharFromCode(this, instr);
3582
3583  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
3584  Register char_code = ToRegister(instr->char_code());
3585  Register result = ToRegister(instr->result());
3586  ASSERT(!char_code.is(result));
3587
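  // Char codes above the ASCII range are handled in the deferred code.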
3588  __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
3589  __ b(hi, deferred->entry());
3590  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
3591  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
3592  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
3593  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3594  __ cmp(result, ip);
3595  __ b(eq, deferred->entry());
3596  __ bind(deferred->exit());
3597}
3598
3599
3600void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
3601  Register char_code = ToRegister(instr->char_code());
3602  Register result = ToRegister(instr->result());
3603
3604  // TODO(3095996): Get rid of this. For now, we need to make the
3605  // result register contain a valid pointer because it is already
3606  // contained in the register pointer map.
3607  __ mov(result, Operand(0));
3608
3609  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3610  __ SmiTag(char_code);
3611  __ push(char_code);
3612  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
3613  __ StoreToSafepointRegisterSlot(r0, result);
3614}
3615
3616
3617void LCodeGen::DoStringLength(LStringLength* instr) {
3618  Register string = ToRegister(instr->InputAt(0));
3619  Register result = ToRegister(instr->result());
3620  __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
3621}
3622
3623
3624void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3625  LOperand* input = instr->InputAt(0);
3626  ASSERT(input->IsRegister() || input->IsStackSlot());
3627  LOperand* output = instr->result();
3628  ASSERT(output->IsDoubleRegister());
3629  SwVfpRegister single_scratch = double_scratch0().low();
3630  if (input->IsStackSlot()) {
3631    Register scratch = scratch0();
3632    __ ldr(scratch, ToMemOperand(input));
3633    __ vmov(single_scratch, scratch);
3634  } else {
3635    __ vmov(single_scratch, ToRegister(input));
3636  }
3637  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
3638}
3639
3640
3641void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
3642  class DeferredNumberTagI: public LDeferredCode {
3643   public:
3644    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
3645        : LDeferredCode(codegen), instr_(instr) { }
3646    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
3647   private:
3648    LNumberTagI* instr_;
3649  };
3650
3651  LOperand* input = instr->InputAt(0);
3652  ASSERT(input->IsRegister() && input->Equals(instr->result()));
3653  Register reg = ToRegister(input);
3654
3655  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
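  // Tagging adds the value to itself; signed overflow (vs) means it does not
  // fit in a smi and must be boxed in a heap number.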
3656  __ SmiTag(reg, SetCC);
3657  __ b(vs, deferred->entry());
3658  __ bind(deferred->exit());
3659}
3660
3661
3662void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
3663  Label slow;
3664  Register reg = ToRegister(instr->InputAt(0));
3665  DoubleRegister dbl_scratch = double_scratch0();
3666  SwVfpRegister flt_scratch = dbl_scratch.low();
3667
3668  // Preserve the value of all registers.
3669  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3670
3671  // There was overflow, so bits 30 and 31 of the original integer
3672  // disagree. Try to allocate a heap number in new space and store
3673  // the value in there. If that fails, call the runtime system.
3674  Label done;
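  // Undo the overflowed tag: shift the value back and flip the sign bit,
  // which the overflowing shift inverted.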
3675  __ SmiUntag(reg);
3676  __ eor(reg, reg, Operand(0x80000000));
3677  __ vmov(flt_scratch, reg);
3678  __ vcvt_f64_s32(dbl_scratch, flt_scratch);
3679  if (FLAG_inline_new) {
3680    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3681    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
3682    if (!reg.is(r5)) __ mov(reg, r5);
3683    __ b(&done);
3684  }
3685
3686  // Slow case: Call the runtime system to do the number allocation.
3687  __ bind(&slow);
3688
3689  // TODO(3095996): Put a valid pointer value in the stack slot where the result
3690  // register is stored, as this register is in the pointer map, but contains an
3691  // integer value.
3692  __ mov(ip, Operand(0));
3693  __ StoreToSafepointRegisterSlot(ip, reg);
3694  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3695  if (!reg.is(r0)) __ mov(reg, r0);
3696
3697  // Done. Put the value in dbl_scratch into the value of the allocated heap
3698  // number.
3699  __ bind(&done);
3700  __ sub(ip, reg, Operand(kHeapObjectTag));
3701  __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
3702  __ StoreToSafepointRegisterSlot(reg, reg);
3703}
3704
3705
3706void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
3707  class DeferredNumberTagD: public LDeferredCode {
3708   public:
3709    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
3710        : LDeferredCode(codegen), instr_(instr) { }
3711    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
3712   private:
3713    LNumberTagD* instr_;
3714  };
3715
3716  DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
3717  Register scratch = scratch0();
3718  Register reg = ToRegister(instr->result());
3719  Register temp1 = ToRegister(instr->TempAt(0));
3720  Register temp2 = ToRegister(instr->TempAt(1));
3721
3722  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
3723  if (FLAG_inline_new) {
3724    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
3725    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
3726  } else {
3727    __ jmp(deferred->entry());
3728  }
3729  __ bind(deferred->exit());
3730  __ sub(ip, reg, Operand(kHeapObjectTag));
3731  __ vstr(input_reg, ip, HeapNumber::kValueOffset);
3732}
3733
3734
3735void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
3736  // TODO(3095996): Get rid of this. For now, we need to make the
3737  // result register contain a valid pointer because it is already
3738  // contained in the register pointer map.
3739  Register reg = ToRegister(instr->result());
3740  __ mov(reg, Operand(0));
3741
3742  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3743  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3744  __ StoreToSafepointRegisterSlot(r0, reg);
3745}
3746
3747
3748void LCodeGen::DoSmiTag(LSmiTag* instr) {
3749  LOperand* input = instr->InputAt(0);
3750  ASSERT(input->IsRegister() && input->Equals(instr->result()));
3751  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
3752  __ SmiTag(ToRegister(input));
3753}
3754
3755
3756void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
3757  LOperand* input = instr->InputAt(0);
3758  ASSERT(input->IsRegister() && input->Equals(instr->result()));
3759  if (instr->needs_check()) {
3760    STATIC_ASSERT(kHeapObjectTag == 1);
3761    // If the input is a HeapObject, SmiUntag will set the carry flag.
3762    __ SmiUntag(ToRegister(input), SetCC);
3763    DeoptimizeIf(cs, instr->environment());
3764  } else {
3765    __ SmiUntag(ToRegister(input));
3766  }
3767}
3768
3769
3770void LCodeGen::EmitNumberUntagD(Register input_reg,
3771                                DoubleRegister result_reg,
3772                                bool deoptimize_on_undefined,
3773                                LEnvironment* env) {
3774  Register scratch = scratch0();
3775  SwVfpRegister flt_scratch = double_scratch0().low();
3776  ASSERT(!result_reg.is(double_scratch0()));
3777
3778  Label load_smi, heap_number, done;
3779
3780  // Smi check.
3781  __ JumpIfSmi(input_reg, &load_smi);
3782
3783  // Heap number map check.
3784  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
3785  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3786  __ cmp(scratch, Operand(ip));
3787  if (deoptimize_on_undefined) {
3788    DeoptimizeIf(ne, env);
3789  } else {
3790    Label heap_number;
3791    __ b(eq, &heap_number);
3792
3793    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3794    __ cmp(input_reg, Operand(ip));
3795    DeoptimizeIf(ne, env);
3796
3797    // Convert undefined to NaN.
3798    __ LoadRoot(ip, Heap::kNanValueRootIndex);
3799    __ sub(ip, ip, Operand(kHeapObjectTag));
3800    __ vldr(result_reg, ip, HeapNumber::kValueOffset);
3801    __ jmp(&done);
3802
3803    __ bind(&heap_number);
3804  }
3805  // Heap number to double register conversion.
3806  __ sub(ip, input_reg, Operand(kHeapObjectTag));
3807  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
3808  __ jmp(&done);
3809
3810  // Smi to double register conversion.
3811  __ bind(&load_smi);
3812  __ SmiUntag(input_reg);  // Untag smi before converting to float.
3813  __ vmov(flt_scratch, input_reg);
3814  __ vcvt_f64_s32(result_reg, flt_scratch);
3815  __ SmiTag(input_reg);  // Retag smi.
3816  __ bind(&done);
3817}
3818
3819
3820class DeferredTaggedToI: public LDeferredCode {
3821 public:
3822  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
3823      : LDeferredCode(codegen), instr_(instr) { }
3824  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
3825 private:
3826  LTaggedToI* instr_;
3827};
3828
3829
3830void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
3831  Register input_reg = ToRegister(instr->InputAt(0));
3832  Register scratch1 = scratch0();
3833  Register scratch2 = ToRegister(instr->TempAt(0));
3834  DwVfpRegister double_scratch = double_scratch0();
3835  SwVfpRegister single_scratch = double_scratch.low();
3836
3837  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
3838  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
3839
3840  Label done;
3841
3842  // The input was optimistically untagged; revert it.
3843  // The carry flag is set when we reach this deferred code as we just executed
3844  // SmiUntag(heap_object, SetCC).
3845  STATIC_ASSERT(kHeapObjectTag == 1);
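  // input_reg * 2 + carry restores the original tagged value; the tag bit was
  // shifted into the carry flag by SmiUntag.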
3846  __ adc(input_reg, input_reg, Operand(input_reg));
3847
3848  // Heap number map check.
3849  __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
3850  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3851  __ cmp(scratch1, Operand(ip));
3852
3853  if (instr->truncating()) {
3854    Register scratch3 = ToRegister(instr->TempAt(1));
3855    DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
3856    ASSERT(!scratch3.is(input_reg) &&
3857           !scratch3.is(scratch1) &&
3858           !scratch3.is(scratch2));
3859    // Performs a truncating conversion of a floating point number as used by
3860    // the JS bitwise operations.
3861    Label heap_number;
3862    __ b(eq, &heap_number);
3863    // Check for undefined. Undefined is converted to zero for truncating
3864    // conversions.
3865    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3866    __ cmp(input_reg, Operand(ip));
3867    DeoptimizeIf(ne, instr->environment());
3868    __ mov(input_reg, Operand(0));
3869    __ b(&done);
3870
3871    __ bind(&heap_number);
3872    __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
3873    __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
3874
3875    __ EmitECMATruncate(input_reg,
3876                        double_scratch2,
3877                        single_scratch,
3878                        scratch1,
3879                        scratch2,
3880                        scratch3);
3881
3882  } else {
3883    CpuFeatures::Scope scope(VFP3);
3884    // Deoptimize if we don't have a heap number.
3885    DeoptimizeIf(ne, instr->environment());
3886
3887    __ sub(ip, input_reg, Operand(kHeapObjectTag));
3888    __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
3889    __ EmitVFPTruncate(kRoundToZero,
3890                       single_scratch,
3891                       double_scratch,
3892                       scratch1,
3893                       scratch2,
3894                       kCheckForInexactConversion);
3895    DeoptimizeIf(ne, instr->environment());
3896    // Load the result.
3897    __ vmov(input_reg, single_scratch);
3898
3899    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3900      __ cmp(input_reg, Operand(0));
3901      __ b(ne, &done);
3902      __ vmov(scratch1, double_scratch.high());
3903      __ tst(scratch1, Operand(HeapNumber::kSignMask));
3904      DeoptimizeIf(ne, instr->environment());
3905    }
3906  }
3907  __ bind(&done);
3908}
3909
3910
3911void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
3912  LOperand* input = instr->InputAt(0);
3913  ASSERT(input->IsRegister());
3914  ASSERT(input->Equals(instr->result()));
3915
3916  Register input_reg = ToRegister(input);
3917
3918  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
3919
3920  // Optimistically untag the input.
3921  // If the input is a HeapObject, SmiUntag will set the carry flag.
3922  __ SmiUntag(input_reg, SetCC);
3923  // Branch to deferred code if the input was tagged.
3924  // The deferred code will take care of restoring the tag.
3925  __ b(cs, deferred->entry());
3926  __ bind(deferred->exit());
3927}
3928
3929
3930void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
3931  LOperand* input = instr->InputAt(0);
3932  ASSERT(input->IsRegister());
3933  LOperand* result = instr->result();
3934  ASSERT(result->IsDoubleRegister());
3935
3936  Register input_reg = ToRegister(input);
3937  DoubleRegister result_reg = ToDoubleRegister(result);
3938
3939  EmitNumberUntagD(input_reg, result_reg,
3940                   instr->hydrogen()->deoptimize_on_undefined(),
3941                   instr->environment());
3942}
3943
3944
3945void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
3946  Register result_reg = ToRegister(instr->result());
3947  Register scratch1 = scratch0();
3948  Register scratch2 = ToRegister(instr->TempAt(0));
3949  DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
3950  SwVfpRegister single_scratch = double_scratch0().low();
3951
3952  Label done;
3953
3954  if (instr->truncating()) {
3955    Register scratch3 = ToRegister(instr->TempAt(1));
3956    __ EmitECMATruncate(result_reg,
3957                        double_input,
3958                        single_scratch,
3959                        scratch1,
3960                        scratch2,
3961                        scratch3);
3962  } else {
3963    VFPRoundingMode rounding_mode = kRoundToMinusInf;
3964    __ EmitVFPTruncate(rounding_mode,
3965                       single_scratch,
3966                       double_input,
3967                       scratch1,
3968                       scratch2,
3969                       kCheckForInexactConversion);
3970    // Deoptimize if we had a vfp invalid exception,
3971    // including inexact operation.
3972    DeoptimizeIf(ne, instr->environment());
3973    // Retrieve the result.
3974    __ vmov(result_reg, single_scratch);
3975  }
3976  __ bind(&done);
3977}
3978
3979
3980void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
3981  LOperand* input = instr->InputAt(0);
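  // The low tag bit is clear for smis and set (ne) for heap objects.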
3982  __ tst(ToRegister(input), Operand(kSmiTagMask));
3983  DeoptimizeIf(ne, instr->environment());
3984}
3985
3986
3987void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
3988  LOperand* input = instr->InputAt(0);
3989  __ tst(ToRegister(input), Operand(kSmiTagMask));
3990  DeoptimizeIf(eq, instr->environment());
3991}
3992
3993
3994void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
3995  Register input = ToRegister(instr->InputAt(0));
3996  Register scratch = scratch0();
3997
3998  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3999  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4000
4001  if (instr->hydrogen()->is_interval_check()) {
4002    InstanceType first;
4003    InstanceType last;
4004    instr->hydrogen()->GetCheckInterval(&first, &last);
4005
4006    __ cmp(scratch, Operand(first));
4007
4008    // If there is only one type in the interval check for equality.
4009    if (first == last) {
4010      DeoptimizeIf(ne, instr->environment());
4011    } else {
4012      DeoptimizeIf(lo, instr->environment());
4013      // Omit check for the last type.
4014      if (last != LAST_TYPE) {
4015        __ cmp(scratch, Operand(last));
4016        DeoptimizeIf(hi, instr->environment());
4017      }
4018    }
4019  } else {
4020    uint8_t mask;
4021    uint8_t tag;
4022    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4023
4024    if (IsPowerOf2(mask)) {
4025      ASSERT(tag == 0 || IsPowerOf2(tag));
4026      __ tst(scratch, Operand(mask));
4027      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
4028    } else {
4029      __ and_(scratch, scratch, Operand(mask));
4030      __ cmp(scratch, Operand(tag));
4031      DeoptimizeIf(ne, instr->environment());
4032    }
4033  }
4034}
4035
4036
4037void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4038  ASSERT(instr->InputAt(0)->IsRegister());
4039  Register reg = ToRegister(instr->InputAt(0));
4040  __ cmp(reg, Operand(instr->hydrogen()->target()));
4041  DeoptimizeIf(ne, instr->environment());
4042}
4043
4044
4045void LCodeGen::DoCheckMap(LCheckMap* instr) {
4046  Register scratch = scratch0();
4047  LOperand* input = instr->InputAt(0);
4048  ASSERT(input->IsRegister());
4049  Register reg = ToRegister(input);
4050  __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
4051  __ cmp(scratch, Operand(instr->hydrogen()->map()));
4052  DeoptimizeIf(ne, instr->environment());
4053}
4054
4055
4056void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4057  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4058  Register result_reg = ToRegister(instr->result());
4059  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4060  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4061}
4062
4063
4064void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4065  Register unclamped_reg = ToRegister(instr->unclamped());
4066  Register result_reg = ToRegister(instr->result());
4067  __ ClampUint8(result_reg, unclamped_reg);
4068}
4069
4070
4071void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4072  Register scratch = scratch0();
4073  Register input_reg = ToRegister(instr->unclamped());
4074  Register result_reg = ToRegister(instr->result());
4075  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4076  Label is_smi, done, heap_number;
4077
4078  // Both smi and heap number cases are handled.
4079  __ JumpIfSmi(input_reg, &is_smi);
4080
4081  // Check for heap number
4082  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4083  __ cmp(scratch, Operand(factory()->heap_number_map()));
4084  __ b(eq, &heap_number);
4085
4086  // Check for undefined. Undefined is converted to zero for clamping
4087  // conversions.
4088  __ cmp(input_reg, Operand(factory()->undefined_value()));
4089  DeoptimizeIf(ne, instr->environment());
4090  __ mov(result_reg, Operand(0));
4091  __ jmp(&done);
4092
4093  // Heap number
4094  __ bind(&heap_number);
4095  __ vldr(double_scratch0(), FieldMemOperand(input_reg,
4096                                             HeapNumber::kValueOffset));
4097  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4098  __ jmp(&done);
4099
4100  // smi
4101  __ bind(&is_smi);
4102  __ SmiUntag(result_reg, input_reg);
4103  __ ClampUint8(result_reg, result_reg);
4104
4105  __ bind(&done);
4106}
4107
4108
4109void LCodeGen::LoadHeapObject(Register result,
4110                              Handle<HeapObject> object) {
4111  if (heap()->InNewSpace(*object)) {
4112    Handle<JSGlobalPropertyCell> cell =
4113        factory()->NewJSGlobalPropertyCell(object);
4114    __ mov(result, Operand(cell));
4115    __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
4116  } else {
4117    __ mov(result, Operand(object));
4118  }
4119}
4120
4121
4122void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4123  Register temp1 = ToRegister(instr->TempAt(0));
4124  Register temp2 = ToRegister(instr->TempAt(1));
4125
4126  Handle<JSObject> holder = instr->holder();
4127  Handle<JSObject> current_prototype = instr->prototype();
4128
4129  // Load prototype object.
4130  LoadHeapObject(temp1, current_prototype);
4131
4132  // Check prototype maps up to the holder.
4133  while (!current_prototype.is_identical_to(holder)) {
4134    __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
4135    __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
4136    DeoptimizeIf(ne, instr->environment());
4137    current_prototype =
4138        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4139    // Load next prototype object.
4140    LoadHeapObject(temp1, current_prototype);
4141  }
4142
4143  // Check the holder map.
4144  __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
4145  __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
4146  DeoptimizeIf(ne, instr->environment());
4147}
4148
4149
4150void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4151  __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4152  __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
4153  __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4154  __ mov(r1, Operand(instr->hydrogen()->constant_elements()));
4155  __ Push(r3, r2, r1);
4156
4157  // Pick the right runtime function or stub to call.
4158  int length = instr->hydrogen()->length();
4159  if (instr->hydrogen()->IsCopyOnWrite()) {
4160    ASSERT(instr->hydrogen()->depth() == 1);
4161    FastCloneShallowArrayStub::Mode mode =
4162        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4163    FastCloneShallowArrayStub stub(mode, length);
4164    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4165  } else if (instr->hydrogen()->depth() > 1) {
4166    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4167  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4168    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4169  } else {
4170    FastCloneShallowArrayStub::Mode mode =
4171        FastCloneShallowArrayStub::CLONE_ELEMENTS;
4172    FastCloneShallowArrayStub stub(mode, length);
4173    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4174  }
4175}
4176
4177
4178void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4179  __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4180  __ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
4181  __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4182  __ mov(r2, Operand(instr->hydrogen()->constant_properties()));
4183  __ mov(r1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
4184  __ Push(r4, r3, r2, r1);
4185
4186  // Pick the right runtime function to call.
4187  if (instr->hydrogen()->depth() > 1) {
4188    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4189  } else {
4190    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4191  }
4192}
4193
4194
4195void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4196  ASSERT(ToRegister(instr->InputAt(0)).is(r0));
4197  __ push(r0);
4198  CallRuntime(Runtime::kToFastProperties, 1, instr);
4199}
4200
4201
4202void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4203  Label materialized;
4204  // Registers will be used as follows:
4205  // r3 = JS function.
4206  // r7 = literals array.
4207  // r1 = regexp literal.
4208  // r0 = regexp literal clone.
4209  // r2 and r4-r6 are used as temporaries.
4210  __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4211  __ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
4212  int literal_offset = FixedArray::kHeaderSize +
4213      instr->hydrogen()->literal_index() * kPointerSize;
4214  __ ldr(r1, FieldMemOperand(r7, literal_offset));
4215  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4216  __ cmp(r1, ip);
4217  __ b(ne, &materialized);
4218
4219  // Create regexp literal using runtime function
4220  // Result will be in r0.
4221  __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4222  __ mov(r5, Operand(instr->hydrogen()->pattern()));
4223  __ mov(r4, Operand(instr->hydrogen()->flags()));
4224  __ Push(r7, r6, r5, r4);
4225  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
4226  __ mov(r1, r0);
4227
4228  __ bind(&materialized);
4229  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
4230  Label allocated, runtime_allocate;
4231
4232  __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
4233  __ jmp(&allocated);
4234
4235  __ bind(&runtime_allocate);
4236  __ mov(r0, Operand(Smi::FromInt(size)));
4237  __ Push(r1, r0);
4238  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4239  __ pop(r1);
4240
4241  __ bind(&allocated);
4242  // Copy the content into the newly allocated memory.
4243  // (Unroll copy loop once for better throughput).
4244  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
4245    __ ldr(r3, FieldMemOperand(r1, i));
4246    __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
4247    __ str(r3, FieldMemOperand(r0, i));
4248    __ str(r2, FieldMemOperand(r0, i + kPointerSize));
4249  }
4250  if ((size % (2 * kPointerSize)) != 0) {
4251    __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
4252    __ str(r3, FieldMemOperand(r0, size - kPointerSize));
4253  }
4254}
4255
4256
4257void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
4258  // Use the fast case closure allocation code that allocates in new
4259  // space for nested functions that don't need literals cloning.
4260  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
4261  bool pretenure = instr->hydrogen()->pretenure();
4262  if (!pretenure && shared_info->num_literals() == 0) {
4263    FastNewClosureStub stub(
4264        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
4265    __ mov(r1, Operand(shared_info));
4266    __ push(r1);
4267    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4268  } else {
4269    __ mov(r2, Operand(shared_info));
4270    __ mov(r1, Operand(pretenure
4271                       ? factory()->true_value()
4272                       : factory()->false_value()));
4273    __ Push(cp, r2, r1);
4274    CallRuntime(Runtime::kNewClosure, 3, instr);
4275  }
4276}
4277
4278
4279void LCodeGen::DoTypeof(LTypeof* instr) {
4280  Register input = ToRegister(instr->InputAt(0));
4281  __ push(input);
4282  CallRuntime(Runtime::kTypeof, 1, instr);
4283}
4284
4285
4286void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
4287  Register input = ToRegister(instr->InputAt(0));
4288  int true_block = chunk_->LookupDestination(instr->true_block_id());
4289  int false_block = chunk_->LookupDestination(instr->false_block_id());
4290  Label* true_label = chunk_->GetAssemblyLabel(true_block);
4291  Label* false_label = chunk_->GetAssemblyLabel(false_block);
4292
4293  Condition final_branch_condition = EmitTypeofIs(true_label,
4294                                                  false_label,
4295                                                  input,
4296                                                  instr->type_literal());
4297
4298  EmitBranch(true_block, false_block, final_branch_condition);
4299}
4300
4301
4302Condition LCodeGen::EmitTypeofIs(Label* true_label,
4303                                 Label* false_label,
4304                                 Register input,
4305                                 Handle<String> type_name) {
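  // Returns the condition under which typeof(input) equals type_name; cases
  // that can be decided early jump straight to true_label or false_label.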
4306  Condition final_branch_condition = kNoCondition;
4307  Register scratch = scratch0();
4308  if (type_name->Equals(heap()->number_symbol())) {
4309    __ JumpIfSmi(input, true_label);
4310    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
4311    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4312    __ cmp(input, Operand(ip));
4313    final_branch_condition = eq;
4314
4315  } else if (type_name->Equals(heap()->string_symbol())) {
4316    __ JumpIfSmi(input, false_label);
4317    __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
4318    __ b(ge, false_label);
4319    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
4320    __ tst(ip, Operand(1 << Map::kIsUndetectable));
4321    final_branch_condition = eq;
4322
4323  } else if (type_name->Equals(heap()->boolean_symbol())) {
4324    __ CompareRoot(input, Heap::kTrueValueRootIndex);
4325    __ b(eq, true_label);
4326    __ CompareRoot(input, Heap::kFalseValueRootIndex);
4327    final_branch_condition = eq;
4328
4329  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
4330    __ CompareRoot(input, Heap::kNullValueRootIndex);
4331    final_branch_condition = eq;
4332
4333  } else if (type_name->Equals(heap()->undefined_symbol())) {
4334    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
4335    __ b(eq, true_label);
4336    __ JumpIfSmi(input, false_label);
4337    // Check for undetectable objects => true.
4338    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
4339    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
4340    __ tst(ip, Operand(1 << Map::kIsUndetectable));
4341    final_branch_condition = ne;
4342
4343  } else if (type_name->Equals(heap()->function_symbol())) {
4344    __ JumpIfSmi(input, false_label);
4345    __ CompareObjectType(input, input, scratch,
4346                         FIRST_CALLABLE_SPEC_OBJECT_TYPE);
4347    final_branch_condition = ge;
4348
4349  } else if (type_name->Equals(heap()->object_symbol())) {
4350    __ JumpIfSmi(input, false_label);
4351    if (!FLAG_harmony_typeof) {
4352      __ CompareRoot(input, Heap::kNullValueRootIndex);
4353      __ b(eq, true_label);
4354    }
4355    __ CompareObjectType(input, input, scratch,
4356                         FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
4357    __ b(lt, false_label);
4358    __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
4359    __ b(gt, false_label);
4360    // Check for undetectable objects => false.
4361    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
4362    __ tst(ip, Operand(1 << Map::kIsUndetectable));
4363    final_branch_condition = eq;
4364
4365  } else {
4366    final_branch_condition = ne;
4367    __ b(false_label);
4368    // A dead branch instruction will be generated after this point.
4369  }
4370
4371  return final_branch_condition;
4372}
4373
4374
4375void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
4376  Register temp1 = ToRegister(instr->TempAt(0));
4377  int true_block = chunk_->LookupDestination(instr->true_block_id());
4378  int false_block = chunk_->LookupDestination(instr->false_block_id());
4379
4380  EmitIsConstructCall(temp1, scratch0());
4381  EmitBranch(true_block, false_block, eq);
4382}
4383
4384
4385void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
4386  ASSERT(!temp1.is(temp2));
4387  // Get the frame pointer for the calling frame.
4388  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4389
4390  // Skip the arguments adaptor frame if it exists.
4391  Label check_frame_marker;
4392  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
4393  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4394  __ b(ne, &check_frame_marker);
4395  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
4396
4397  // Check the marker in the calling frame.
4398  __ bind(&check_frame_marker);
4399  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
4400  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
4401}
4402
4403
4404void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
4405  // No code for lazy bailout instruction. Used to capture environment after a
4406  // call for populating the safepoint data with deoptimization data.
4407}
4408
4409
4410void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
4411  DeoptimizeIf(al, instr->environment());
4412}
4413
4414
4415void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
4416  Register object = ToRegister(instr->object());
4417  Register key = ToRegister(instr->key());
4418  Register strict = scratch0();
4419  __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
4420  __ Push(object, key, strict);
4421  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
4422  LPointerMap* pointers = instr->pointer_map();
4423  LEnvironment* env = instr->deoptimization_environment();
4424  RecordPosition(pointers->position());
4425  RegisterEnvironmentForDeoptimization(env);
4426  SafepointGenerator safepoint_generator(this,
4427                                         pointers,
4428                                         env->deoptimization_index());
4429  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
4430}
4431
4432
4433void LCodeGen::DoIn(LIn* instr) {
4434  Register obj = ToRegister(instr->object());
4435  Register key = ToRegister(instr->key());
4436  __ Push(key, obj);
4437  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
4438  LPointerMap* pointers = instr->pointer_map();
4439  LEnvironment* env = instr->deoptimization_environment();
4440  RecordPosition(pointers->position());
4441  RegisterEnvironmentForDeoptimization(env);
4442  SafepointGenerator safepoint_generator(this,
4443                                         pointers,
4444                                         env->deoptimization_index());
4445  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
4446}
4447
4448
4449void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
4450  {
4451    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4452    __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
4453    RegisterLazyDeoptimization(
4454        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4455  }
4456
4457  // The gap code includes the restoring of the safepoint registers.
4458  int pc = masm()->pc_offset();
4459  safepoints_.SetPcAfterGap(pc);
4460}
4461
4462
4463void LCodeGen::DoStackCheck(LStackCheck* instr) {
4464  class DeferredStackCheck: public LDeferredCode {
4465   public:
4466    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
4467        : LDeferredCode(codegen), instr_(instr) { }
4468    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
4469   private:
4470    LStackCheck* instr_;
4471  };
4472
4473  if (instr->hydrogen()->is_function_entry()) {
4474    // Perform stack overflow check.
4475    Label done;
4476    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
4477    __ cmp(sp, Operand(ip));
4478    __ b(hs, &done);
4479    StackCheckStub stub;
4480    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4481    __ bind(&done);
4482  } else {
4483    ASSERT(instr->hydrogen()->is_backwards_branch());
4484    // Perform stack overflow check if this goto needs it before jumping.
4485    DeferredStackCheck* deferred_stack_check =
4486        new DeferredStackCheck(this, instr);
4487    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
4488    __ cmp(sp, Operand(ip));
4489    __ b(lo, deferred_stack_check->entry());
4490    __ bind(instr->done_label());
4491    deferred_stack_check->SetExit(instr->done_label());
4492  }
4493}
4494
4495
4496void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4497  // This is a pseudo-instruction that ensures that the environment here is
4498  // properly registered for deoptimization and records the assembler's PC
4499  // offset.
4500  LEnvironment* environment = instr->environment();
4501  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
4502                                   instr->SpilledDoubleRegisterArray());
4503
4504  // If the environment were already registered, we would have no way of
4505  // backpatching it with the spill slot operands.
4506  ASSERT(!environment->HasBeenRegistered());
4507  RegisterEnvironmentForDeoptimization(environment);
4508  ASSERT(osr_pc_offset_ == -1);
4509  osr_pc_offset_ = masm()->pc_offset();
4510}
4511
4512
4513
4514
4515#undef __
4516
4517} }  // namespace v8::internal
4518